From 997f858f859e440f853f0cbbcffae1f995f38ba3 Mon Sep 17 00:00:00 2001
From: Iain Adams
Date: Sat, 6 Jun 2020 16:34:39 +0100
Subject: [PATCH 01/23] initial update to terraform sdk v2.0.0-rc.1

---
 Makefile | 35 +-
 func-tests/main.tf | 7 +-
 go.mod | 13 +-
 go.sum | 133 +-
 main.go | 2 +-
 pingaccess/config.go | 3 +-
 .../data_source_pingaccess_acme_default.go | 2 +-
 ...ata_source_pingaccess_acme_default_test.go | 2 +-
 .../data_source_pingaccess_certificate.go | 2 +-
 ...data_source_pingaccess_certificate_test.go | 4 +-
 pingaccess/data_source_pingaccess_keypair.go | 2 +-
 .../data_source_pingaccess_keypair_test.go | 4 +-
 ...ingaccess_pingfederate_runtime_metadata.go | 2 +-
 ...cess_pingfederate_runtime_metadata_test.go | 10 +-
 ...e_pingaccess_trusted_certificate_groups.go | 2 +-
 ...gaccess_trusted_certificate_groups_test.go | 4 +-
 pingaccess/diff_suppress_funcs.go | 2 +-
 pingaccess/provider.go | 12 +-
 pingaccess/provider_test.go | 191 +-
 ...ource_pingaccess_access_token_validator.go | 2 +-
 ..._pingaccess_access_token_validator_test.go | 4 +-
 pingaccess/resource_pingaccess_acme_server.go | 4 +-
 .../resource_pingaccess_acme_server_test.go | 6 +-
 pingaccess/resource_pingaccess_application.go | 2 +-
 ...esource_pingaccess_application_resource.go | 2 +-
 ...ce_pingaccess_application_resource_test.go | 6 +-
 .../resource_pingaccess_application_test.go | 11 +-
 ...source_pingaccess_auth_token_management.go | 2 +-
 ...e_pingaccess_auth_token_management_test.go | 6 +-
 .../resource_pingaccess_authn_req_list.go | 2 +-
 ...resource_pingaccess_authn_req_list_test.go | 6 +-
 pingaccess/resource_pingaccess_certificate.go | 2 +-
 .../resource_pingaccess_certificate_test.go | 4 +-
 .../resource_pingaccess_engine_listener.go | 2 +-
 ...esource_pingaccess_engine_listener_test.go | 6 +-
 .../resource_pingaccess_hsm_provider.go | 2 +-
 .../resource_pingaccess_hsm_provider_test.go | 4 +-
 ...gaccess_http_config_request_host_source.go | 2 +-
 ...ss_http_config_request_host_source_test.go | 6 +-
 .../resource_pingaccess_https_listener.go | 2 +-
 ...resource_pingaccess_https_listener_test.go | 6 +-
 .../resource_pingaccess_identity_mapping.go | 2 +-
 ...source_pingaccess_identity_mapping_test.go | 4 +-
 pingaccess/resource_pingaccess_keypair.go | 7 +-
 .../resource_pingaccess_keypair_test.go | 4 +-
 .../resource_pingaccess_oauth_server.go | 2 +-
 .../resource_pingaccess_oauth_server_test.go | 6 +-
 .../resource_pingaccess_pingfederate_admin.go | 2 +-
 ...urce_pingaccess_pingfederate_admin_test.go | 11 +-
 .../resource_pingaccess_pingfederate_oauth.go | 2 +-
 ...urce_pingaccess_pingfederate_oauth_test.go | 6 +-
 ...esource_pingaccess_pingfederate_runtime.go | 2 +-
 ...ce_pingaccess_pingfederate_runtime_test.go | 11 +-
 pingaccess/resource_pingaccess_rule.go | 7 +-
 pingaccess/resource_pingaccess_rule_test.go | 4 +-
 pingaccess/resource_pingaccess_rulesets.go | 2 +-
 .../resource_pingaccess_rulesets_test.go | 4 +-
 pingaccess/resource_pingaccess_site.go | 2 +-
 .../resource_pingaccess_site_authenticator.go | 2 +-
 ...urce_pingaccess_site_authenticator_test.go | 6 +-
 pingaccess/resource_pingaccess_site_test.go | 6 +-
 ...resource_pingaccess_third_party_service.go | 4 +-
 ...rce_pingaccess_third_party_service_test.go | 6 +-
 ...e_pingaccess_trusted_certificate_groups.go | 2 +-
 ...gaccess_trusted_certificate_groups_test.go | 6 +-
 pingaccess/resource_pingaccess_virtualhost.go | 2 +-
 .../resource_pingaccess_virtualhost_test.go | 6 +-
 pingaccess/resource_pingaccess_websession.go | 2 +-
 .../resource_pingaccess_websession_test.go | 6 +-
 pingaccess/structures.go | 4 +-
 .../colorstring => Azure/go-ansiterm}/LICENSE | 2 +-
 vendor/github.com/Azure/go-ansiterm/README.md | 12 +
 .../github.com/Azure/go-ansiterm/constants.go | 188 +
 .../github.com/Azure/go-ansiterm/context.go | 7 +
 .../Azure/go-ansiterm/csi_entry_state.go | 49 +
 .../Azure/go-ansiterm/csi_param_state.go | 38 +
 .../go-ansiterm/escape_intermediate_state.go | 36 +
 .../Azure/go-ansiterm/escape_state.go | 47 +
 .../Azure/go-ansiterm/event_handler.go | 90 +
 .../Azure/go-ansiterm/ground_state.go | 24 +
 .../Azure/go-ansiterm/osc_string_state.go | 31 +
 vendor/github.com/Azure/go-ansiterm/parser.go | 151 +
 .../go-ansiterm/parser_action_helpers.go | 99 +
 .../Azure/go-ansiterm/parser_actions.go | 119 +
 vendor/github.com/Azure/go-ansiterm/states.go | 71 +
 .../github.com/Azure/go-ansiterm/utilities.go | 21 +
 .../Azure/go-ansiterm/winterm/ansi.go | 182 +
 .../Azure/go-ansiterm/winterm/api.go | 327 +
 .../go-ansiterm/winterm/attr_translation.go | 100 +
 .../go-ansiterm/winterm/cursor_helpers.go | 101 +
 .../go-ansiterm/winterm/erase_helpers.go | 84 +
 .../go-ansiterm/winterm/scroll_helper.go | 118 +
 .../Azure/go-ansiterm/winterm/utilities.go | 9 +
 .../go-ansiterm/winterm/win_event_handler.go | 743 ++
 vendor/github.com/Microsoft/hcsshim/LICENSE | 21 -
 .../Microsoft/hcsshim/osversion/osversion.go | 51 -
 .../hcsshim/osversion/windowsbuilds.go | 10 -
 vendor/github.com/Nvveen/Gotty/LICENSE | 26 +
 vendor/github.com/Nvveen/Gotty/README | 5 +
 vendor/github.com/Nvveen/Gotty/TODO | 3 +
 vendor/github.com/Nvveen/Gotty/attributes.go | 514 +
 vendor/github.com/Nvveen/Gotty/gotty.go | 238 +
 vendor/github.com/Nvveen/Gotty/parser.go | 362 +
 vendor/github.com/Nvveen/Gotty/types.go | 23 +
 .../github.com/apparentlymart/go-cidr/LICENSE | 19 -
 .../apparentlymart/go-cidr/cidr/cidr.go | 218 -
 .../apparentlymart/go-cidr/cidr/wrangling.go | 37 -
 vendor/github.com/armon/go-radix/.travis.yml | 3 -
 vendor/github.com/armon/go-radix/LICENSE | 20 -
 vendor/github.com/armon/go-radix/README.md | 38 -
 vendor/github.com/armon/go-radix/go.mod | 1 -
 vendor/github.com/armon/go-radix/radix.go | 540 -
 vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 2 +-
 .../aws/aws-sdk-go/aws/awserr/error.go | 23 +-
 .../aws/aws-sdk-go/aws/awserr/types.go | 31 +-
 .../aws/aws-sdk-go/aws/awsutil/equal.go | 2 +-
 .../aws/aws-sdk-go/aws/awsutil/path_value.go | 11 +-
 .../aws-sdk-go/aws/awsutil/string_value.go | 25 +-
 .../aws/aws-sdk-go/aws/client/client.go | 4 +-
 .../aws-sdk-go/aws/client/default_retryer.go | 151 +-
 .../aws/aws-sdk-go/aws/client/logger.go | 18 +-
 .../aws-sdk-go/aws/client/no_op_retryer.go | 28 -
 .../github.com/aws/aws-sdk-go/aws/config.go | 22 +-
 .../aws/{context_1_5.go => context.go} | 40 +-
 ...ntext_background_1_5.go => context_1_6.go} | 15 -
 .../aws/aws-sdk-go/aws/context_1_7.go | 9 +
 .../aws/aws-sdk-go/aws/context_1_9.go | 11 -
 .../aws-sdk-go/aws/context_background_1_7.go | 20 -
 .../aws/aws-sdk-go/aws/context_sleep.go | 24 -
 .../aws/aws-sdk-go/aws/convert_types.go | 531 -
 .../aws-sdk-go/aws/corehandlers/handlers.go | 64 +-
 .../aws-sdk-go/aws/corehandlers/user_agent.go | 2 +-
 .../aws-sdk-go/aws/credentials/credentials.go | 42 -
 .../ec2rolecreds/ec2_role_provider.go | 6 +-
 .../aws/credentials/endpointcreds/provider.go | 19 +-
 .../aws/credentials/processcreds/provider.go | 425 -
 .../stscreds/assume_role_provider.go | 22 +-
 .../stscreds/web_identity_provider.go | 100 -
 .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 65 +-
 .../aws/aws-sdk-go/aws/csm/enable.go | 34 +-
 .../aws/aws-sdk-go/aws/csm/metric.go | 56 -
 .../aws/aws-sdk-go/aws/csm/metric_chan.go | 11 +-
 .../aws-sdk-go/aws/csm/metric_exception.go | 26 -
 .../aws/aws-sdk-go/aws/csm/reporter.go | 71 +-
 .../aws/aws-sdk-go/aws/ec2metadata/api.go | 46 +-
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 9 +-
 .../aws/aws-sdk-go/aws/endpoints/decode.go | 28 -
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 2441 +---
 .../aws/endpoints/dep_service_ids.go | 141 -
 .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 9 +-
 .../aws/aws-sdk-go/aws/endpoints/v3model.go | 5 +-
 .../aws/endpoints/v3model_codegen.go | 24 +-
 .../aws/request/connection_reset_error.go | 17 +-
 .../request/connection_reset_error_other.go | 11 +
 .../aws/aws-sdk-go/aws/request/handlers.go | 52 +-
 .../aws-sdk-go/aws/request/offset_reader.go | 15 +-
 .../aws/aws-sdk-go/aws/request/request.go | 229 +-
 .../aws/aws-sdk-go/aws/request/request_1_8.go | 5 +-
 .../aws/request/request_pagination.go | 2 +-
 .../aws/aws-sdk-go/aws/request/retryer.go | 167 +-
 .../aws/aws-sdk-go/aws/request/validation.go | 25 -
 .../aws/session/cabundle_transport.go | 26 -
 .../aws/session/cabundle_transport_1_5.go | 22 -
 .../aws/session/cabundle_transport_1_6.go | 23 -
 .../aws/aws-sdk-go/aws/session/credentials.go | 259 -
 .../aws/aws-sdk-go/aws/session/doc.go | 208 +-
 .../aws/aws-sdk-go/aws/session/env_config.go | 65 +-
 .../aws/aws-sdk-go/aws/session/session.go | 289 +-
 .../aws-sdk-go/aws/session/shared_config.go | 364 +-
 .../aws/aws-sdk-go/aws/signer/v4/v4.go | 83 +-
 vendor/github.com/aws/aws-sdk-go/aws/types.go | 20 +-
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../aws/aws-sdk-go/internal/ini/ini_parser.go | 11 +-
 .../aws/aws-sdk-go/internal/ini/statement.go | 2 +-
 .../aws/aws-sdk-go/internal/sdkio/byte.go | 12 -
 .../aws/aws-sdk-go/internal/sdkmath/floor.go | 15 -
 .../internal/sdkmath/floor_go1.9.go | 56 -
 .../aws/aws-sdk-go/internal/sdkrand/read.go | 11 -
 .../aws-sdk-go/internal/sdkrand/read_1_5.go | 24 -
 .../internal/shareddefaults/ecs_container.go | 2 +-
 .../aws/aws-sdk-go/private/protocol/host.go | 51 +-
 .../private/protocol/host_prefix.go | 54 -
 .../private/protocol/json/jsonutil/build.go | 296 -
 .../protocol/json/jsonutil/unmarshal.go | 250 -
 .../private/protocol/query/build.go | 2 +-
 .../private/protocol/query/unmarshal.go | 2 +-
 .../private/protocol/query/unmarshal_error.go | 77 +-
 .../aws-sdk-go/private/protocol/rest/build.go | 27 +-
 .../private/protocol/rest/unmarshal.go | 36 +-
 .../private/protocol/restxml/restxml.go | 8 +-
 .../aws-sdk-go/private/protocol/timestamp.go | 20 +-
 .../private/protocol/xml/xmlutil/build.go | 2 +-
 .../private/protocol/xml/xmlutil/sort.go | 32 -
 .../private/protocol/xml/xmlutil/unmarshal.go | 19 -
 .../protocol/xml/xmlutil/xml_to_struct.go | 13 +-
 .../aws/aws-sdk-go/service/s3/api.go | 3065 +----
 .../aws-sdk-go/service/s3/bucket_location.go | 3 +-
 .../aws-sdk-go/service/s3/customizations.go | 4 +-
 .../aws/aws-sdk-go/service/s3/doc_custom.go | 14 -
 .../aws/aws-sdk-go/service/s3/sse.go | 64 +-
 .../aws-sdk-go/service/s3/statusok_error.go | 4 +-
 .../aws-sdk-go/service/s3/unmarshal_error.go | 47 +-
 .../aws/aws-sdk-go/service/sts/api.go | 1140 +-
 .../aws-sdk-go/service/sts/customizations.go | 11 +-
 .../aws/aws-sdk-go/service/sts/doc.go | 76 +-
 .../aws/aws-sdk-go/service/sts/errors.go | 2 +-
 .../service/sts/stsiface/interface.go | 96 -
 .../github.com/bgentry/speakeasy/.gitignore | 2 -
 vendor/github.com/bgentry/speakeasy/LICENSE | 24 -
 .../bgentry/speakeasy/LICENSE_WINDOWS | 201 -
 vendor/github.com/bgentry/speakeasy/Readme.md | 30 -
 .../github.com/bgentry/speakeasy/speakeasy.go | 49 -
 .../bgentry/speakeasy/speakeasy_unix.go | 93 -
 .../bgentry/speakeasy/speakeasy_windows.go | 41 -
 vendor/github.com/cenkalti/backoff/.gitignore | 22 -
 .../backoff/v3}/.gitignore | 0
 .../cenkalti/backoff/{ => v3}/.travis.yml | 0
 .../cenkalti/backoff/{ => v3}/LICENSE | 0
 .../cenkalti/backoff/{ => v3}/README.md | 0
 .../cenkalti/backoff/{ => v3}/backoff.go | 0
 .../cenkalti/backoff/{ => v3}/context.go | 0
 .../cenkalti/backoff/{ => v3}/exponential.go | 0
 vendor/github.com/cenkalti/backoff/v3/go.mod | 3 +
 .../cenkalti/backoff/{ => v3}/retry.go | 0
 .../cenkalti/backoff/{ => v3}/ticker.go | 0
 .../cenkalti/backoff/{ => v3}/tries.go | 0
 .../containerd/continuity/fs/copy.go | 176 -
 .../containerd/continuity/fs/copy_linux.go | 147 -
 .../containerd/continuity/fs/copy_unix.go | 112 -
 .../containerd/continuity/fs/copy_windows.go | 49 -
 .../containerd/continuity/fs/diff.go | 326 -
 .../containerd/continuity/fs/diff_unix.go | 74 -
 .../containerd/continuity/fs/diff_windows.go | 48 -
 .../containerd/continuity/fs/dtype_linux.go | 103 -
 .../github.com/containerd/continuity/fs/du.go | 38 -
 .../containerd/continuity/fs/du_unix.go | 110 -
 .../containerd/continuity/fs/du_windows.go | 82 -
 .../containerd/continuity/fs/hardlink.go | 43 -
 .../containerd/continuity/fs/hardlink_unix.go | 34 -
 .../continuity/fs/hardlink_windows.go | 23 -
 .../containerd/continuity/fs/path.go | 313 -
 .../continuity/fs/stat_darwinfreebsd.go | 44 -
 .../continuity/fs/stat_linuxopenbsd.go | 45 -
 .../containerd/continuity/fs/time.go | 29 -
 .../continuity/syscallx/syscall_unix.go | 26 -
 .../continuity/syscallx/syscall_windows.go | 112 -
 .../containerd/continuity/sysx/README.md | 3 -
 .../containerd/continuity/sysx/file_posix.go | 128 -
 .../containerd/continuity/sysx/generate.sh | 52 -
 .../continuity/sysx/nodata_linux.go | 23 -
 .../continuity/sysx/nodata_solaris.go | 24 -
 .../containerd/continuity/sysx/nodata_unix.go | 25 -
 .../containerd/continuity/sysx/xattr.go | 117 -
 .../continuity/sysx/xattr_unsupported.go | 67 -
 vendor/github.com/docker/distribution/LICENSE | 202 -
 .../docker/distribution/digestset/set.go | 247 -
 .../docker/distribution/reference/helpers.go | 42 -
 .../distribution/reference/normalize.go | 199 -
 .../distribution/reference/reference.go | 433 -
 .../docker/distribution/reference/regexp.go | 143 -
 .../registry/api/errcode/errors.go | 267 -
 .../registry/api/errcode/handler.go | 40 -
 .../registry/api/errcode/register.go | 138 -
 vendor/github.com/docker/docker/AUTHORS | 2082 ---
 vendor/github.com/docker/docker/LICENSE | 191 -
 vendor/github.com/docker/docker/NOTICE | 19 -
 vendor/github.com/docker/docker/api/README.md | 42 -
 vendor/github.com/docker/docker/api/common.go | 11 -
 .../docker/docker/api/common_unix.go | 6 -
 .../docker/docker/api/common_windows.go | 8 -
 .../docker/docker/api/swagger-gen.yaml | 12 -
 .../github.com/docker/docker/api/swagger.yaml | 10414 ----------------
 .../docker/docker/api/types/events/events.go | 52 -
 .../docker/api/types/image/image_history.go | 37 -
 .../docker/docker/api/types/swarm/common.go | 40 -
 .../docker/docker/api/types/swarm/config.go | 40 -
 .../docker/api/types/swarm/container.go | 76 -
 .../docker/docker/api/types/swarm/network.go | 121 -
 .../docker/docker/api/types/swarm/node.go | 115 -
 .../docker/docker/api/types/swarm/runtime.go | 27 -
 .../docker/api/types/swarm/runtime/gen.go | 3 -
 .../api/types/swarm/runtime/plugin.pb.go | 712 --
 .../api/types/swarm/runtime/plugin.proto | 20 -
 .../docker/docker/api/types/swarm/secret.go | 36 -
 .../docker/docker/api/types/swarm/service.go | 124 -
 .../docker/docker/api/types/swarm/swarm.go | 227 -
 .../docker/docker/api/types/swarm/task.go | 192 -
 .../docker/api/types/time/duration_convert.go | 12 -
 .../docker/docker/api/types/time/timestamp.go | 129 -
 .../docker/api/types/volume/volume_create.go | 29 -
 .../docker/api/types/volume/volume_list.go | 23 -
 .../github.com/docker/docker/client/README.md | 35 -
 .../docker/docker/client/build_cancel.go | 16 -
 .../docker/docker/client/build_prune.go | 45 -
 .../docker/docker/client/checkpoint_create.go | 14 -
 .../docker/docker/client/checkpoint_delete.go | 20 -
 .../docker/docker/client/checkpoint_list.go | 28 -
 .../github.com/docker/docker/client/client.go | 309 -
 .../docker/docker/client/client_deprecated.go | 23 -
 .../docker/docker/client/client_unix.go | 9 -
 .../docker/docker/client/client_windows.go | 7 -
 .../docker/docker/client/config_create.go | 25 -
 .../docker/docker/client/config_inspect.go | 36 -
 .../docker/docker/client/config_list.go | 38 -
 .../docker/docker/client/config_remove.go | 13 -
 .../docker/docker/client/config_update.go | 21 -
 .../docker/docker/client/container_attach.go | 57 -
 .../docker/docker/client/container_commit.go | 55 -
 .../docker/docker/client/container_copy.go | 103 -
 .../docker/docker/client/container_create.go | 52 -
 .../docker/docker/client/container_diff.go | 23 -
 .../docker/docker/client/container_exec.go | 54 -
 .../docker/docker/client/container_export.go | 19 -
 .../docker/docker/client/container_inspect.go | 53 -
 .../docker/docker/client/container_kill.go | 16 -
 .../docker/docker/client/container_list.go | 56 -
 .../docker/docker/client/container_logs.go | 80 -
 .../docker/docker/client/container_pause.go | 10 -
 .../docker/docker/client/container_prune.go | 36 -
 .../docker/docker/client/container_remove.go | 27 -
 .../docker/docker/client/container_rename.go | 15 -
 .../docker/docker/client/container_resize.go | 29 -
 .../docker/docker/client/container_restart.go | 22 -
 .../docker/docker/client/container_start.go | 23 -
 .../docker/docker/client/container_stats.go | 26 -
 .../docker/docker/client/container_stop.go | 26 -
 .../docker/docker/client/container_top.go | 28 -
 .../docker/docker/client/container_unpause.go | 10 -
 .../docker/docker/client/container_update.go | 21 -
 .../docker/docker/client/container_wait.go | 83 -
 .../docker/docker/client/disk_usage.go | 26 -
 .../docker/client/distribution_inspect.go | 38 -
 .../github.com/docker/docker/client/errors.go | 138 -
 .../github.com/docker/docker/client/events.go | 101 -
 .../github.com/docker/docker/client/hijack.go | 143 -
 .../docker/docker/client/image_build.go | 146 -
 .../docker/docker/client/image_create.go | 37 -
 .../docker/docker/client/image_history.go | 22 -
 .../docker/docker/client/image_import.go | 40 -
 .../docker/docker/client/image_inspect.go | 32 -
 .../docker/docker/client/image_list.go | 45 -
 .../docker/docker/client/image_load.go | 29 -
 .../docker/docker/client/image_prune.go | 36 -
 .../docker/docker/client/image_pull.go | 64 -
 .../docker/docker/client/image_push.go | 55 -
 .../docker/docker/client/image_remove.go | 31 -
 .../docker/docker/client/image_save.go | 21 -
 .../docker/docker/client/image_search.go | 51 -
 .../docker/docker/client/image_tag.go | 37 -
 .../github.com/docker/docker/client/info.go | 26 -
 .../docker/docker/client/interface.go | 199 -
 .../docker/client/interface_experimental.go | 18 -
 .../docker/docker/client/interface_stable.go | 10 -
 .../github.com/docker/docker/client/login.go | 25 -
 .../docker/docker/client/network_connect.go | 19 -
 .../docker/docker/client/network_create.go | 25 -
 .../docker/client/network_disconnect.go | 15 -
 .../docker/docker/client/network_inspect.go | 49 -
 .../docker/docker/client/network_list.go | 31 -
 .../docker/docker/client/network_prune.go | 36 -
 .../docker/docker/client/network_remove.go | 10 -
 .../docker/docker/client/node_inspect.go | 32 -
 .../docker/docker/client/node_list.go | 36 -
 .../docker/docker/client/node_remove.go | 20 -
 .../docker/docker/client/node_update.go | 18 -
 .../docker/docker/client/options.go | 172 -
 .../github.com/docker/docker/client/ping.go | 64 -
 .../docker/docker/client/plugin_create.go | 23 -
 .../docker/docker/client/plugin_disable.go | 19 -
 .../docker/docker/client/plugin_enable.go | 19 -
 .../docker/docker/client/plugin_inspect.go | 31 -
 .../docker/docker/client/plugin_install.go | 113 -
 .../docker/docker/client/plugin_list.go | 32 -
 .../docker/docker/client/plugin_push.go | 16 -
 .../docker/docker/client/plugin_remove.go | 20 -
 .../docker/docker/client/plugin_set.go | 12 -
 .../docker/docker/client/plugin_upgrade.go | 39 -
 .../docker/docker/client/request.go | 273 -
 .../docker/docker/client/secret_create.go | 25 -
 .../docker/docker/client/secret_inspect.go | 36 -
 .../docker/docker/client/secret_list.go | 38 -
 .../docker/docker/client/secret_remove.go | 13 -
 .../docker/docker/client/secret_update.go | 21 -
 .../docker/docker/client/service_create.go | 166 -
 .../docker/docker/client/service_inspect.go | 37 -
 .../docker/docker/client/service_list.go | 35 -
 .../docker/docker/client/service_logs.go | 52 -
 .../docker/docker/client/service_remove.go | 10 -
 .../docker/docker/client/service_update.go | 94 -
 .../docker/client/swarm_get_unlock_key.go | 21 -
 .../docker/docker/client/swarm_init.go | 21 -
 .../docker/docker/client/swarm_inspect.go | 21 -
 .../docker/docker/client/swarm_join.go | 14 -
 .../docker/docker/client/swarm_leave.go | 17 -
 .../docker/docker/client/swarm_unlock.go | 14 -
 .../docker/docker/client/swarm_update.go | 22 -
 .../docker/docker/client/task_inspect.go | 32 -
 .../docker/docker/client/task_list.go | 35 -
 .../docker/docker/client/task_logs.go | 51 -
 .../docker/docker/client/transport.go | 17 -
 .../github.com/docker/docker/client/utils.go | 34 -
 .../docker/docker/client/version.go | 21 -
 .../docker/docker/client/volume_create.go | 21 -
 .../docker/docker/client/volume_inspect.go | 38 -
 .../docker/docker/client/volume_list.go | 32 -
 .../docker/docker/client/volume_prune.go | 36 -
 .../docker/docker/client/volume_remove.go | 21 -
 .../github.com/docker/docker/errdefs/defs.go | 69 -
 .../github.com/docker/docker/errdefs/doc.go | 8 -
 .../docker/docker/errdefs/helpers.go | 227 -
 .../docker/docker/errdefs/http_helpers.go | 172 -
 vendor/github.com/docker/docker/errdefs/is.go | 107 -
 .../docker/pkg/archive/archive_linux.go | 261 -
 .../docker/pkg/archive/archive_other.go | 7 -
 .../docker/pkg/archive/changes_windows.go | 34 -
 .../docker/pkg/idtools/idtools_windows.go | 25 -
 .../docker/docker/pkg/mount/mount.go | 159 -
 .../docker/pkg/mount/mounter_unsupported.go | 7 -
 .../docker/pkg/mount/mountinfo_linux.go | 144 -
 .../docker/pkg/mount/mountinfo_windows.go | 6 -
 .../docker/docker/pkg/mount/unmount_unix.go | 22 -
 .../docker/pkg/mount/unmount_unsupported.go | 7 -
 .../docker/docker/pkg/system/args_windows.go | 16 -
 .../docker/docker/pkg/system/init_unix.go | 12 -
 .../docker/docker/pkg/system/init_windows.go | 41 -
 .../docker/docker/pkg/system/lcow.go | 32 -
 .../docker/docker/pkg/system/path_unix.go | 10 -
 .../docker/docker/pkg/system/path_windows.go | 24 -
 .../docker/go-connections/sockets/README.md | 0
 .../go-connections/sockets/inmem_socket.go | 81 -
 .../docker/go-connections/sockets/proxy.go | 51 -
 .../docker/go-connections/sockets/sockets.go | 38 -
 .../go-connections/sockets/sockets_unix.go | 35 -
 .../go-connections/sockets/sockets_windows.go | 27 -
 .../go-connections/sockets/tcp_socket.go | 22 -
 .../go-connections/sockets/unix_socket.go | 32 -
 .../go-connections/tlsconfig/certpool_go17.go | 18 -
 .../tlsconfig/certpool_other.go | 13 -
 .../docker/go-connections/tlsconfig/config.go | 254 -
 .../tlsconfig/config_client_ciphers.go | 17 -
 .../tlsconfig/config_legacy_client_ciphers.go | 15 -
 vendor/github.com/fatih/color/.travis.yml | 5 -
 vendor/github.com/fatih/color/Gopkg.lock | 27 -
 vendor/github.com/fatih/color/Gopkg.toml | 30 -
 vendor/github.com/fatih/color/LICENSE.md | 20 -
 vendor/github.com/fatih/color/README.md | 179 -
 vendor/github.com/fatih/color/color.go | 603 -
 vendor/github.com/fatih/color/doc.go | 133 -
 vendor/github.com/gogo/protobuf/AUTHORS | 15 -
 vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 -
 vendor/github.com/gogo/protobuf/LICENSE | 35 -
 .../github.com/gogo/protobuf/proto/Makefile | 43 -
 .../github.com/gogo/protobuf/proto/clone.go | 258 -
 .../github.com/gogo/protobuf/proto/decode.go | 428 -
 .../github.com/gogo/protobuf/proto/discard.go | 350 -
 .../gogo/protobuf/proto/duration.go | 100 -
 .../gogo/protobuf/proto/duration_gogo.go | 49 -
 .../github.com/gogo/protobuf/proto/encode.go | 203 -
 .../gogo/protobuf/proto/encode_gogo.go | 33 -
 .../github.com/gogo/protobuf/proto/equal.go | 300 -
 .../gogo/protobuf/proto/extensions.go | 604 -
 .../gogo/protobuf/proto/extensions_gogo.go | 368 -
 vendor/github.com/gogo/protobuf/proto/lib.go | 987 --
 .../gogo/protobuf/proto/lib_gogo.go | 50 -
 .../gogo/protobuf/proto/message_set.go | 314 -
 .../gogo/protobuf/proto/pointer_reflect.go | 357 -
 .../protobuf/proto/pointer_reflect_gogo.go | 59 -
 .../gogo/protobuf/proto/pointer_unsafe.go | 308 -
 .../protobuf/proto/pointer_unsafe_gogo.go | 56 -
 .../gogo/protobuf/proto/properties.go | 608 -
 .../gogo/protobuf/proto/properties_gogo.go | 36 -
 .../gogo/protobuf/proto/skip_gogo.go | 119 -
 .../gogo/protobuf/proto/table_marshal.go | 3006 -----
 .../gogo/protobuf/proto/table_marshal_gogo.go | 388 -
 .../gogo/protobuf/proto/table_merge.go | 657 -
 .../gogo/protobuf/proto/table_unmarshal.go | 2245 ----
 .../protobuf/proto/table_unmarshal_gogo.go | 385 -
 vendor/github.com/gogo/protobuf/proto/text.go | 928 --
 .../gogo/protobuf/proto/text_gogo.go | 57 -
 .../gogo/protobuf/proto/text_parser.go | 1018 --
 .../gogo/protobuf/proto/timestamp.go | 113 -
 .../gogo/protobuf/proto/timestamp_gogo.go | 49 -
 .../gogo/protobuf/proto/wrappers.go | 1888 ---
 .../gogo/protobuf/proto/wrappers_gogo.go | 113 -
 .../golang/protobuf/ptypes/empty/empty.pb.go | 85 +
 .../protobuf/ptypes/empty/empty.proto} | 37 +-
 vendor/github.com/google/uuid/.travis.yml | 9 -
 vendor/github.com/google/uuid/CONTRIBUTING.md | 10 -
 vendor/github.com/google/uuid/CONTRIBUTORS | 9 -
 vendor/github.com/google/uuid/LICENSE | 27 -
 vendor/github.com/google/uuid/README.md | 19 -
 vendor/github.com/google/uuid/dce.go | 80 -
 vendor/github.com/google/uuid/doc.go | 12 -
 vendor/github.com/google/uuid/go.mod | 1 -
 vendor/github.com/google/uuid/hash.go | 53 -
 vendor/github.com/google/uuid/marshal.go | 37 -
 vendor/github.com/google/uuid/node.go | 90 -
 vendor/github.com/google/uuid/node_js.go | 12 -
 vendor/github.com/google/uuid/node_net.go | 33 -
 vendor/github.com/google/uuid/sql.go | 59 -
 vendor/github.com/google/uuid/time.go | 123 -
 vendor/github.com/google/uuid/util.go | 43 -
 vendor/github.com/google/uuid/uuid.go | 245 -
 vendor/github.com/google/uuid/version1.go | 44 -
 vendor/github.com/google/uuid/version4.go | 38 -
 .../go-colorable => hashicorp/go-cty}/LICENSE | 4 +-
 .../hashicorp/go-cty/cty/capsule.go | 128 +
 .../hashicorp/go-cty/cty/capsule_ops.go | 132 +
 .../hashicorp/go-cty/cty/collection.go | 34 +
 .../go-cty/cty/convert/compare_types.go | 165 +
 .../go-cty/cty/convert/conversion.go | 190 +
 .../go-cty/cty/convert/conversion_capsule.go | 31 +
 .../cty/convert/conversion_collection.go | 488 +
 .../go-cty/cty/convert/conversion_dynamic.go | 33 +
 .../go-cty/cty/convert/conversion_object.go | 76 +
 .../cty/convert/conversion_primitive.go | 57 +
 .../go-cty/cty/convert/conversion_tuple.go | 71 +
 .../hashicorp/go-cty/cty/convert/doc.go | 15 +
 .../go-cty/cty/convert/mismatch_msg.go | 220 +
 .../hashicorp/go-cty/cty/convert/public.go | 83 +
 .../go-cty/cty/convert/sort_types.go | 69 +
 .../hashicorp/go-cty/cty/convert/unify.go | 357 +
 vendor/github.com/hashicorp/go-cty/cty/doc.go | 18 +
 .../hashicorp/go-cty/cty/element_iterator.go | 194 +
 .../github.com/hashicorp/go-cty/cty/error.go | 55 +
 vendor/github.com/hashicorp/go-cty/cty/gob.go | 204 +
 .../hashicorp/go-cty/cty/gocty/doc.go | 7 +
 .../hashicorp/go-cty/cty/gocty/helpers.go | 43 +
 .../hashicorp/go-cty/cty/gocty/in.go | 548 +
 .../hashicorp/go-cty/cty/gocty/out.go | 686 +
 .../go-cty/cty/gocty/type_implied.go | 108 +
 .../github.com/hashicorp/go-cty/cty/helper.go | 99 +
 .../github.com/hashicorp/go-cty/cty/json.go | 176 +
 .../hashicorp/go-cty/cty/json/doc.go | 11 +
 .../hashicorp/go-cty/cty/json/marshal.go | 193 +
 .../hashicorp/go-cty/cty/json/simple.go | 41 +
 .../hashicorp/go-cty/cty/json/type.go | 23 +
 .../hashicorp/go-cty/cty/json/type_implied.go | 170 +
 .../hashicorp/go-cty/cty/json/unmarshal.go | 459 +
 .../hashicorp/go-cty/cty/json/value.go | 65 +
 .../hashicorp/go-cty/cty/list_type.go | 74 +
 .../hashicorp/go-cty/cty/map_type.go | 74 +
 .../github.com/hashicorp/go-cty/cty/marks.go | 296 +
 .../go-cty/cty/msgpack/doc.go | 0
 .../go-cty/cty/msgpack/dynamic.go | 2 +-
 .../go-cty/cty/msgpack/infinity.go | 0
 .../go-cty/cty/msgpack/marshal.go | 4 +-
 .../go-cty/cty/msgpack/type_implied.go | 2 +-
 .../go-cty/cty/msgpack/unknown.go | 0
 .../go-cty/cty/msgpack/unmarshal.go | 2 +-
 .../github.com/hashicorp/go-cty/cty/null.go | 14 +
 .../hashicorp/go-cty/cty/object_type.go | 135 +
 .../github.com/hashicorp/go-cty/cty/path.go | 270 +
 .../hashicorp/go-cty/cty/path_set.go | 198 +
 .../hashicorp/go-cty/cty/primitive_type.go | 122 +
 .../hashicorp/go-cty/cty/set/gob.go | 76 +
 .../hashicorp/go-cty/cty/set/iterator.go | 15 +
 .../hashicorp/go-cty/cty/set/ops.go | 210 +
 .../hashicorp/go-cty/cty/set/rules.go | 43 +
 .../hashicorp/go-cty/cty/set/set.go | 62 +
 .../hashicorp/go-cty/cty/set_helper.go | 132 +
 .../hashicorp/go-cty/cty/set_internals.go | 244 +
 .../hashicorp/go-cty/cty/set_type.go | 72 +
 .../hashicorp/go-cty/cty/tuple_type.go | 121 +
 .../github.com/hashicorp/go-cty/cty/type.go | 120 +
 .../hashicorp/go-cty/cty/type_conform.go | 139 +
 .../hashicorp/go-cty/cty/types_to_register.go | 57 +
 .../hashicorp/go-cty/cty/unknown.go | 84 +
 .../hashicorp/go-cty/cty/unknown_as_null.go | 64 +
 .../github.com/hashicorp/go-cty/cty/value.go | 108 +
 .../hashicorp/go-cty/cty/value_init.go | 324 +
 .../hashicorp/go-cty/cty/value_ops.go | 1290 ++
 .../github.com/hashicorp/go-cty/cty/walk.go | 182 +
 .../github.com/hashicorp/go-plugin/client.go | 18 +-
 vendor/github.com/hashicorp/go-plugin/go.mod | 14 +-
 vendor/github.com/hashicorp/go-plugin/go.sum | 55 +-
 .../hashicorp/go-plugin/grpc_client.go | 8 +-
 .../hashicorp/go-plugin/grpc_server.go | 19 +-
 .../hashicorp/go-plugin/grpc_stdio.go | 207 +
 .../go-plugin/internal/plugin/gen.go | 2 +-
 .../internal/plugin/grpc_broker.pb.go | 48 +-
 .../internal/plugin/grpc_broker.proto | 2 -
 .../internal/plugin/grpc_controller.pb.go | 42 +-
 .../internal/plugin/grpc_stdio.pb.go | 233 +
 .../internal/plugin/grpc_stdio.proto | 30 +
 .../github.com/hashicorp/go-plugin/server.go | 10 +-
 .../github.com/hashicorp/go-plugin/testing.go | 2 +-
 vendor/github.com/hashicorp/hcl/.gitignore | 9 -
 vendor/github.com/hashicorp/hcl/.travis.yml | 12 -
 vendor/github.com/hashicorp/hcl/Makefile | 18 -
 vendor/github.com/hashicorp/hcl/README.md | 125 -
 vendor/github.com/hashicorp/hcl/appveyor.yml | 19 -
 vendor/github.com/hashicorp/hcl/decoder.go | 724 --
 vendor/github.com/hashicorp/hcl/hcl.go | 11 -
 .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 -
 .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 -
 .../hashicorp/hcl/hcl/parser/error.go | 17 -
 .../hashicorp/hcl/hcl/parser/parser.go | 520 -
 .../hashicorp/hcl/hcl/scanner/scanner.go | 651 -
 .../hashicorp/hcl/hcl/strconv/quote.go | 241 -
 .../hashicorp/hcl/hcl/token/position.go | 46 -
 .../hashicorp/hcl/hcl/token/token.go | 219 -
 .../hashicorp/hcl/json/parser/flatten.go | 117 -
 .../hashicorp/hcl/json/parser/parser.go | 313 -
 .../hashicorp/hcl/json/scanner/scanner.go | 451 -
 .../hashicorp/hcl/json/token/position.go | 46 -
 .../hashicorp/hcl/json/token/token.go | 118 -
 vendor/github.com/hashicorp/hcl/lex.go | 38 -
 vendor/github.com/hashicorp/hcl/parse.go | 39 -
 .../hashicorp/hcl/v2/ext/dynblock/README.md | 184 -
 .../hcl/v2/ext/dynblock/expand_body.go | 262 -
 .../hcl/v2/ext/dynblock/expand_spec.go | 215 -
 .../hcl/v2/ext/dynblock/expr_wrap.go | 42 -
 .../hcl/v2/ext/dynblock/iteration.go | 66 -
 .../hashicorp/hcl/v2/ext/dynblock/public.go | 47 -
 .../hashicorp/hcl/v2/ext/dynblock/schema.go | 50 -
 .../hcl/v2/ext/dynblock/unknown_body.go | 84 -
 .../hcl/v2/ext/dynblock/variables.go | 209 -
 .../hcl/v2/ext/dynblock/variables_hcldec.go | 43 -
 .../hashicorp/hcl/v2/ext/typeexpr/README.md | 67 -
 .../hashicorp/hcl/v2/ext/typeexpr/doc.go | 11 -
 .../hashicorp/hcl/v2/ext/typeexpr/get_type.go | 196 -
 .../hashicorp/hcl/v2/ext/typeexpr/public.go | 129 -
 .../hashicorp/hcl/v2/gohcl/decode.go | 304 -
 .../github.com/hashicorp/hcl/v2/gohcl/doc.go | 53 -
 .../hashicorp/hcl/v2/gohcl/encode.go | 191 -
 .../hashicorp/hcl/v2/gohcl/schema.go | 174 -
 .../hashicorp/hcl/v2/gohcl/types.go | 16 -
 .../hashicorp/hcl/v2/hcldec/block_labels.go | 21 -
 .../hashicorp/hcl/v2/hcldec/decode.go | 36 -
 .../github.com/hashicorp/hcl/v2/hcldec/doc.go | 12 -
 .../github.com/hashicorp/hcl/v2/hcldec/gob.go | 23 -
 .../hashicorp/hcl/v2/hcldec/public.go | 81 -
 .../hashicorp/hcl/v2/hcldec/schema.go | 36 -
 .../hashicorp/hcl/v2/hcldec/spec.go | 1567 ---
 .../hashicorp/hcl/v2/hcldec/variables.go | 36 -
 .../github.com/hashicorp/hcl/v2/hcled/doc.go | 4 -
 .../hashicorp/hcl/v2/hcled/navigation.go | 34 -
 .../hashicorp/hcl/v2/hclparse/parser.go | 135 -
 .../hashicorp/hcl/v2/hclwrite/ast.go | 121 -
 .../hcl/v2/hclwrite/ast_attribute.go | 48 -
 .../hashicorp/hcl/v2/hclwrite/ast_block.go | 118 -
 .../hashicorp/hcl/v2/hclwrite/ast_body.go | 219 -
 .../hcl/v2/hclwrite/ast_expression.go | 201 -
 .../hashicorp/hcl/v2/hclwrite/doc.go | 11 -
 .../hashicorp/hcl/v2/hclwrite/format.go | 463 -
 .../hashicorp/hcl/v2/hclwrite/generate.go | 250 -
 .../hcl/v2/hclwrite/native_node_sorter.go | 23 -
 .../hashicorp/hcl/v2/hclwrite/node.go | 260 -
 .../hashicorp/hcl/v2/hclwrite/parser.go | 599 -
 .../hashicorp/hcl/v2/hclwrite/public.go | 44 -
 .../hashicorp/hcl/v2/hclwrite/tokens.go | 122 -
 .../github.com/hashicorp/hcl/v2/json/ast.go | 121 -
 .../hashicorp/hcl/v2/json/didyoumean.go | 33 -
 .../github.com/hashicorp/hcl/v2/json/doc.go | 12 -
 .../hashicorp/hcl/v2/json/navigation.go | 70 -
 .../hashicorp/hcl/v2/json/parser.go | 496 -
 .../hashicorp/hcl/v2/json/peeker.go | 25 -
 .../hashicorp/hcl/v2/json/public.go | 94 -
 .../hashicorp/hcl/v2/json/scanner.go | 297 -
 .../github.com/hashicorp/hcl/v2/json/spec.md | 405 -
 .../hashicorp/hcl/v2/json/structure.go | 637 -
 .../hashicorp/hcl/v2/json/tokentype_string.go | 29 -
 .../terraform-config-inspect/LICENSE | 353 -
 .../tfconfig/diagnostic.go | 138 -
 .../terraform-config-inspect/tfconfig/doc.go | 21 -
 .../terraform-config-inspect/tfconfig/load.go | 130 -
 .../tfconfig/load_hcl.go | 322 -
 .../tfconfig/load_legacy.go | 325 -
 .../tfconfig/module.go | 35 -
 .../tfconfig/module_call.go | 11 -
 .../tfconfig/output.go | 9 -
 .../tfconfig/provider_ref.go | 9 -
 .../tfconfig/resource.go | 64 -
 .../tfconfig/schema.go | 106 -
 .../tfconfig/source_pos.go | 50 -
 .../tfconfig/variable.go | 16 -
 .../hashicorp/terraform-plugin-sdk/LICENSE | 354 -
 .../helper/resource/grpc_test_provider.go | 43 -
 .../helper/resource/map.go | 140 -
 .../helper/resource/resource.go | 49 -
 .../helper/resource/testing_config.go | 404 -
 .../helper/resource/testing_import_state.go | 233 -
 .../helper/schema/backend.go | 200 -
 .../httpclient/useragent.go | 26 -
 .../internal/addrs/count_attr.go | 12 -
 .../internal/addrs/for_each_attr.go | 12 -
 .../internal/addrs/input_variable.go | 41 -
 .../internal/addrs/instance_key.go | 123 -
 .../internal/addrs/local_value.go | 48 -
 .../internal/addrs/module.go | 51 -
 .../internal/addrs/module_call.go | 63 -
 .../internal/addrs/module_instance.go | 388 -
 .../internal/addrs/output_value.go | 75 -
 .../internal/addrs/parse_ref.go | 346 -
 .../internal/addrs/parse_target.go | 240 -
 .../internal/addrs/path_attr.go | 12 -
 .../internal/addrs/provider_config.go | 289 -
 .../internal/addrs/provider_type.go | 7 -
 .../internal/addrs/referenceable.go | 20 -
 .../internal/addrs/resource.go | 254 -
 .../internal/addrs/resource_phase.go | 105 -
 .../internal/addrs/resourcemode_string.go | 33 -
 .../internal/addrs/self.go | 14 -
 .../internal/addrs/targetable.go | 26 -
 .../internal/addrs/terraform_attr.go | 12 -
 .../internal/command/format/diagnostic.go | 295 -
 .../internal/command/format/diff.go | 1192 --
 .../internal/command/format/format.go | 8 -
 .../internal/command/format/object_id.go | 123 -
 .../internal/command/format/state.go | 208 -
 .../internal/configs/backend.go | 24 -
 .../internal/configs/compat_shim.go | 116 -
 .../internal/configs/config.go | 164 -
 .../internal/configs/config_build.go | 160 -
 .../internal/configs/configload/copy_dir.go | 125 -
 .../internal/configs/configload/doc.go | 4 -
 .../internal/configs/configload/inode.go | 21 -
 .../configs/configload/inode_freebsd.go | 21 -
 .../configs/configload/inode_windows.go | 8 -
 .../internal/configs/configload/loader.go | 101 -
 .../configs/configload/loader_load.go | 105 -
 .../configs/configload/loader_snapshot.go | 492 -
 .../internal/configs/configload/module_mgr.go | 62 -
 .../internal/configs/configload/testing.go | 43 -
 .../configs/configschema/decoder_spec.go | 123 -
 .../configs/configschema/implied_type.go | 21 -
 .../configs/configschema/internal_validate.go | 105 -
 .../configs/configschema/none_required.go | 38 -
 .../configschema/validate_traversal.go | 173 -
 .../internal/configs/depends_on.go | 23 -
 .../internal/configs/doc.go | 19 -
 .../internal/configs/module.go | 404 -
 .../internal/configs/module_call.go | 188 -
 .../internal/configs/module_merge.go | 247 -
 .../internal/configs/module_merge_body.go | 143 -
 .../internal/configs/named_values.go | 354 -
 .../internal/configs/parser.go | 100 -
 .../internal/configs/parser_config.go | 247 -
 .../internal/configs/parser_config_dir.go | 163 -
 .../internal/configs/parser_values.go | 43 -
 .../internal/configs/provider.go | 144 -
 .../internal/configs/provisioner.go | 150 -
 .../configs/provisioneronfailure_string.go | 25 -
 .../configs/provisionerwhen_string.go | 25 -
 .../internal/configs/resource.go | 490 -
 .../internal/configs/synth_body.go | 118 -
 .../internal/configs/util.go | 63 -
 .../internal/configs/variable_type_hint.go | 45 -
 .../configs/variabletypehint_string.go | 39 -
 .../internal/configs/version_constraint.go | 71 -
 .../terraform-plugin-sdk/internal/dag/dag.go | 301 -
 .../terraform-plugin-sdk/internal/dag/dot.go | 278 -
 .../terraform-plugin-sdk/internal/dag/edge.go | 37 -
 .../internal/dag/graph.go | 391 -
 .../internal/dag/marshal.go | 460 -
 .../terraform-plugin-sdk/internal/dag/set.go | 123 -
 .../internal/dag/tarjan.go | 107 -
 .../terraform-plugin-sdk/internal/dag/walk.go | 454 -
 .../internal/earlyconfig/config.go | 63 -
 .../internal/earlyconfig/config_build.go | 144 -
 .../internal/earlyconfig/diagnostics.go | 78 -
 .../internal/earlyconfig/doc.go | 20 -
 .../internal/earlyconfig/module.go | 13 -
 .../internal/flatmap/expand.go | 152 -
 .../internal/flatmap/flatten.go | 71 -
 .../internal/flatmap/map.go | 82 -
 .../internal/helper/config/validator.go | 214 -
 .../helper/didyoumean/name_suggestion.go | 24 -
 .../internal/httpclient/client.go | 53 -
 .../internal/initwd/copy_dir.go | 125 -
 .../internal/initwd/doc.go | 7 -
 .../internal/initwd/from_module.go | 363 -
 .../internal/initwd/getter.go | 204 -
 .../internal/initwd/inode.go | 21 -
 .../internal/initwd/inode_freebsd.go | 21 -
 .../internal/initwd/inode_windows.go | 8 -
 .../internal/initwd/module_install.go | 558 -
 .../internal/initwd/module_install_hooks.go | 36 -
 .../internal/lang/blocktoattr/doc.go | 5 -
 .../internal/lang/blocktoattr/fixup.go | 187 -
 .../internal/lang/blocktoattr/schema.go | 119 -
 .../internal/lang/blocktoattr/variables.go | 45 -
 .../internal/lang/data.go | 34 -
 .../terraform-plugin-sdk/internal/lang/doc.go | 5 -
 .../internal/lang/eval.go | 473 -
 .../internal/lang/funcs/cidr.go | 218 -
 .../internal/lang/funcs/collection.go | 1519 ---
 .../internal/lang/funcs/conversion.go | 87 -
 .../internal/lang/funcs/crypto.go | 325 -
 .../internal/lang/funcs/datetime.go | 70 -
 .../internal/lang/funcs/encoding.go | 140 -
 .../internal/lang/funcs/filesystem.go | 360 -
 .../internal/lang/funcs/number.go | 217 -
 .../internal/lang/funcs/string.go | 280 -
 .../internal/lang/functions.go | 146 -
 .../internal/lang/references.go | 81 -
 .../internal/lang/scope.go | 34 -
 .../internal/modsdir/doc.go | 3 -
 .../internal/modsdir/manifest.go | 138 -
 .../internal/modsdir/paths.go | 3 -
 .../internal/moduledeps/dependencies.go | 43 -
 .../internal/moduledeps/doc.go | 7 -
 .../internal/moduledeps/module.go | 134 -
 .../internal/moduledeps/provider.go | 30 -
 .../internal/plans/action.go | 22 -
 .../internal/plans/action_string.go | 49 -
 .../internal/plans/changes.go | 308 -
 .../internal/plans/changes_src.go | 190 -
 .../internal/plans/changes_sync.go | 144 -
 .../internal/plans/doc.go | 5 -
 .../internal/plans/dynamic_value.go | 96 -
 .../internal/plans/objchange/all_null.go | 18 -
 .../internal/plans/objchange/compatible.go | 447 -
 .../internal/plans/objchange/doc.go | 4 -
 .../internal/plans/objchange/lcs.go | 104 -
 .../internal/plans/objchange/objchange.go | 390 -
 .../internal/plans/objchange/plan_valid.go | 267 -
 .../internal/plans/plan.go | 92 -
 .../internal/plugin/discovery/error.go | 64 -
 .../internal/plugin/discovery/find.go | 191 -
 .../internal/plugin/discovery/get.go | 669 -
 .../internal/plugin/discovery/get_cache.go | 48 -
 .../internal/plugin/discovery/hashicorp.go | 34 -
 .../internal/plugin/discovery/meta.go | 41 -
 .../internal/plugin/discovery/meta_set.go | 195 -
 .../internal/plugin/discovery/requirements.go | 105 -
 .../internal/plugin/discovery/signature.go | 19 -
 .../internal/plugin/discovery/version.go | 77 -
 .../internal/plugin/discovery/version_set.go | 83 -
 .../internal/providers/addressed_types.go | 47 -
 .../internal/providers/doc.go | 3 -
 .../internal/providers/provider.go | 359 -
 .../internal/providers/resolver.go | 68 -
 .../internal/provisioners/doc.go | 3 -
 .../internal/provisioners/factory.go | 5 -
 .../internal/provisioners/provisioner.go | 82 -
 .../internal/registry/client.go | 346 -
 .../internal/registry/errors.go | 55 -
 .../internal/registry/regsrc/friendly_host.go | 140 -
 .../internal/registry/regsrc/module.go | 175 -
 .../internal/registry/regsrc/regsrc.go | 8 -
 .../registry/regsrc/terraform_provider.go | 60 -
 .../internal/registry/response/module.go | 46 -
 .../registry/response/module_versions.go | 32 -
 .../internal/registry/response/pagination.go | 65 -
 .../registry/response/terraform_provider.go | 95 -
 .../internal/states/doc.go | 3 -
 .../internal/states/eachmode_string.go | 35 -
 .../internal/states/instance_generation.go | 20 -
 .../internal/states/instance_object.go | 120 -
 .../internal/states/instance_object_src.go | 113 -
 .../internal/states/module.go | 268 -
 .../internal/states/objectstatus_string.go | 33 -
 .../internal/states/output_value.go | 14 -
 .../internal/states/resource.go | 233 -
 .../internal/states/state.go | 229 -
 .../internal/states/state_deepcopy.go | 221 -
 .../internal/states/state_equal.go | 18 -
 .../internal/states/state_string.go | 279 -
 .../internal/states/statefile/diagnostics.go | 62 -
 .../internal/states/statefile/doc.go | 3 -
 .../internal/states/statefile/file.go | 31 -
 .../internal/states/statefile/read.go | 209 -
 .../internal/states/statefile/version0.go | 23 -
 .../internal/states/statefile/version1.go | 167 -
 .../states/statefile/version1_upgrade.go | 172 -
 .../internal/states/statefile/version2.go | 204 -
 .../states/statefile/version2_upgrade.go | 145 -
 .../internal/states/statefile/version3.go | 50 -
 .../states/statefile/version3_upgrade.go | 444 -
 .../internal/states/statefile/version4.go | 604 -
 .../internal/states/statefile/write.go | 17 -
 .../internal/states/sync.go | 484 -
 .../internal/tfdiags/contextual.go | 372 -
 .../internal/tfdiags/diagnostic.go | 40 -
 .../internal/tfdiags/hcl.go | 87 -
 .../internal/tfdiags/source_range.go | 35 -
 .../internal/tfdiags/sourceless.go | 13 -
 .../internal/version/version.go | 36 -
 .../terraform-plugin-sdk/meta/meta.go | 36 -
 .../terraform-plugin-sdk/plugin/client.go | 35 -
 .../plugin/grpc_provider.go | 563 -
 .../plugin/grpc_provisioner.go | 178 -
 .../terraform-plugin-sdk/plugin/plugin.go | 14 -
 .../plugin/resource_provider.go | 620 -
 .../terraform-plugin-sdk/plugin/serve.go | 100 -
 .../terraform-plugin-sdk/plugin/ui_input.go | 52 -
 .../terraform-plugin-sdk/plugin/ui_output.go | 29 -
 .../terraform-plugin-sdk/terraform/context.go | 882 --
 .../terraform/context_components.go | 68 -
 .../terraform/context_graph_type.go | 32 -
 .../terraform/context_import.go | 83 -
 .../terraform/context_input.go | 251 -
 .../terraform/edge_destroy.go | 17 -
 .../terraform-plugin-sdk/terraform/eval.go | 70 -
 .../terraform/eval_apply.go | 656 -
 .../terraform/eval_check_prevent_destroy.go | 47 -
 .../terraform/eval_context.go | 133 -
 .../terraform/eval_context_builtin.go | 329 -
 .../terraform/eval_context_mock.go | 319 -
 .../terraform/eval_count.go | 120 -
 .../terraform/eval_count_boundary.go | 77 -
 .../terraform/eval_diff.go | 783 --
 .../terraform/eval_error.go | 20 -
 .../terraform/eval_filter.go | 25 -
 .../terraform/eval_filter_operation.go | 49 -
 .../terraform/eval_for_each.go | 95 -
 .../terraform-plugin-sdk/terraform/eval_if.go | 26 -
 .../terraform/eval_import_state.go | 95 -
 .../terraform/eval_lang.go | 61 -
 .../terraform/eval_local.go | 74 -
 .../terraform/eval_noop.go | 8 -
 .../terraform/eval_output.go | 135 -
 .../terraform/eval_provider.go | 147 -
 .../terraform/eval_provisioner.go | 55 -
 .../terraform/eval_read_data.go | 395 -
 .../terraform/eval_refresh.go | 106 -
 .../terraform/eval_sequence.go | 42 -
 .../terraform/eval_state.go | 475 -
 .../terraform/eval_state_upgrade.go | 106 -
 .../terraform/eval_validate.go | 588 -
 .../terraform/eval_validate_selfref.go | 67 -
 .../terraform/eval_variable.go | 96 -
 .../terraform/evaltree_provider.go | 88 -
 .../terraform/evaluate.go | 838 --
 .../terraform/evaluate_valid.go | 299 -
 .../terraform/features.go | 7 -
 .../terraform-plugin-sdk/terraform/graph.go | 141 -
 .../terraform/graph_builder.go | 85 -
 .../terraform/graph_builder_apply.go | 211 -
 .../terraform/graph_builder_destroy_plan.go | 97 -
 .../terraform/graph_builder_eval.go | 108 -
 .../terraform/graph_builder_import.go | 100 -
 .../terraform/graph_builder_plan.go | 204 -
 .../terraform/graph_builder_refresh.go | 194 -
 .../terraform/graph_builder_validate.go | 34 -
 .../terraform/graph_dot.go | 9 -
 .../terraform/graph_interface_subgraph.go | 11 -
 .../terraform/graph_walk.go | 32 -
 .../terraform/graph_walk_context.go | 157 -
 .../terraform/graph_walk_operation.go | 18 -
 .../terraform/graphtype_string.go | 30 -
 .../terraform-plugin-sdk/terraform/hook.go | 145 -
 .../terraform/hook_mock.go | 274 -
 .../terraform/hook_stop.go | 100 -
 .../terraform/instancetype.go | 13 -
 .../terraform/instancetype_string.go | 26 -
 .../terraform/module_dependencies.go | 202 -
 .../terraform/node_count_boundary.go | 22 -
 .../terraform/node_data_destroy.go | 40 -
 .../terraform/node_data_refresh.go | 229 -
 .../terraform/node_local.go | 70 -
 .../terraform/node_module_removed.go | 89 -
 .../terraform/node_module_variable.go | 142 -
 .../terraform/node_output.go | 200 -
 .../terraform/node_output_orphan.go | 48 -
 .../terraform/node_provider.go | 11 -
 .../terraform/node_provider_abstract.go | 96 -
 .../terraform/node_provider_disabled.go | 27 -
 .../terraform/node_provider_eval.go | 20 -
 .../terraform/node_provisioner.go | 44 -
 .../terraform/node_resource_abstract.go | 446 -
 .../terraform/node_resource_apply.go | 71 -
 .../terraform/node_resource_apply_instance.go | 426 -
 .../terraform/node_resource_destroy.go | 321 -
 .../node_resource_destroy_deposed.go | 313 -
 .../terraform/node_resource_plan.go | 166 -
 .../terraform/node_resource_plan_destroy.go | 88 -
 .../terraform/node_resource_plan_instance.go | 201 -
 .../terraform/node_resource_plan_orphan.go | 84 -
 .../terraform/node_resource_refresh.go | 296 -
 .../terraform/node_resource_validate.go | 90 -
 .../terraform/node_root_variable.go | 44 -
 .../terraform-plugin-sdk/terraform/path.go | 17 -
 .../terraform-plugin-sdk/terraform/plan.go | 94 -
 .../terraform/provider_mock.go | 521 -
 .../terraform/provisioner_mock.go | 154 -
 .../terraform/resource_address.go | 618 -
 .../terraform/resource_provider.go | 319 -
 .../terraform/resource_provider_mock.go | 315 -
 .../terraform/resource_provisioner.go | 70 -
 .../terraform/resource_provisioner_mock.go | 87 -
 .../terraform-plugin-sdk/terraform/schemas.go | 278 -
 .../terraform/state_upgrade_v1_to_v2.go | 189 -
 .../terraform/state_upgrade_v2_to_v3.go | 142 -
 .../terraform/state_v1.go | 145 -
 .../terraform-plugin-sdk/terraform/testing.go | 19 -
 .../terraform/transform.go | 62 -
 .../transform_attach_config_provider.go | 19 -
 .../transform_attach_config_resource.go | 74 -
 .../terraform/transform_attach_schema.go | 99 -
 .../terraform/transform_attach_state.go | 68 -
 .../terraform/transform_config.go | 133 -
 .../terraform/transform_count_boundary.go | 33 -
 .../terraform/transform_destroy_cbd.go | 297 -
 .../terraform/transform_destroy_edge.go | 323 -
 .../terraform/transform_diff.go | 184 -
 .../terraform/transform_expand.go | 48 -
 .../terraform/transform_import_provider.go | 44 -
 .../terraform/transform_import_state.go | 239 -
 .../terraform/transform_local.go | 48 -
 .../terraform/transform_module_variable.go | 126 -
 .../terraform/transform_orphan_count.go | 175 -
 .../terraform/transform_orphan_output.go | 60 -
 .../terraform/transform_orphan_resource.go | 179 -
 .../terraform/transform_output.go | 95 -
 .../terraform/transform_provider.go | 705 --
 .../terraform/transform_provisioner.go | 205 -
 .../terraform/transform_reference.go | 446 -
 .../terraform/transform_removed_modules.go | 33 -
 .../terraform/transform_resource_count.go | 71 -
 .../terraform/transform_root.go | 38 -
 .../terraform/transform_state.go | 74 -
 .../terraform/transform_targets.go | 267 -
 .../transform_transitive_reduction.go | 20 -
 .../terraform/transform_variable.go | 40 -
 .../terraform/transform_vertex.go | 44 -
 .../terraform/ui_input.go | 28 -
 .../terraform/ui_input_mock.go | 25 -
 .../terraform/ui_input_prefix.go | 20 -
 .../terraform/ui_output.go | 7 -
 .../terraform/ui_output_callback.go | 9 -
 .../terraform/ui_output_mock.go | 21 -
 .../terraform/ui_output_provisioner.go | 19 -
 .../terraform-plugin-sdk/terraform/util.go | 75 -
 .../terraform/valuesourcetype_string.go | 59 -
 .../terraform/variables.go | 313 -
 .../terraform/version_required.go | 62 -
 .../terraform/walkoperation_string.go | 31 -
 .../{hcl => terraform-plugin-sdk/v2}/LICENSE | 0
 .../terraform-plugin-sdk/v2/acctest/doc.go | 24 +
 .../{ => v2}/acctest/helper.go | 2 +-
 .../v2/diag/diagnostic.go | 112 +
 .../{ => v2}/helper/customdiff/compose.go | 13 +-
 .../{ => v2}/helper/customdiff/computed.go | 8 +-
 .../{ => v2}/helper/customdiff/condition.go | 28 +-
 .../{ => v2}/helper/customdiff/doc.go | 0
 .../{ => v2}/helper/customdiff/force_new.go | 12 +-
 .../{ => v2}/helper/customdiff/validate.go | 16 +-
 .../{ => v2}/helper/hashcode/hashcode.go | 0
 .../{ => v2}/helper/logging/logging.go | 0
 .../{ => v2}/helper/logging/transport.go | 0
 .../{ => v2}/helper/resource/error.go | 0
 .../{ => v2}/helper/resource/id.go | 0
 .../{ => v2}/helper/resource/state.go | 34 +-
 .../{ => v2}/helper/resource/state_shim.go | 185 +-
 .../{ => v2}/helper/resource/testing.go | 614 +-
 .../v2/helper/resource/testing_config.go | 25 +
 .../{ => v2}/helper/resource/testing_new.go | 54 +-
 .../helper/resource/testing_new_config.go | 9 +-
 .../resource/testing_new_import_state.go | 51 +-
 .../{ => v2}/helper/resource/wait.go | 41 +-
 .../{ => v2}/helper/schema/README.md | 0
 .../{ => v2}/helper/schema/core_schema.go | 31 +-
 .../schema/data_source_resource_shim.go | 0
 .../{ => v2}/helper/schema/equal.go | 0
 .../{ => v2}/helper/schema/field_reader.go | 8 -
 .../helper/schema/field_reader_config.go | 49 +-
 .../helper/schema/field_reader_diff.go | 3 +-
 .../helper/schema/field_reader_map.go | 33 -
 .../helper/schema/field_reader_multi.go | 0
 .../{ => v2}/helper/schema/field_writer.go | 0
 .../helper/schema/field_writer_map.go | 0
 .../helper/schema/getsource_string.go | 0
 .../{ => v2}/helper/schema/provider.go | 330 +-
 .../{ => v2}/helper/schema/resource.go | 463 +-
 .../{ => v2}/helper/schema/resource_data.go | 82 +-
 .../helper/schema/resource_data_get_source.go | 0
 .../{ => v2}/helper/schema/resource_diff.go | 2 +-
 .../helper/schema/resource_importer.go | 46 +-
 .../helper/schema/resource_timeout.go | 5 +-
 .../{ => v2}/helper/schema/schema.go | 739 +-
 .../{ => v2}/helper/schema/serialize.go | 9 +-
 .../{ => v2}/helper/schema/set.go | 54 +-
 .../{ => v2}/helper/schema/shims.go | 19 +-
 .../{ => v2}/helper/schema/testing.go | 10 +-
 .../{ => v2}/helper/schema/valuetype.go | 0
 .../helper/schema/valuetype_string.go | 0
 .../{ => v2}/internal/addrs/doc.go | 0
 .../v2/internal/addrs/instance_key.go | 47 +
 .../v2/internal/addrs/module.go | 13 +
 .../v2/internal/addrs/module_instance.go | 241 +
 .../v2/internal/addrs/resource.go | 130 +
 .../v2/internal/addrs/resourcemode_string.go | 33 +
 .../configs/configschema/coerce_value.go | 4 +-
 .../internal/configs/configschema/doc.go | 0
 .../configs/configschema/empty_value.go | 2 +-
 .../configs/configschema/implied_type.go | 68 +
 .../configschema/nestingmode_string.go | 0
 .../internal/configs/configschema/schema.go | 2 +-
 .../internal/configs/hcl2shim/flatmap.go | 5 +-
 .../internal/configs/hcl2shim/paths.go | 2 +-
 .../configs/hcl2shim/single_attr_body.go | 0
 .../internal/configs/hcl2shim/values.go | 4 +-
 .../internal/configs/hcl2shim/values_equiv.go | 2 +-
 .../v2/internal/diagutils/diagutils.go | 45 +
 .../{ => v2}/internal/helper/plugin/doc.go | 0
 .../internal/helper/plugin/grpc_provider.go | 217 +-
 .../internal/helper/plugin/unknown.go | 5 +-
 .../internal/plans/objchange/normalize_obj.go | 5 +-
 .../internal/plugin/convert/diagnostics.go | 78 +-
 .../internal/plugin/convert/schema.go | 13 +-
 .../internal/tfdiags/config_traversals.go | 14 +-
 .../v2/internal/tfdiags/contextual.go | 81 +
 .../v2/internal/tfdiags/diagnostic.go | 20 +
 .../internal/tfdiags/diagnostic_base.go | 14 +-
 .../{ => v2}/internal/tfdiags/diagnostics.go | 107 -
 .../{ => v2}/internal/tfdiags/doc.go | 0
 .../{ => v2}/internal/tfdiags/error.go | 12 +-
 .../{ => v2}/internal/tfdiags/rpc_friendly.go | 18 -
 .../internal/tfdiags/severity_string.go | 0
 .../internal/tfdiags/simple_warning.go | 10 -
 .../{ => v2}/internal/tfplugin5/generate.sh | 0
 .../internal/tfplugin5/tfplugin5.pb.go | 0
 .../internal/tfplugin5/tfplugin5.proto | 0
 .../v2/plugin/grpc_provider.go | 41 +
 .../terraform-plugin-sdk/v2/plugin/serve.go | 70 +
 .../{ => v2}/terraform/diff.go | 493 +-
 .../v2/terraform/instancetype.go | 13 +
 .../v2/terraform/instancetype_string.go | 26 +
 .../{ => v2}/terraform/resource.go | 187 +-
 .../v2/terraform/resource_address.go | 226 +
 .../{ => v2}/terraform/resource_mode.go | 0
 .../terraform/resource_mode_string.go | 0
 .../v2/terraform/resource_provider.go | 26 +
 .../v2/terraform/schemas.go | 26 +
 .../{ => v2}/terraform/state.go | 567 +-
 .../{ => v2}/terraform/state_filter.go | 68 +-
 .../terraform-plugin-sdk/v2/terraform/util.go | 22 +
 .../hashicorp/terraform-plugin-test/helper.go | 7 +-
 .../hashicorp/terraform-plugin-test/util.go | 40 +
 .../hashicorp/terraform-svchost/auth/cache.go | 61 -
 .../terraform-svchost/auth/credentials.go | 118 -
 .../terraform-svchost/auth/from_map.go | 48 -
 .../terraform-svchost/auth/helper_program.go | 149 -
 .../terraform-svchost/auth/static.go | 38 -
 .../auth/token_credentials.go | 43 -
 .../terraform-svchost/disco/disco.go | 275 -
 .../hashicorp/terraform-svchost/disco/host.go | 412 -
 .../terraform-svchost/disco/http_transport.go | 30 -
 .../terraform-svchost/disco/oauth_client.go | 178 -
 .../hashicorp/terraform-svchost/go.mod | 12 -
 .../hashicorp/terraform-svchost/go.sum | 36 -
 .../hashicorp/terraform-svchost/label_iter.go | 69 -
 .../hashicorp/terraform-svchost/svchost.go | 207 -
 .../pingaccess-sdk-go/pingaccess/models.go | 26 +-
 .../pingaccess/pingaccess.go | 2 +
 .../pingaccess/pingaccessSDK.go | 5 +
 .../jmespath/go-jmespath/.gitignore | 2 +-
 vendor/github.com/jmespath/go-jmespath/api.go | 2 +-
 .../github.com/mattn/go-colorable/.travis.yml | 9 -
 .../github.com/mattn/go-colorable/README.md | 48 -
 .../mattn/go-colorable/colorable_appengine.go | 29 -
 .../mattn/go-colorable/colorable_others.go | 30 -
 .../mattn/go-colorable/colorable_windows.go | 980 --
 vendor/github.com/mattn/go-colorable/go.mod | 3 -
 vendor/github.com/mattn/go-colorable/go.sum | 4 -
 .../mattn/go-colorable/noncolorable.go | 55 -
 vendor/github.com/mattn/go-isatty/.travis.yml | 14 -
 vendor/github.com/mattn/go-isatty/LICENSE | 9 -
 vendor/github.com/mattn/go-isatty/README.md | 50 -
 vendor/github.com/mattn/go-isatty/doc.go | 2 -
 vendor/github.com/mattn/go-isatty/go.mod | 5 -
 vendor/github.com/mattn/go-isatty/go.sum | 2 -
 vendor/github.com/mattn/go-isatty/go.test.sh | 12 -
 .../github.com/mattn/go-isatty/isatty_bsd.go | 18 -
 .../mattn/go-isatty/isatty_others.go | 15 -
 .../mattn/go-isatty/isatty_plan9.go | 22 -
 .../mattn/go-isatty/isatty_solaris.go | 22 -
 .../mattn/go-isatty/isatty_tcgets.go | 18 -
 .../mattn/go-isatty/isatty_windows.go | 125 -
 .../github.com/mattn/go-isatty/renovate.json | 8 -
 vendor/github.com/mitchellh/cli/.travis.yml | 14 -
 vendor/github.com/mitchellh/cli/LICENSE | 354 -
 vendor/github.com/mitchellh/cli/Makefile | 20 -
 vendor/github.com/mitchellh/cli/README.md | 67 -
 .../github.com/mitchellh/cli/autocomplete.go | 43 -
 vendor/github.com/mitchellh/cli/cli.go | 720 --
 vendor/github.com/mitchellh/cli/command.go | 67 -
 .../github.com/mitchellh/cli/command_mock.go | 63 -
 vendor/github.com/mitchellh/cli/go.mod | 12 -
 vendor/github.com/mitchellh/cli/go.sum | 22 -
 vendor/github.com/mitchellh/cli/help.go | 79 -
 vendor/github.com/mitchellh/cli/ui.go | 187 -
 vendor/github.com/mitchellh/cli/ui_colored.go | 73 -
 .../github.com/mitchellh/cli/ui_concurrent.go | 54 -
 vendor/github.com/mitchellh/cli/ui_mock.go | 111 -
 vendor/github.com/mitchellh/cli/ui_writer.go | 18 -
 .../mitchellh/colorstring/.travis.yml | 15 -
 .../mitchellh/colorstring/README.md | 30 -
 .../mitchellh/colorstring/colorstring.go | 244 -
 .../github.com/mitchellh/colorstring/go.mod | 1 -
 .../mitchellh/go-testing-interface/testing.go | 37 +-
 .../go-testing-interface/testing_go19.go | 7 +-
 .../runc/libcontainer/system/linux.go | 155 -
 .../runc/libcontainer/system/proc.go | 113 -
 .../libcontainer/system/syscall_linux_32.go | 26 -
 .../libcontainer/system/syscall_linux_64.go | 26 -
 .../runc/libcontainer/system/sysconfig.go | 12 -
 .../libcontainer/system/sysconfig_notcgo.go | 15 -
 .../runc/libcontainer/system/unsupported.go | 27 -
 .../runc/libcontainer/system/xattrs_linux.go | 35 -
 .../github.com/ory/dockertest/v3/.gitignore | 7 +
 .../github.com/ory/dockertest/v3/.travis.yml | 18 +
 .../ory/dockertest/v3/CONTRIBUTING.md | 119 +
 .../go-cty-yaml => ory/dockertest/v3}/LICENSE | 5 +-
 vendor/github.com/ory/dockertest/v3/README.md | 121 +
 .../github.com/ory/dockertest/v3/SECURITY.md | 28 +
 .../ory/dockertest/v3/docker/AUTHORS | 192 +
 .../ory/dockertest/v3/docker/DOCKER-LICENSE | 6 +
 .../ory/dockertest/v3/docker/LICENSE | 22 +
 .../ory/dockertest/v3/docker/README.markdown | 133 +
 .../ory/dockertest/v3/docker/auth.go | 185 +
 .../ory/dockertest/v3/docker/change.go | 43 +
 .../ory/dockertest/v3/docker/client.go | 1092 ++
 .../ory/dockertest/v3/docker/client_unix.go | 32 +
 .../dockertest/v3/docker/client_windows.go | 45 +
 .../ory/dockertest/v3/docker/container.go | 1623 +++
 .../ory/dockertest/v3/docker/distribution.go | 26 +
 .../ory/dockertest/v3/docker/env.go | 172 +
 .../ory/dockertest/v3/docker/event.go | 410 +
 .../ory/dockertest/v3/docker/exec.go | 213 +
 .../ory/dockertest/v3/docker/image.go | 722 ++
 .../ory/dockertest/v3/docker/misc.go | 181 +
 .../ory/dockertest/v3/docker/network.go | 321 +
 .../ory/dockertest/v3/docker/opts/env.go | 48 +
 .../ory/dockertest/v3/docker/opts/hosts.go | 165 +
 .../dockertest/v3/docker/opts/hosts_unix.go | 8 +
 .../v3/docker/opts/hosts_windows.go | 4 +
 .../ory/dockertest/v3/docker/opts/ip.go | 47 +
 .../ory/dockertest/v3/docker/opts/opts.go | 337 +
 .../dockertest/v3/docker/opts/opts_unix.go | 22 +
 .../dockertest/v3/docker/opts/opts_windows.go | 72 +
 .../dockertest/v3/docker/opts/quotedstring.go | 37 +
 .../ory/dockertest/v3/docker/opts/runtime.go | 79 +
 .../ory/dockertest/v3/docker/opts/ulimit.go | 81 +
 .../v3}/docker/pkg/archive/README.md | 0
 .../v3}/docker/pkg/archive/archive.go | 111 +-
 .../v3/docker/pkg/archive/archive_linux.go | 92 +
 .../v3/docker/pkg/archive/archive_other.go | 7 +
 .../v3}/docker/pkg/archive/archive_unix.go | 51 +-
 .../v3}/docker/pkg/archive/archive_windows.go | 24 +-
 .../v3}/docker/pkg/archive/changes.go | 20 +-
 .../v3}/docker/pkg/archive/changes_linux.go | 31 +-
 .../v3}/docker/pkg/archive/changes_other.go | 4 +-
 .../v3}/docker/pkg/archive/changes_unix.go | 12 +-
 .../v3/docker/pkg/archive/changes_windows.go | 30 +
 .../dockertest/v3}/docker/pkg/archive/copy.go | 12 +-
 .../v3}/docker/pkg/archive/copy_unix.go | 2 +-
 .../v3}/docker/pkg/archive/copy_windows.go | 2 +-
 .../dockertest/v3}/docker/pkg/archive/diff.go | 26 +-
 .../v3}/docker/pkg/archive/time_linux.go | 4 +-
 .../docker/pkg/archive/time_unsupported.go | 2 +-
 .../v3}/docker/pkg/archive/whiteouts.go | 2 +-
 .../dockertest/v3}/docker/pkg/archive/wrap.go | 2 +-
 .../v3}/docker/pkg/fileutils/fileutils.go | 6 +-
 .../docker/pkg/fileutils/fileutils_darwin.go | 2 +-
 .../docker/pkg/fileutils/fileutils_unix.go | 2 +-
.../docker/pkg/fileutils/fileutils_windows.go | 2 +- .../v3/docker/pkg/homedir/homedir_linux.go | 21 + .../v3/docker/pkg/homedir/homedir_others.go | 13 + .../v3/docker/pkg/homedir/homedir_unix.go | 34 + .../v3/docker/pkg/homedir/homedir_windows.go | 24 + .../v3}/docker/pkg/idtools/idtools.go | 51 +- .../v3}/docker/pkg/idtools/idtools_unix.go | 13 +- .../v3/docker/pkg/idtools/idtools_windows.go | 23 + .../docker/pkg/idtools/usergroupadd_linux.go | 2 +- .../pkg/idtools/usergroupadd_unsupported.go | 2 +- .../v3}/docker/pkg/idtools/utils_unix.go | 2 +- .../v3}/docker/pkg/ioutils/buffer.go | 2 +- .../v3}/docker/pkg/ioutils/bytespipe.go | 2 +- .../v3}/docker/pkg/ioutils/fswriters.go | 2 +- .../v3}/docker/pkg/ioutils/readers.go | 5 +- .../v3}/docker/pkg/ioutils/temp_unix.go | 2 +- .../v3}/docker/pkg/ioutils/temp_windows.go | 4 +- .../v3}/docker/pkg/ioutils/writeflusher.go | 2 +- .../v3}/docker/pkg/ioutils/writers.go | 2 +- .../v3/docker/pkg/jsonmessage/jsonmessage.go | 335 + .../v3}/docker/pkg/longpath/longpath.go | 2 +- .../dockertest/v3}/docker/pkg/mount/flags.go | 14 +- .../v3}/docker/pkg/mount/flags_freebsd.go | 2 +- .../v3}/docker/pkg/mount/flags_linux.go | 2 +- .../v3}/docker/pkg/mount/flags_unsupported.go | 2 +- .../dockertest/v3/docker/pkg/mount/mount.go | 110 + .../v3}/docker/pkg/mount/mounter_freebsd.go | 19 +- .../v3}/docker/pkg/mount/mounter_linux.go | 32 +- .../docker/pkg/mount/mounter_unsupported.go | 11 + .../v3}/docker/pkg/mount/mountinfo.go | 2 +- .../v3}/docker/pkg/mount/mountinfo_freebsd.go | 18 +- .../v3/docker/pkg/mount/mountinfo_linux.go | 93 + .../docker/pkg/mount/mountinfo_unsupported.go | 4 +- .../v3/docker/pkg/mount/mountinfo_windows.go | 6 + .../docker/pkg/mount/sharedsubtree_linux.go | 22 +- .../dockertest/v3}/docker/pkg/pools/pools.go | 4 +- .../v3/docker/pkg/stdcopy/stdcopy.go | 190 + .../v3}/docker/pkg/system/chtimes.go | 2 +- .../v3}/docker/pkg/system/chtimes_unix.go | 2 +- .../v3}/docker/pkg/system/chtimes_windows.go | 2 +- .../v3}/docker/pkg/system/errors.go | 2 +- .../v3}/docker/pkg/system/exitcode.go | 2 +- .../v3}/docker/pkg/system/filesys.go | 2 +- .../v3}/docker/pkg/system/filesys_windows.go | 9 +- .../dockertest/v3}/docker/pkg/system/init.go | 2 +- .../v3/docker/pkg/system/init_unix.go | 7 + .../v3/docker/pkg/system/init_windows.go | 12 + .../dockertest/v3/docker/pkg/system/lcow.go | 69 + .../v3}/docker/pkg/system/lcow_unix.go | 2 +- .../v3}/docker/pkg/system/lcow_windows.go | 2 +- .../v3}/docker/pkg/system/lstat_unix.go | 5 +- .../v3}/docker/pkg/system/lstat_windows.go | 2 +- .../v3}/docker/pkg/system/meminfo.go | 2 +- .../v3}/docker/pkg/system/meminfo_linux.go | 2 +- .../docker/pkg/system/meminfo_unsupported.go | 2 +- .../v3}/docker/pkg/system/meminfo_windows.go | 2 +- .../dockertest/v3}/docker/pkg/system/mknod.go | 2 +- .../v3}/docker/pkg/system/mknod_windows.go | 2 +- .../dockertest/v3}/docker/pkg/system/path.go | 2 +- .../v3}/docker/pkg/system/process_unix.go | 2 +- .../v3}/docker/pkg/system/process_windows.go | 2 +- .../dockertest/v3}/docker/pkg/system/rm.go | 6 +- .../v3}/docker/pkg/system/stat_darwin.go | 2 +- .../v3}/docker/pkg/system/stat_freebsd.go | 2 +- .../v3}/docker/pkg/system/stat_linux.go | 2 +- .../v3}/docker/pkg/system/stat_openbsd.go | 2 +- .../v3}/docker/pkg/system/stat_solaris.go | 2 +- .../v3}/docker/pkg/system/stat_unix.go | 5 +- .../v3}/docker/pkg/system/stat_windows.go | 2 +- .../v3}/docker/pkg/system/syscall_unix.go | 2 +- .../v3}/docker/pkg/system/syscall_windows.go | 75 +- .../dockertest/v3}/docker/pkg/system/umask.go | 2 +- 
.../v3}/docker/pkg/system/umask_windows.go | 2 +- .../v3}/docker/pkg/system/utimes_freebsd.go | 2 +- .../v3}/docker/pkg/system/utimes_linux.go | 2 +- .../docker/pkg/system/utimes_unsupported.go | 2 +- .../v3}/docker/pkg/system/xattrs_linux.go | 2 +- .../docker/pkg/system/xattrs_unsupported.go | 2 +- .../dockertest/v3/docker/pkg/term/ascii.go | 66 + .../dockertest/v3/docker/pkg/term/proxy.go | 74 + .../ory/dockertest/v3/docker/pkg/term/tc.go | 20 + .../ory/dockertest/v3/docker/pkg/term/term.go | 124 + .../v3/docker/pkg/term/term_windows.go | 228 + .../v3/docker/pkg/term/termios_bsd.go | 42 + .../v3/docker/pkg/term/termios_linux.go | 39 + .../v3/docker/pkg/term/windows/ansi_reader.go | 263 + .../v3/docker/pkg/term/windows/ansi_writer.go | 64 + .../v3/docker/pkg/term/windows/console.go | 35 + .../v3/docker/pkg/term/windows/windows.go | 33 + .../dockertest/v3/docker/pkg/term/winsize.go | 20 + .../ory/dockertest/v3/docker/plugin.go | 418 + .../ory/dockertest/v3/docker/signal.go | 49 + .../ory/dockertest/v3/docker/tar.go | 122 + .../ory/dockertest/v3/docker/tls.go | 118 + .../dockertest/v3/docker}/types/auth.go | 2 +- .../v3/docker}/types/blkiodev/blkio.go | 2 +- .../dockertest/v3/docker}/types/client.go | 31 +- .../dockertest/v3/docker}/types/configs.go | 13 +- .../v3/docker}/types/container/config.go | 6 +- .../types/container/container_changes.go | 2 +- .../types/container/container_create.go | 2 +- .../docker}/types/container/container_top.go | 2 +- .../types/container/container_update.go | 2 +- .../docker}/types/container/container_wait.go | 2 +- .../v3/docker}/types/container/host_config.go | 35 +- .../types/container/hostconfig_unix.go | 2 +- .../types/container/hostconfig_windows.go | 2 +- .../docker}/types/container/waitcondition.go | 2 +- .../v3/docker}/types/error_response.go | 0 .../v3/docker}/types/filters/parse.go | 71 +- .../v3/docker}/types/graph_driver_data.go | 0 .../v3/docker}/types/id_response.go | 0 .../types/image_delete_response_item.go | 0 .../v3/docker}/types/image_summary.go | 0 .../v3/docker}/types/mount/mount.go | 5 +- .../v3/docker}/types/network/network.go | 21 +- .../dockertest/v3/docker}/types/plugin.go | 3 - .../v3/docker}/types/plugin_device.go | 0 .../dockertest/v3/docker}/types/plugin_env.go | 0 .../v3/docker}/types/plugin_interface_type.go | 0 .../v3/docker}/types/plugin_mount.go | 0 .../v3/docker}/types/plugin_responses.go | 2 +- .../dockertest/v3/docker}/types/port.go | 2 +- .../v3/docker}/types/registry/authenticate.go | 2 +- .../v3/docker}/types/registry/registry.go | 2 +- .../dockertest/v3/docker}/types/seccomp.go | 7 +- .../docker}/types/service_update_response.go | 0 .../dockertest/v3/docker}/types/stats.go | 6 +- .../v3/docker}/types/strslice/strslice.go | 2 +- .../dockertest/v3/docker}/types/types.go | 53 +- .../v3/docker}/types/versions/README.md | 0 .../v3/docker}/types/versions/compare.go | 2 +- .../dockertest/v3/docker}/types/volume.go | 0 .../ory/dockertest/v3/docker/volume.go | 190 + .../ory/dockertest/v3/dockertest.go | 599 + vendor/github.com/ory/dockertest/v3/go.mod | 25 + vendor/github.com/ory/dockertest/v3/go.sum | 66 + vendor/github.com/posener/complete/.gitignore | 4 - .../github.com/posener/complete/.travis.yml | 16 - .../github.com/posener/complete/LICENSE.txt | 21 - vendor/github.com/posener/complete/args.go | 111 - vendor/github.com/posener/complete/cmd/cmd.go | 128 - .../posener/complete/cmd/install/bash.go | 32 - .../posener/complete/cmd/install/fish.go | 56 - 
.../posener/complete/cmd/install/install.go | 119 - .../posener/complete/cmd/install/utils.go | 140 - .../posener/complete/cmd/install/zsh.go | 39 - vendor/github.com/posener/complete/command.go | 111 - .../github.com/posener/complete/complete.go | 109 - vendor/github.com/posener/complete/go.mod | 3 - vendor/github.com/posener/complete/go.sum | 4 - vendor/github.com/posener/complete/log.go | 22 - .../github.com/posener/complete/match/file.go | 19 - .../posener/complete/match/match.go | 6 - .../posener/complete/match/prefix.go | 9 - vendor/github.com/posener/complete/predict.go | 41 - .../posener/complete/predict_files.go | 108 - .../posener/complete/predict_set.go | 12 - vendor/github.com/posener/complete/readme.md | 118 - vendor/github.com/posener/complete/test.sh | 12 - vendor/github.com/posener/complete/utils.go | 46 - vendor/github.com/spf13/afero/.travis.yml | 21 - vendor/github.com/spf13/afero/LICENSE.txt | 174 - vendor/github.com/spf13/afero/README.md | 452 - vendor/github.com/spf13/afero/afero.go | 108 - vendor/github.com/spf13/afero/appveyor.yml | 15 - vendor/github.com/spf13/afero/basepath.go | 180 - .../github.com/spf13/afero/cacheOnReadFs.go | 290 - vendor/github.com/spf13/afero/const_bsds.go | 22 - .../github.com/spf13/afero/const_win_unix.go | 25 - .../github.com/spf13/afero/copyOnWriteFs.go | 293 - vendor/github.com/spf13/afero/go.mod | 3 - vendor/github.com/spf13/afero/go.sum | 2 - vendor/github.com/spf13/afero/httpFs.go | 110 - vendor/github.com/spf13/afero/ioutil.go | 230 - vendor/github.com/spf13/afero/lstater.go | 27 - vendor/github.com/spf13/afero/match.go | 110 - vendor/github.com/spf13/afero/mem/dir.go | 37 - vendor/github.com/spf13/afero/mem/dirmap.go | 43 - vendor/github.com/spf13/afero/mem/file.go | 317 - vendor/github.com/spf13/afero/memmap.go | 365 - vendor/github.com/spf13/afero/os.go | 101 - vendor/github.com/spf13/afero/path.go | 106 - vendor/github.com/spf13/afero/readonlyfs.go | 80 - vendor/github.com/spf13/afero/regexpfs.go | 214 - vendor/github.com/spf13/afero/unionFile.go | 320 - vendor/github.com/spf13/afero/util.go | 330 - .../testcontainers-go/.gitignore | 2 - .../testcontainers-go/.travis.yml | 22 - .../testcontainers/testcontainers-go/LICENSE | 21 - .../testcontainers-go/README.md | 160 - .../testcontainers-go/container.go | 172 - .../testcontainers-go/docker.go | 754 -- .../testcontainers-go/generic.go | 41 - .../testcontainers/testcontainers-go/go.mod | 37 - .../testcontainers/testcontainers-go/go.sum | 157 - .../testcontainers-go/logconsumer.go | 22 - .../testcontainers-go/network.go | 32 - .../testcontainers-go/reaper.go | 131 - .../testcontainers-go/wait/host_port.go | 116 - .../testcontainers-go/wait/http.go | 147 - .../testcontainers-go/wait/log.go | 104 - .../testcontainers-go/wait/multi.go | 47 - .../testcontainers-go/wait/sql.go | 66 - .../testcontainers-go/wait/wait.go | 24 - .../zclconf/go-cty-yaml/.travis.yml | 5 - .../zclconf/go-cty-yaml/CHANGELOG.md | 10 - .../zclconf/go-cty-yaml/LICENSE.libyaml | 31 - vendor/github.com/zclconf/go-cty-yaml/NOTICE | 20 - vendor/github.com/zclconf/go-cty-yaml/apic.go | 739 -- .../zclconf/go-cty-yaml/converter.go | 69 - .../zclconf/go-cty-yaml/cty_funcs.go | 57 - .../github.com/zclconf/go-cty-yaml/decode.go | 261 - .../zclconf/go-cty-yaml/emitterc.go | 1685 --- .../github.com/zclconf/go-cty-yaml/encode.go | 
189 - .../github.com/zclconf/go-cty-yaml/error.go | 97 - vendor/github.com/zclconf/go-cty-yaml/go.mod | 3 - vendor/github.com/zclconf/go-cty-yaml/go.sum | 18 - .../zclconf/go-cty-yaml/implied_type.go | 268 - .../github.com/zclconf/go-cty-yaml/parserc.go | 1095 -- .../github.com/zclconf/go-cty-yaml/readerc.go | 412 - .../github.com/zclconf/go-cty-yaml/resolve.go | 288 - .../zclconf/go-cty-yaml/scannerc.go | 2696 ---- .../github.com/zclconf/go-cty-yaml/writerc.go | 26 - vendor/github.com/zclconf/go-cty-yaml/yaml.go | 215 - .../github.com/zclconf/go-cty-yaml/yamlh.go | 738 -- .../zclconf/go-cty-yaml/yamlprivateh.go | 173 - vendor/golang.org/x/crypto/AUTHORS | 3 - vendor/golang.org/x/crypto/CONTRIBUTORS | 3 - vendor/golang.org/x/crypto/LICENSE | 27 - vendor/golang.org/x/crypto/PATENTS | 22 - vendor/golang.org/x/crypto/bcrypt/base64.go | 35 - vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295 - vendor/golang.org/x/crypto/blowfish/block.go | 159 - vendor/golang.org/x/crypto/blowfish/cipher.go | 99 - vendor/golang.org/x/crypto/blowfish/const.go | 199 - vendor/golang.org/x/crypto/cast5/cast5.go | 533 - .../x/crypto/openpgp/armor/armor.go | 219 - .../x/crypto/openpgp/armor/encode.go | 160 - .../x/crypto/openpgp/canonical_text.go | 59 - .../x/crypto/openpgp/elgamal/elgamal.go | 122 - .../x/crypto/openpgp/errors/errors.go | 72 - vendor/golang.org/x/crypto/openpgp/keys.go | 693 - .../x/crypto/openpgp/packet/compressed.go | 123 - .../x/crypto/openpgp/packet/config.go | 91 - .../x/crypto/openpgp/packet/encrypted_key.go | 206 - .../x/crypto/openpgp/packet/literal.go | 89 - .../x/crypto/openpgp/packet/ocfb.go | 143 - .../openpgp/packet/one_pass_signature.go | 73 - .../x/crypto/openpgp/packet/opaque.go | 162 - .../x/crypto/openpgp/packet/packet.go | 551 - .../x/crypto/openpgp/packet/private_key.go | 385 - .../x/crypto/openpgp/packet/public_key.go | 753 -- .../x/crypto/openpgp/packet/public_key_v3.go | 279 - .../x/crypto/openpgp/packet/reader.go | 76 - .../x/crypto/openpgp/packet/signature.go | 731 -- .../x/crypto/openpgp/packet/signature_v3.go | 146 - .../openpgp/packet/symmetric_key_encrypted.go | 155 - .../openpgp/packet/symmetrically_encrypted.go | 290 - .../x/crypto/openpgp/packet/userattribute.go | 91 - .../x/crypto/openpgp/packet/userid.go | 160 - vendor/golang.org/x/crypto/openpgp/read.go | 442 - vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 273 - vendor/golang.org/x/crypto/openpgp/write.go | 418 - .../golang.org/x/net/internal/socks/client.go | 168 - .../golang.org/x/net/internal/socks/socks.go | 317 - vendor/golang.org/x/net/proxy/dial.go | 54 - vendor/golang.org/x/net/proxy/direct.go | 31 - vendor/golang.org/x/net/proxy/per_host.go | 155 - vendor/golang.org/x/net/proxy/proxy.go | 149 - vendor/golang.org/x/net/proxy/socks5.go | 42 - vendor/golang.org/x/sync/AUTHORS | 3 - vendor/golang.org/x/sync/CONTRIBUTORS | 3 - vendor/golang.org/x/sync/LICENSE | 27 - vendor/golang.org/x/sync/PATENTS | 22 - vendor/golang.org/x/sync/errgroup/errgroup.go | 66 - .../grpc/reflection/README.md | 18 + .../grpc_reflection_v1alpha/reflection.pb.go | 750 ++ .../grpc_reflection_v1alpha/reflection.proto | 136 + .../grpc/reflection/serverreflection.go | 454 + .../grpc/test/bufconn/bufconn.go | 308 - vendor/modules.txt | 251 +-
 1541 files changed, 31996 insertions(+), 159160 deletions(-)
rename vendor/github.com/{mitchellh/colorstring => Azure/go-ansiterm}/LICENSE (96%)
create mode 100644 vendor/github.com/Azure/go-ansiterm/README.md
create mode 100644
vendor/github.com/Azure/go-ansiterm/constants.go create mode 100644 vendor/github.com/Azure/go-ansiterm/context.go create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_param_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/event_handler.go create mode 100644 vendor/github.com/Azure/go-ansiterm/ground_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/osc_string_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_actions.go create mode 100644 vendor/github.com/Azure/go-ansiterm/states.go create mode 100644 vendor/github.com/Azure/go-ansiterm/utilities.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/api.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/LICENSE delete mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go create mode 100644 vendor/github.com/Nvveen/Gotty/LICENSE create mode 100644 vendor/github.com/Nvveen/Gotty/README create mode 100644 vendor/github.com/Nvveen/Gotty/TODO create mode 100644 vendor/github.com/Nvveen/Gotty/attributes.go create mode 100644 vendor/github.com/Nvveen/Gotty/gotty.go create mode 100644 vendor/github.com/Nvveen/Gotty/parser.go create mode 100644 vendor/github.com/Nvveen/Gotty/types.go delete mode 100644 vendor/github.com/apparentlymart/go-cidr/LICENSE delete mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go delete mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go delete mode 100644 vendor/github.com/armon/go-radix/.travis.yml delete mode 100644 vendor/github.com/armon/go-radix/LICENSE delete mode 100644 vendor/github.com/armon/go-radix/README.md delete mode 100644 vendor/github.com/armon/go-radix/go.mod delete mode 100644 vendor/github.com/armon/go-radix/radix.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go rename vendor/github.com/aws/aws-sdk-go/aws/{context_1_5.go => context.go} (58%) rename vendor/github.com/aws/aws-sdk-go/aws/{context_background_1_5.go => context_1_6.go} (59%) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go delete mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go delete mode 100644 vendor/github.com/bgentry/speakeasy/.gitignore delete mode 100644 vendor/github.com/bgentry/speakeasy/LICENSE delete mode 100644 vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS delete mode 100644 vendor/github.com/bgentry/speakeasy/Readme.md delete mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy.go delete mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_unix.go delete mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_windows.go delete mode 100644 vendor/github.com/cenkalti/backoff/.gitignore rename vendor/github.com/{armon/go-radix => cenkalti/backoff/v3}/.gitignore (100%) rename vendor/github.com/cenkalti/backoff/{ => v3}/.travis.yml (100%) rename vendor/github.com/cenkalti/backoff/{ => v3}/LICENSE (100%) rename vendor/github.com/cenkalti/backoff/{ => v3}/README.md (100%) rename vendor/github.com/cenkalti/backoff/{ => v3}/backoff.go (100%) rename vendor/github.com/cenkalti/backoff/{ => v3}/context.go (100%) rename vendor/github.com/cenkalti/backoff/{ => v3}/exponential.go (100%) create mode 100644 vendor/github.com/cenkalti/backoff/v3/go.mod rename vendor/github.com/cenkalti/backoff/{ => v3}/retry.go (100%) rename vendor/github.com/cenkalti/backoff/{ => v3}/ticker.go (100%) rename vendor/github.com/cenkalti/backoff/{ => v3}/tries.go (100%) delete mode 100644 vendor/github.com/containerd/continuity/fs/copy.go delete mode 100644 vendor/github.com/containerd/continuity/fs/copy_linux.go delete mode 100644 vendor/github.com/containerd/continuity/fs/copy_unix.go delete mode 100644 vendor/github.com/containerd/continuity/fs/copy_windows.go delete mode 100644 
vendor/github.com/containerd/continuity/fs/diff.go delete mode 100644 vendor/github.com/containerd/continuity/fs/diff_unix.go delete mode 100644 vendor/github.com/containerd/continuity/fs/diff_windows.go delete mode 100644 vendor/github.com/containerd/continuity/fs/dtype_linux.go delete mode 100644 vendor/github.com/containerd/continuity/fs/du.go delete mode 100644 vendor/github.com/containerd/continuity/fs/du_unix.go delete mode 100644 vendor/github.com/containerd/continuity/fs/du_windows.go delete mode 100644 vendor/github.com/containerd/continuity/fs/hardlink.go delete mode 100644 vendor/github.com/containerd/continuity/fs/hardlink_unix.go delete mode 100644 vendor/github.com/containerd/continuity/fs/hardlink_windows.go delete mode 100644 vendor/github.com/containerd/continuity/fs/path.go delete mode 100644 vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go delete mode 100644 vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go delete mode 100644 vendor/github.com/containerd/continuity/fs/time.go delete mode 100644 vendor/github.com/containerd/continuity/syscallx/syscall_unix.go delete mode 100644 vendor/github.com/containerd/continuity/syscallx/syscall_windows.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/README.md delete mode 100644 vendor/github.com/containerd/continuity/sysx/file_posix.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/generate.sh delete mode 100644 vendor/github.com/containerd/continuity/sysx/nodata_linux.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/nodata_solaris.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/nodata_unix.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/xattr.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go delete mode 100644 vendor/github.com/docker/distribution/LICENSE delete mode 100644 vendor/github.com/docker/distribution/digestset/set.go delete mode 100644 vendor/github.com/docker/distribution/reference/helpers.go delete mode 100644 vendor/github.com/docker/distribution/reference/normalize.go delete mode 100644 vendor/github.com/docker/distribution/reference/reference.go delete mode 100644 vendor/github.com/docker/distribution/reference/regexp.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/errors.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/handler.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/register.go delete mode 100644 vendor/github.com/docker/docker/AUTHORS delete mode 100644 vendor/github.com/docker/docker/LICENSE delete mode 100644 vendor/github.com/docker/docker/NOTICE delete mode 100644 vendor/github.com/docker/docker/api/README.md delete mode 100644 vendor/github.com/docker/docker/api/common.go delete mode 100644 vendor/github.com/docker/docker/api/common_unix.go delete mode 100644 vendor/github.com/docker/docker/api/common_windows.go delete mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml delete mode 100644 vendor/github.com/docker/docker/api/swagger.yaml delete mode 100644 vendor/github.com/docker/docker/api/types/events/events.go delete mode 100644 vendor/github.com/docker/docker/api/types/image/image_history.go delete mode 100644 
vendor/github.com/docker/docker/api/types/swarm/common.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/network.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/service.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go delete mode 100644 vendor/github.com/docker/docker/api/types/time/duration_convert.go delete mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_create.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_list.go delete mode 100644 vendor/github.com/docker/docker/client/README.md delete mode 100644 vendor/github.com/docker/docker/client/build_cancel.go delete mode 100644 vendor/github.com/docker/docker/client/build_prune.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go delete mode 100644 vendor/github.com/docker/docker/client/client.go delete mode 100644 vendor/github.com/docker/docker/client/client_deprecated.go delete mode 100644 vendor/github.com/docker/docker/client/client_unix.go delete mode 100644 vendor/github.com/docker/docker/client/client_windows.go delete mode 100644 vendor/github.com/docker/docker/client/config_create.go delete mode 100644 vendor/github.com/docker/docker/client/config_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/config_list.go delete mode 100644 vendor/github.com/docker/docker/client/config_remove.go delete mode 100644 vendor/github.com/docker/docker/client/config_update.go delete mode 100644 vendor/github.com/docker/docker/client/container_attach.go delete mode 100644 vendor/github.com/docker/docker/client/container_commit.go delete mode 100644 vendor/github.com/docker/docker/client/container_copy.go delete mode 100644 vendor/github.com/docker/docker/client/container_create.go delete mode 100644 vendor/github.com/docker/docker/client/container_diff.go delete mode 100644 vendor/github.com/docker/docker/client/container_exec.go delete mode 100644 vendor/github.com/docker/docker/client/container_export.go delete mode 100644 vendor/github.com/docker/docker/client/container_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/container_kill.go delete mode 100644 vendor/github.com/docker/docker/client/container_list.go delete mode 100644 vendor/github.com/docker/docker/client/container_logs.go delete mode 100644 
vendor/github.com/docker/docker/client/container_pause.go delete mode 100644 vendor/github.com/docker/docker/client/container_prune.go delete mode 100644 vendor/github.com/docker/docker/client/container_remove.go delete mode 100644 vendor/github.com/docker/docker/client/container_rename.go delete mode 100644 vendor/github.com/docker/docker/client/container_resize.go delete mode 100644 vendor/github.com/docker/docker/client/container_restart.go delete mode 100644 vendor/github.com/docker/docker/client/container_start.go delete mode 100644 vendor/github.com/docker/docker/client/container_stats.go delete mode 100644 vendor/github.com/docker/docker/client/container_stop.go delete mode 100644 vendor/github.com/docker/docker/client/container_top.go delete mode 100644 vendor/github.com/docker/docker/client/container_unpause.go delete mode 100644 vendor/github.com/docker/docker/client/container_update.go delete mode 100644 vendor/github.com/docker/docker/client/container_wait.go delete mode 100644 vendor/github.com/docker/docker/client/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/client/distribution_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/errors.go delete mode 100644 vendor/github.com/docker/docker/client/events.go delete mode 100644 vendor/github.com/docker/docker/client/hijack.go delete mode 100644 vendor/github.com/docker/docker/client/image_build.go delete mode 100644 vendor/github.com/docker/docker/client/image_create.go delete mode 100644 vendor/github.com/docker/docker/client/image_history.go delete mode 100644 vendor/github.com/docker/docker/client/image_import.go delete mode 100644 vendor/github.com/docker/docker/client/image_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/image_list.go delete mode 100644 vendor/github.com/docker/docker/client/image_load.go delete mode 100644 vendor/github.com/docker/docker/client/image_prune.go delete mode 100644 vendor/github.com/docker/docker/client/image_pull.go delete mode 100644 vendor/github.com/docker/docker/client/image_push.go delete mode 100644 vendor/github.com/docker/docker/client/image_remove.go delete mode 100644 vendor/github.com/docker/docker/client/image_save.go delete mode 100644 vendor/github.com/docker/docker/client/image_search.go delete mode 100644 vendor/github.com/docker/docker/client/image_tag.go delete mode 100644 vendor/github.com/docker/docker/client/info.go delete mode 100644 vendor/github.com/docker/docker/client/interface.go delete mode 100644 vendor/github.com/docker/docker/client/interface_experimental.go delete mode 100644 vendor/github.com/docker/docker/client/interface_stable.go delete mode 100644 vendor/github.com/docker/docker/client/login.go delete mode 100644 vendor/github.com/docker/docker/client/network_connect.go delete mode 100644 vendor/github.com/docker/docker/client/network_create.go delete mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go delete mode 100644 vendor/github.com/docker/docker/client/network_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/network_list.go delete mode 100644 vendor/github.com/docker/docker/client/network_prune.go delete mode 100644 vendor/github.com/docker/docker/client/network_remove.go delete mode 100644 
vendor/github.com/docker/docker/client/node_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/node_list.go delete mode 100644 vendor/github.com/docker/docker/client/node_remove.go delete mode 100644 vendor/github.com/docker/docker/client/node_update.go delete mode 100644 vendor/github.com/docker/docker/client/options.go delete mode 100644 vendor/github.com/docker/docker/client/ping.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_create.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_install.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_list.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_push.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_set.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go delete mode 100644 vendor/github.com/docker/docker/client/request.go delete mode 100644 vendor/github.com/docker/docker/client/secret_create.go delete mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/secret_list.go delete mode 100644 vendor/github.com/docker/docker/client/secret_remove.go delete mode 100644 vendor/github.com/docker/docker/client/secret_update.go delete mode 100644 vendor/github.com/docker/docker/client/service_create.go delete mode 100644 vendor/github.com/docker/docker/client/service_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/service_list.go delete mode 100644 vendor/github.com/docker/docker/client/service_logs.go delete mode 100644 vendor/github.com/docker/docker/client/service_remove.go delete mode 100644 vendor/github.com/docker/docker/client/service_update.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_init.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_join.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_update.go delete mode 100644 vendor/github.com/docker/docker/client/task_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/task_list.go delete mode 100644 vendor/github.com/docker/docker/client/task_logs.go delete mode 100644 vendor/github.com/docker/docker/client/transport.go delete mode 100644 vendor/github.com/docker/docker/client/utils.go delete mode 100644 vendor/github.com/docker/docker/client/version.go delete mode 100644 vendor/github.com/docker/docker/client/volume_create.go delete mode 100644 vendor/github.com/docker/docker/client/volume_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/volume_list.go delete mode 100644 vendor/github.com/docker/docker/client/volume_prune.go delete mode 100644 
vendor/github.com/docker/docker/client/volume_remove.go delete mode 100644 vendor/github.com/docker/docker/errdefs/defs.go delete mode 100644 vendor/github.com/docker/docker/errdefs/doc.go delete mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go delete mode 100644 vendor/github.com/docker/docker/errdefs/http_helpers.go delete mode 100644 vendor/github.com/docker/docker/errdefs/is.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mount.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/unmount_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/args_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/init_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/init_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/lcow.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/path_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/README.md delete mode 100644 vendor/github.com/docker/go-connections/sockets/inmem_socket.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/proxy.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/sockets.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_unix.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_windows.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/tcp_socket.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/unix_socket.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go delete mode 100644 vendor/github.com/fatih/color/.travis.yml delete mode 100644 vendor/github.com/fatih/color/Gopkg.lock delete mode 100644 vendor/github.com/fatih/color/Gopkg.toml delete mode 100644 vendor/github.com/fatih/color/LICENSE.md delete mode 100644 vendor/github.com/fatih/color/README.md delete mode 100644 vendor/github.com/fatih/color/color.go delete mode 100644 vendor/github.com/fatih/color/doc.go delete mode 100644 vendor/github.com/gogo/protobuf/AUTHORS delete mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS delete mode 100644 
vendor/github.com/gogo/protobuf/LICENSE delete mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go rename vendor/github.com/{gogo/protobuf/proto/custom_gogo.go => golang/protobuf/ptypes/empty/empty.proto} (54%) delete mode 100644 vendor/github.com/google/uuid/.travis.yml delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS delete mode 100644 vendor/github.com/google/uuid/LICENSE delete mode 100644 vendor/github.com/google/uuid/README.md delete mode 100644 vendor/github.com/google/uuid/dce.go delete mode 100644 vendor/github.com/google/uuid/doc.go delete mode 100644 vendor/github.com/google/uuid/go.mod delete mode 100644 vendor/github.com/google/uuid/hash.go delete mode 100644 vendor/github.com/google/uuid/marshal.go delete mode 100644 vendor/github.com/google/uuid/node.go delete mode 100644 vendor/github.com/google/uuid/node_js.go 
delete mode 100644 vendor/github.com/google/uuid/node_net.go delete mode 100644 vendor/github.com/google/uuid/sql.go delete mode 100644 vendor/github.com/google/uuid/time.go delete mode 100644 vendor/github.com/google/uuid/util.go delete mode 100644 vendor/github.com/google/uuid/uuid.go delete mode 100644 vendor/github.com/google/uuid/version1.go delete mode 100644 vendor/github.com/google/uuid/version4.go rename vendor/github.com/{mattn/go-colorable => hashicorp/go-cty}/LICENSE (94%) create mode 100644 vendor/github.com/hashicorp/go-cty/cty/capsule.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/collection.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/doc.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/public.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/unify.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/doc.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/element_iterator.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/error.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gob.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gocty/in.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gocty/out.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/helper.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/doc.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/marshal.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/simple.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/value.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/list_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/map_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/marks.go rename vendor/github.com/{zclconf => 
hashicorp}/go-cty/cty/msgpack/doc.go (100%) rename vendor/github.com/{zclconf => hashicorp}/go-cty/cty/msgpack/dynamic.go (95%) rename vendor/github.com/{zclconf => hashicorp}/go-cty/cty/msgpack/infinity.go (100%) rename vendor/github.com/{zclconf => hashicorp}/go-cty/cty/msgpack/marshal.go (98%) rename vendor/github.com/{zclconf => hashicorp}/go-cty/cty/msgpack/type_implied.go (99%) rename vendor/github.com/{zclconf => hashicorp}/go-cty/cty/msgpack/unknown.go (100%) rename vendor/github.com/{zclconf => hashicorp}/go-cty/cty/msgpack/unmarshal.go (99%) create mode 100644 vendor/github.com/hashicorp/go-cty/cty/null.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/object_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/path.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/path_set.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/primitive_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set/gob.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set/iterator.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set/ops.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set/rules.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set/set.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set_helper.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set_internals.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/tuple_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/type_conform.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/types_to_register.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/unknown.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/value.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/value_init.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/value_ops.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/walk.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_stdio.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto delete mode 100644 vendor/github.com/hashicorp/hcl/.gitignore delete mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml delete mode 100644 vendor/github.com/hashicorp/hcl/Makefile delete mode 100644 vendor/github.com/hashicorp/hcl/README.md delete mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml delete mode 100644 vendor/github.com/hashicorp/hcl/decoder.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go delete mode 100644 
vendor/github.com/hashicorp/hcl/hcl/token/position.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go delete mode 100644 vendor/github.com/hashicorp/hcl/lex.go delete mode 100644 vendor/github.com/hashicorp/hcl/parse.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/types.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/public.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcled/doc.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go delete mode 100644 
vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/ast.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/doc.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/navigation.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/parser.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/peeker.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/public.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/scanner.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/spec.md
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/structure.go
delete mode 100644 vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/LICENSE
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go
delete mode 100644 vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go
delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go
rename vendor/github.com/hashicorp/{hcl => terraform-plugin-sdk/v2}/LICENSE (100%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/doc.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/acctest/helper.go (89%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/customdiff/compose.go (88%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/customdiff/computed.go (62%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/customdiff/condition.go (69%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/customdiff/doc.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/customdiff/force_new.go (80%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/customdiff/validate.go (71%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/hashcode/hashcode.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/logging/logging.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/logging/transport.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/resource/error.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/resource/id.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/resource/state.go (87%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/resource/state_shim.go (54%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/resource/testing.go (66%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/resource/testing_new.go (79%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/resource/testing_new_config.go (91%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/resource/testing_new_import_state.go (67%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/resource/wait.go (61%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/README.md (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/core_schema.go (94%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/data_source_resource_shim.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/equal.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/field_reader.go (96%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/field_reader_config.go (85%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/field_reader_diff.go (99%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/field_reader_map.go (88%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/field_reader_multi.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/field_writer.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/field_writer_map.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/getsource_string.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/provider.go (55%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/resource.go (71%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/resource_data.go (85%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/resource_data_get_source.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/resource_diff.go (99%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/resource_importer.go (61%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/resource_timeout.go (98%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/schema.go (75%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/serialize.go (94%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/set.go (85%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/shims.go (81%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/testing.go (56%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/valuetype.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/helper/schema/valuetype_string.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/addrs/doc.go (100%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/configs/configschema/coerce_value.go (98%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/configs/configschema/doc.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/configs/configschema/empty_value.go (98%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/configs/configschema/nestingmode_string.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/configs/configschema/schema.go (99%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/configs/hcl2shim/flatmap.go (99%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/configs/hcl2shim/paths.go (99%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/configs/hcl2shim/single_attr_body.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/configs/hcl2shim/values.go (98%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/configs/hcl2shim/values_equiv.go (99%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/diagutils/diagutils.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/helper/plugin/doc.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/helper/plugin/grpc_provider.go (85%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/helper/plugin/unknown.go (96%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/plans/objchange/normalize_obj.go (97%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/plugin/convert/diagnostics.go (64%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/plugin/convert/schema.go (91%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfdiags/config_traversals.go (73%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfdiags/diagnostic_base.go (73%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfdiags/diagnostics.go (64%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfdiags/doc.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfdiags/error.go (60%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfdiags/rpc_friendly.go (66%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfdiags/severity_string.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfdiags/simple_warning.go (63%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfplugin5/generate.sh (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfplugin5/tfplugin5.pb.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/internal/tfplugin5/tfplugin5.proto (100%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/grpc_provider.go
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/terraform/diff.go (70%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/terraform/resource.go (60%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/terraform/resource_mode.go (100%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/terraform/resource_mode_string.go (100%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/terraform/state.go (72%)
rename vendor/github.com/hashicorp/terraform-plugin-sdk/{ => v2}/terraform/state_filter.go (77%)
create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/cache.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/static.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/disco/disco.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/disco/host.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/go.mod
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/go.sum
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/label_iter.go
delete mode 100644 vendor/github.com/hashicorp/terraform-svchost/svchost.go
create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccessSDK.go
delete mode 100644 vendor/github.com/mattn/go-colorable/.travis.yml
delete mode 100644 vendor/github.com/mattn/go-colorable/README.md
delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go
delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go
delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go
delete mode 100644 vendor/github.com/mattn/go-colorable/go.mod
delete mode 100644 vendor/github.com/mattn/go-colorable/go.sum
delete mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go
delete mode 100644 vendor/github.com/mattn/go-isatty/.travis.yml
delete mode 100644 vendor/github.com/mattn/go-isatty/LICENSE
delete mode 100644 vendor/github.com/mattn/go-isatty/README.md
delete mode 100644 vendor/github.com/mattn/go-isatty/doc.go
delete mode 100644 vendor/github.com/mattn/go-isatty/go.mod
delete mode 100644 vendor/github.com/mattn/go-isatty/go.sum
delete mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go
delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go
delete mode 100644 vendor/github.com/mattn/go-isatty/renovate.json
delete mode 100644 vendor/github.com/mitchellh/cli/.travis.yml
delete mode 100644 vendor/github.com/mitchellh/cli/LICENSE
delete mode 100644 vendor/github.com/mitchellh/cli/Makefile
delete mode 100644 vendor/github.com/mitchellh/cli/README.md
delete mode 100644 vendor/github.com/mitchellh/cli/autocomplete.go
delete mode 100644 vendor/github.com/mitchellh/cli/cli.go
delete mode 100644 vendor/github.com/mitchellh/cli/command.go
delete mode 100644 vendor/github.com/mitchellh/cli/command_mock.go
delete mode 100644 vendor/github.com/mitchellh/cli/go.mod
delete mode 100644 vendor/github.com/mitchellh/cli/go.sum
delete mode 100644 vendor/github.com/mitchellh/cli/help.go
delete mode 100644 vendor/github.com/mitchellh/cli/ui.go
delete mode 100644 vendor/github.com/mitchellh/cli/ui_colored.go
delete mode 100644 vendor/github.com/mitchellh/cli/ui_concurrent.go
delete mode 100644 vendor/github.com/mitchellh/cli/ui_mock.go
delete mode 100644 vendor/github.com/mitchellh/cli/ui_writer.go
delete mode 100644 vendor/github.com/mitchellh/colorstring/.travis.yml
delete mode 100644 vendor/github.com/mitchellh/colorstring/README.md
delete mode 100644 vendor/github.com/mitchellh/colorstring/colorstring.go
delete mode 100644 vendor/github.com/mitchellh/colorstring/go.mod
delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go
delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
create mode 100644 vendor/github.com/ory/dockertest/v3/.gitignore
create mode 100644 vendor/github.com/ory/dockertest/v3/.travis.yml
create mode 100644 vendor/github.com/ory/dockertest/v3/CONTRIBUTING.md
rename vendor/github.com/{zclconf/go-cty-yaml => ory/dockertest/v3}/LICENSE (99%)
create mode 100644 vendor/github.com/ory/dockertest/v3/README.md
create mode 100644 vendor/github.com/ory/dockertest/v3/SECURITY.md
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/AUTHORS
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/DOCKER-LICENSE
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/LICENSE
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/README.markdown
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/auth.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/change.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/client.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/client_unix.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/client_windows.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/container.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/distribution.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/env.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/event.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/exec.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/image.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/misc.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/network.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/env.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/hosts.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/hosts_unix.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/hosts_windows.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/ip.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/opts.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/opts_unix.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/opts_windows.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/quotedstring.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/runtime.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/opts/ulimit.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/README.md (100%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/archive.go (92%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_linux.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_other.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/archive_unix.go (67%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/archive_windows.go (70%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/changes.go (94%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/changes_linux.go (89%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/changes_other.go (93%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/changes_unix.go (59%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_windows.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/copy.go (96%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/copy_unix.go (62%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/copy_windows.go (58%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/diff.go (93%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/time_linux.go (68%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/time_unsupported.go (73%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/whiteouts.go (93%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/archive/wrap.go (94%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/fileutils/fileutils.go (97%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/fileutils/fileutils_darwin.go (84%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/fileutils/fileutils_unix.go (84%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/fileutils/fileutils_windows.go (62%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_linux.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_others.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_unix.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_windows.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/idtools/idtools.go (83%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/idtools/idtools_unix.go (94%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/idtools_windows.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/idtools/usergroupadd_linux.go (98%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/idtools/usergroupadd_unsupported.go (83%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/idtools/utils_unix.go (90%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/ioutils/buffer.go (91%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/ioutils/bytespipe.go (98%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/ioutils/fswriters.go (98%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/ioutils/readers.go (97%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/ioutils/temp_unix.go (71%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/ioutils/temp_windows.go (70%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/ioutils/writeflusher.go (96%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/ioutils/writers.go (94%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/jsonmessage/jsonmessage.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/longpath/longpath.go (90%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/mount/flags.go (89%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/mount/flags_freebsd.go (92%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/mount/flags_linux.go (97%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/mount/flags_unsupported.go (87%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mount.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/mount/mounter_freebsd.go (78%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/mount/mounter_linux.go (73%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mounter_unsupported.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/mount/mountinfo.go (93%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/mount/mountinfo_freebsd.go (74%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_linux.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/mount/mountinfo_unsupported.go (62%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_windows.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/mount/sharedsubtree_linux.go (85%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/pools/pools.go (96%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/stdcopy/stdcopy.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/chtimes.go (90%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/chtimes_unix.go (78%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/chtimes_windows.go (90%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/errors.go (82%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/exitcode.go (85%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/filesys.go (97%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/filesys_windows.go (95%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/init.go (83%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/system/init_unix.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/system/init_windows.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/system/lcow.go
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/lcow_unix.go (65%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/lcow_windows.go (63%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/lstat_unix.go (71%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/lstat_windows.go (78%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/meminfo.go (83%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/meminfo_linux.go (94%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/meminfo_unsupported.go (70%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/meminfo_windows.go (93%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/mknod.go (91%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/mknod_windows.go (77%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/path.go (96%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/process_unix.go (84%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/process_windows.go (80%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/rm.go (93%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/stat_darwin.go (78%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/stat_freebsd.go (78%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/stat_linux.go (86%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/stat_openbsd.go (78%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/stat_solaris.go (78%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/stat_unix.go (90%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/stat_windows.go (92%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/syscall_unix.go (85%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/syscall_windows.go (57%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/umask.go (76%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/umask_windows.go (71%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/utimes_freebsd.go (88%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/utimes_linux.go (89%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/utimes_unsupported.go (72%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/xattrs_linux.go (91%)
rename vendor/github.com/{docker => ory/dockertest/v3}/docker/pkg/system/xattrs_unsupported.go (82%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/ascii.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/proxy.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/tc.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/term.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/term_windows.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/termios_bsd.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/termios_linux.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/ansi_reader.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/ansi_writer.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/console.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/windows.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/pkg/term/winsize.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/plugin.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/signal.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/tar.go
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/tls.go
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/auth.go (91%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/blkiodev/blkio.go (84%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/client.go (91%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/configs.go (81%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/container/config.go (95%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/container/container_changes.go (88%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/container/container_create.go (89%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/container/container_top.go (89%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/container/container_update.go (86%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/container/container_wait.go (91%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/container/host_config.go (89%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/container/hostconfig_unix.go (92%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/container/hostconfig_windows.go (92%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/container/waitcondition.go (90%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/error_response.go (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/filters/parse.go (84%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/graph_driver_data.go (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/id_response.go (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/image_delete_response_item.go (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/image_summary.go (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/mount/mount.go (96%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/network/network.go (84%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/plugin.go (97%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/plugin_device.go (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/plugin_env.go (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/plugin_interface_type.go (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/plugin_mount.go (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/plugin_responses.go (96%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/port.go (89%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/registry/authenticate.go (88%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/registry/registry.go (97%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/seccomp.go (93%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/service_update_response.go (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/stats.go (96%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/strslice/strslice.go (90%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/types.go (94%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/versions/README.md (100%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/versions/compare.go (94%)
rename vendor/github.com/{docker/docker/api => ory/dockertest/v3/docker}/types/volume.go (100%)
create mode 100644 vendor/github.com/ory/dockertest/v3/docker/volume.go
create mode 100644 vendor/github.com/ory/dockertest/v3/dockertest.go
create mode 100644 vendor/github.com/ory/dockertest/v3/go.mod
create mode 100644
vendor/github.com/ory/dockertest/v3/go.sum delete mode 100644 vendor/github.com/posener/complete/.gitignore delete mode 100644 vendor/github.com/posener/complete/.travis.yml delete mode 100644 vendor/github.com/posener/complete/LICENSE.txt delete mode 100644 vendor/github.com/posener/complete/args.go delete mode 100644 vendor/github.com/posener/complete/cmd/cmd.go delete mode 100644 vendor/github.com/posener/complete/cmd/install/bash.go delete mode 100644 vendor/github.com/posener/complete/cmd/install/fish.go delete mode 100644 vendor/github.com/posener/complete/cmd/install/install.go delete mode 100644 vendor/github.com/posener/complete/cmd/install/utils.go delete mode 100644 vendor/github.com/posener/complete/cmd/install/zsh.go delete mode 100644 vendor/github.com/posener/complete/command.go delete mode 100644 vendor/github.com/posener/complete/complete.go delete mode 100644 vendor/github.com/posener/complete/go.mod delete mode 100644 vendor/github.com/posener/complete/go.sum delete mode 100644 vendor/github.com/posener/complete/log.go delete mode 100644 vendor/github.com/posener/complete/match/file.go delete mode 100644 vendor/github.com/posener/complete/match/match.go delete mode 100644 vendor/github.com/posener/complete/match/prefix.go delete mode 100644 vendor/github.com/posener/complete/predict.go delete mode 100644 vendor/github.com/posener/complete/predict_files.go delete mode 100644 vendor/github.com/posener/complete/predict_set.go delete mode 100644 vendor/github.com/posener/complete/readme.md delete mode 100644 vendor/github.com/posener/complete/test.sh delete mode 100644 vendor/github.com/posener/complete/utils.go delete mode 100644 vendor/github.com/spf13/afero/.travis.yml delete mode 100644 vendor/github.com/spf13/afero/LICENSE.txt delete mode 100644 vendor/github.com/spf13/afero/README.md delete mode 100644 vendor/github.com/spf13/afero/afero.go delete mode 100644 vendor/github.com/spf13/afero/appveyor.yml delete mode 100644 vendor/github.com/spf13/afero/basepath.go delete mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go delete mode 100644 vendor/github.com/spf13/afero/const_bsds.go delete mode 100644 vendor/github.com/spf13/afero/const_win_unix.go delete mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go delete mode 100644 vendor/github.com/spf13/afero/go.mod delete mode 100644 vendor/github.com/spf13/afero/go.sum delete mode 100644 vendor/github.com/spf13/afero/httpFs.go delete mode 100644 vendor/github.com/spf13/afero/ioutil.go delete mode 100644 vendor/github.com/spf13/afero/lstater.go delete mode 100644 vendor/github.com/spf13/afero/match.go delete mode 100644 vendor/github.com/spf13/afero/mem/dir.go delete mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go delete mode 100644 vendor/github.com/spf13/afero/mem/file.go delete mode 100644 vendor/github.com/spf13/afero/memmap.go delete mode 100644 vendor/github.com/spf13/afero/os.go delete mode 100644 vendor/github.com/spf13/afero/path.go delete mode 100644 vendor/github.com/spf13/afero/readonlyfs.go delete mode 100644 vendor/github.com/spf13/afero/regexpfs.go delete mode 100644 vendor/github.com/spf13/afero/unionFile.go delete mode 100644 vendor/github.com/spf13/afero/util.go delete mode 100644 
vendor/github.com/testcontainers/testcontainers-go/.gitignore delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/.travis.yml delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/LICENSE delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/README.md delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/container.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/docker.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/generic.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/go.mod delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/go.sum delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/logconsumer.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/network.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/reaper.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/http.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/log.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/multi.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/sql.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/wait.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/.travis.yml delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/NOTICE delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/apic.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/converter.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/decode.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/emitterc.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/encode.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/error.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/go.mod delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/go.sum delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/implied_type.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/parserc.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/readerc.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/resolve.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/scannerc.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/writerc.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/yaml.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/yamlh.go delete mode 100644 vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go delete mode 100644 vendor/golang.org/x/crypto/AUTHORS delete mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/crypto/LICENSE delete mode 100644 vendor/golang.org/x/crypto/PATENTS delete mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go delete mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go delete mode 100644 vendor/golang.org/x/crypto/blowfish/block.go 
 delete mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go
 delete mode 100644 vendor/golang.org/x/crypto/blowfish/const.go
 delete mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/read.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
 delete mode 100644 vendor/golang.org/x/crypto/openpgp/write.go
 delete mode 100644 vendor/golang.org/x/net/internal/socks/client.go
 delete mode 100644 vendor/golang.org/x/net/internal/socks/socks.go
 delete mode 100644 vendor/golang.org/x/net/proxy/dial.go
 delete mode 100644 vendor/golang.org/x/net/proxy/direct.go
 delete mode 100644 vendor/golang.org/x/net/proxy/per_host.go
 delete mode 100644 vendor/golang.org/x/net/proxy/proxy.go
 delete mode 100644 vendor/golang.org/x/net/proxy/socks5.go
 delete mode 100644 vendor/golang.org/x/sync/AUTHORS
 delete mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS
 delete mode 100644 vendor/golang.org/x/sync/LICENSE
 delete mode 100644 vendor/golang.org/x/sync/PATENTS
 delete mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go
 create mode 100644 vendor/google.golang.org/grpc/reflection/README.md
 create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
 create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
 create mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection.go
 delete mode 100644 vendor/google.golang.org/grpc/test/bufconn/bufconn.go

diff --git a/Makefile b/Makefile
index 0934ea88..21f27f13 100644
--- a/Makefile
+++ b/Makefile
@@ -1,41 +1,40 @@
 # Makefile
 VERSION ?= 0.0.0
 NAME=terraform-provider-pingaccess_v${VERSION}
-BASE_DOCKER_TAG=terraform-provider-pingaccess-test
+BASE_DOCKER_TAG=pingidentity/pingaccess:6.0.2-edge
 
 pa-init:
-	@docker build -t ${BASE_DOCKER_TAG} .
 	@docker run --rm -d --hostname pingaccess --name pingaccess -e PING_IDENTITY_DEVOPS_KEY=$(PING_IDENTITY_DEVOPS_KEY) -e PING_IDENTITY_DEVOPS_USER=$(PING_IDENTITY_DEVOPS_USER) -e PING_IDENTITY_ACCEPT_EULA=YES --publish 9000:9000 ${BASE_DOCKER_TAG}
 
 test:
 	@rm -f pingaccess/terraform.log
-	@TF_LOG=TRACE TF_LOG_PATH=./terraform.log TF_ACC=1 go test -mod=vendor ./... -v
+	@TF_LOG=TRACE TF_LOG_PATH=./terraform.log TF_ACC=1 go test -mod=vendor ./... -v -trimpath
 
 unit-test:
-	@go test -mod=vendor ./... -v
+	@go test -mod=vendor ./... -v -trimpath
 
 test-and-report:
 	@rm -f pingaccess/terraform.log coverage.out report.json
-	@TF_LOG=TRACE TF_LOG_PATH=./terraform.log TF_ACC=1 go test -mod=vendor ./... -v -coverprofile=coverage.out -json > report.json && go tool cover -func=coverage.out
+	@TF_LOG=TRACE TF_LOG_PATH=./terraform.log TF_ACC=1 go test -mod=vendor ./... -v -trimpath -coverprofile=coverage.out -json > report.json && go tool cover -func=coverage.out
 
 build:
-	@go build -mod=vendor -o ${NAME} -gcflags "all=-trimpath=$GOPATH" .
+	@go build -mod=vendor -o ${NAME} -trimpath .
 
 release:
 	@rm -rf build/*
-	GOOS=darwin GOARCH=amd64 go build -mod=vendor -o build/darwin_amd64/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/darwin_amd64.zip build/darwin_amd64/${NAME}
-	GOOS=freebsd GOARCH=386 go build -mod=vendor -o build/freebsd_386/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/freebsd_386.zip build/freebsd_386/${NAME}
-	GOOS=freebsd GOARCH=amd64 go build -mod=vendor -o build/freebsd_amd64/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/freebsd_amd64.zip build/freebsd_amd64/${NAME}
-	GOOS=freebsd GOARCH=arm go build -mod=vendor -o build/freebsd_arm/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/freebsd_arm.zip build/freebsd_arm/${NAME}
-	GOOS=linux GOARCH=386 go build -mod=vendor -o build/linux_386/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/linux_386.zip build/linux_386/${NAME}
-	GOOS=linux GOARCH=amd64 go build -mod=vendor -o build/linux_amd64/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/linux_amd64.zip build/linux_amd64/${NAME}
-	GOOS=linux GOARCH=arm go build -mod=vendor -o build/linux_arm/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/linux_arm.zip build/linux_arm/${NAME}
-	GOOS=openbsd GOARCH=386 go build -mod=vendor -o build/openbsd_386/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/openbsd_386.zip build/openbsd_386/${NAME}
-	GOOS=openbsd GOARCH=amd64 go build -mod=vendor -o build/openbsd_amd64/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/openbsd_amd64.zip build/openbsd_amd64/${NAME}
-	GOOS=solaris GOARCH=amd64 go build -mod=vendor -o build/solaris_amd64/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/solaris_amd64.zip build/solaris_amd64/${NAME}
-	GOOS=windows GOARCH=386 go build -mod=vendor -o build/windows_386/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/windows_386.zip build/windows_386/${NAME}
-	GOOS=windows GOARCH=amd64 go build -mod=vendor -o build/windows_amd64/${NAME} -gcflags "all=-trimpath=$$GOPATH" . && zip -j build/windows_amd64.zip build/windows_amd64/${NAME}
+	GOOS=darwin GOARCH=amd64 go build -mod=vendor -o build/darwin_amd64/${NAME} -trimpath . && zip -j build/darwin_amd64.zip build/darwin_amd64/${NAME}
+	GOOS=freebsd GOARCH=386 go build -mod=vendor -o build/freebsd_386/${NAME} -trimpath . && zip -j build/freebsd_386.zip build/freebsd_386/${NAME}
+	GOOS=freebsd GOARCH=amd64 go build -mod=vendor -o build/freebsd_amd64/${NAME} -trimpath . && zip -j build/freebsd_amd64.zip build/freebsd_amd64/${NAME}
+	GOOS=freebsd GOARCH=arm go build -mod=vendor -o build/freebsd_arm/${NAME} -trimpath . && zip -j build/freebsd_arm.zip build/freebsd_arm/${NAME}
+	GOOS=linux GOARCH=386 go build -mod=vendor -o build/linux_386/${NAME} -trimpath . && zip -j build/linux_386.zip build/linux_386/${NAME}
+	GOOS=linux GOARCH=amd64 go build -mod=vendor -o build/linux_amd64/${NAME} -trimpath . && zip -j build/linux_amd64.zip build/linux_amd64/${NAME}
+	GOOS=linux GOARCH=arm go build -mod=vendor -o build/linux_arm/${NAME} -trimpath . && zip -j build/linux_arm.zip build/linux_arm/${NAME}
+	GOOS=openbsd GOARCH=386 go build -mod=vendor -o build/openbsd_386/${NAME} -trimpath . && zip -j build/openbsd_386.zip build/openbsd_386/${NAME}
+	GOOS=openbsd GOARCH=amd64 go build -mod=vendor -o build/openbsd_amd64/${NAME} -trimpath . && zip -j build/openbsd_amd64.zip build/openbsd_amd64/${NAME}
+	GOOS=solaris GOARCH=amd64 go build -mod=vendor -o build/solaris_amd64/${NAME} -trimpath . && zip -j build/solaris_amd64.zip build/solaris_amd64/${NAME}
+	GOOS=windows GOARCH=386 go build -mod=vendor -o build/windows_386/${NAME} -trimpath . && zip -j build/windows_386.zip build/windows_386/${NAME}
+	GOOS=windows GOARCH=amd64 go build -mod=vendor -o build/windows_amd64/${NAME} -trimpath . && zip -j build/windows_amd64.zip build/windows_amd64/${NAME}
 
 deploy-local:
 	@mkdir -p ~/.terraform.d/plugins
diff --git a/func-tests/main.tf b/func-tests/main.tf
index bcf9da3d..4de5e4d1 100644
--- a/func-tests/main.tf
+++ b/func-tests/main.tf
@@ -1,5 +1,5 @@
 provider "pingaccess" {
-  // password = "2Access2"
+  password = "2FederateM0re"
 }
 
 resource "pingaccess_site" "demo" {
@@ -91,4 +91,7 @@ resource "pingaccess_rule" "demo_2" {
 EOF
 }
 
-
+resource "pingaccess_acme_server" "acc_test" {
+  name = "foo"
+  url  = "https://host.docker.internal:14000/dir"
+}
diff --git a/go.mod b/go.mod
index 99161304..bcca418f 100644
--- a/go.mod
+++ b/go.mod
@@ -1,18 +1,19 @@
 module github.com/iwarapter/terraform-provider-pingaccess
 
-go 1.13
+go 1.14
 
 require (
-	github.com/Microsoft/go-winio v0.4.14 // indirect
+	github.com/cenkalti/backoff v2.2.1+incompatible // indirect
 	github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c // indirect
-	github.com/docker/go-units v0.4.0 // indirect
 	github.com/google/go-cmp v0.3.1
-	github.com/hashicorp/terraform-plugin-sdk v1.9.0
-	github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200414193849-b443faf152d2
+	github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.1
+	github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200605105101-6a0dcd01d169
+	github.com/ory/dockertest/v3 v3.6.0
+	github.com/pkg/errors v0.9.1 // indirect
 	github.com/sirupsen/logrus v1.5.0 // indirect
-	github.com/testcontainers/testcontainers-go v0.4.0
 	github.com/tidwall/gjson v1.3.2
 	github.com/tidwall/sjson v1.0.4
 	golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect
 	golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa // indirect
+	gopkg.in/yaml.v2 v2.2.8 // indirect
 )
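Worth calling out in the go.mod changes above: the acceptance-test harness moves from testcontainers-go to ory/dockertest/v3, and the Makefile now runs the published pingidentity/pingaccess:6.0.2-edge image directly instead of building a local one. The patch itself only swaps the modules; as a rough orientation, a minimal dockertest sketch for booting that image could look like the following (the helper name and retry logic are illustrative, not code from this PR; image tag, EULA env var, and port 9000 come from the Makefile above):

```go
package pingaccess

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"testing"

	"github.com/ory/dockertest/v3"
)

// startPingAccess is a hypothetical helper showing the ory/dockertest/v3
// pattern: run the container, then poll until the admin API answers.
func startPingAccess(t *testing.T) (*dockertest.Pool, *dockertest.Resource) {
	pool, err := dockertest.NewPool("") // connect to the local Docker daemon
	if err != nil {
		t.Fatalf("could not connect to docker: %v", err)
	}
	res, err := pool.Run("pingidentity/pingaccess", "6.0.2-edge", []string{
		"PING_IDENTITY_ACCEPT_EULA=YES",
	})
	if err != nil {
		t.Fatalf("could not start container: %v", err)
	}
	client := &http.Client{Transport: &http.Transport{
		// the admin API serves a self-signed certificate
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	// Retry backs off until the admin endpoint responds.
	if err := pool.Retry(func() error {
		resp, err := client.Get(fmt.Sprintf("https://localhost:%s", res.GetPort("9000/tcp")))
		if err != nil {
			return err
		}
		return resp.Body.Close()
	}); err != nil {
		t.Fatalf("pingaccess never became ready: %v", err)
	}
	return pool, res
}
```

Callers would typically `defer pool.Purge(res)` to tear the container down when the test finishes.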
diff --git a/go.sum b/go.sum
index 57134648..d0675bcd 100644
--- a/go.sum
+++ b/go.sum
@@ -12,13 +12,10 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7O
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
 github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
 github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
 github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
@@ -33,34 +30,26 @@ github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFU
 github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
 github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=
 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
-github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
+github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY=
 github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
-github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ=
-github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
-github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=
+github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c h1:8ahmSVELW1wghbjerVAyuEYD5+Dio66RYvSS0iGfL1M=
 github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v0.7.3-0.20190506211059-b20a14b54661 h1:ZuxGvIvF01nfc/G9RJ5Q7Va1zQE2WJyG18Zv3DqCEf4=
-github.com/docker/docker v0.7.3-0.20190506211059-b20a14b54661/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
 github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -76,20 +65,8 @@ github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
-github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
-github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
-github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
 github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
-github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -113,20 +90,13 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
 github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
@@ -135,6 +105,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6K
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI=
+github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs=
 github.com/hashicorp/go-getter v1.4.0 h1:ENHNi8494porjD0ZhIrjlAHnveSFhY7hvOJrV/fsKkw=
 github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY=
 github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
@@ -142,8 +114,8 @@ github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxC
 github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
 github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE=
-github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
+github.com/hashicorp/go-plugin v1.2.0 h1:CUfYokW0EJNDcGecVrHZK//Cp1GFlHwoqtcUIEiU6BY=
+github.com/hashicorp/go-plugin v1.2.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0=
 github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
 github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
 github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
@@ -156,22 +128,16 @@ github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCO
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws=
-github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
 github.com/hashicorp/hcl/v2 v2.0.0 h1:efQznTz+ydmQXq3BOnRa3AXzvCeTq1P4dKj/z5GLlY8=
 github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90=
 github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 h1:+RyjwU+Gnd/aTJBPZVDNm903eXVjjqhbaR4Ypx3xYyY=
-github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A=
 github.com/hashicorp/terraform-json v0.4.0 h1:KNh29iNxozP5adfUFBJ4/fWd0Cu3taGgjHB38JYqOF4=
 github.com/hashicorp/terraform-json v0.4.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU=
-github.com/hashicorp/terraform-plugin-sdk v1.9.0 h1:WBHHIX/RgF6/lbfMCzx0qKl96BbQy3bexWFvDqt1bhE=
-github.com/hashicorp/terraform-plugin-sdk v1.9.0/go.mod h1:C/AXwmDHqbc3h6URiHpIsVKrwV4PS0Sh0+VTaeEkShw=
-github.com/hashicorp/terraform-plugin-test v1.2.0 h1:AWFdqyfnOj04sxTdaAF57QqvW7XXrT8PseUHkbKsE8I=
-github.com/hashicorp/terraform-plugin-test v1.2.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs=
-github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg=
-github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.1 h1:ko726Cy16TsSOkVS08vAHTT37KYmHmuSkvIhXFFEhaY=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.1/go.mod h1:9f8d0niQjU7io5EpT5WhyAWdDUKKw1xytr+7hIsvna0=
+github.com/hashicorp/terraform-plugin-test v1.3.0 h1:hU5LoxrOn9qvOo+LTKN6mSav2J+dAMprbdxJPEQvp4U=
+github.com/hashicorp/terraform-plugin-test v1.3.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs=
 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
@@ -179,15 +145,14 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKe
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200414193849-b443faf152d2 h1:UIKwldmS841vnb8eULEZ/DCyW2mCuiQlAv/Wu9z/1l0=
-github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200414193849-b443faf152d2/go.mod h1:5FnKTPlitpx5z7Q/itbtKkgCEOBXXqhGOJqmryBZrZk=
+github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200605105101-6a0dcd01d169 h1:QbikLuFtFP35NIeBSogGV1YEVmZorSe9XhHcTRake9o=
+github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200605105101-6a0dcd01d169/go.mod h1:5FnKTPlitpx5z7Q/itbtKkgCEOBXXqhGOJqmryBZrZk=
+github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
+github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
@@ -201,25 +166,14 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3v
 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
-github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
-github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
 github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
@@ -229,6 +183,8 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk
 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.0.4 h1:ZU1VNC02qyufSZsjjs7+khruk2fKvbQ3TwRV/IBCeFA=
+github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
@@ -238,30 +194,26 @@ github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/I
 github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
 github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
-github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
 github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc6 h1:7AoN22rYxxkmsJS48wFaziH/n0OvrZVqL/TglgHKbKQ=
 github.com/opencontainers/runc v1.0.0-rc6/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/ory/dockertest v3.3.4+incompatible h1:VrpM6Gqg7CrPm3bL4Wm1skO+zFWLbh7/Xb5kGEbJRh8=
 github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
+github.com/ory/dockertest/v3 v3.6.0 h1:I6KNJ6izxGduLACQii2SP/g7GN0JM9Xfaik6aAVaw6Y=
+github.com/ory/dockertest/v3 v3.6.0/go.mod h1:4ZOpj8qBUmh8fcBSVzkH2bws2s91JdGvHUqan4GHEuQ=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -269,21 +221,16 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI=
-github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
 github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
-github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -295,8 +242,6 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/testcontainers/testcontainers-go v0.4.0 h1:BpzZG0/I4s4oAVrkYhf9K3R0AKLvhE0bX1kVSqyUVx8=
-github.com/testcontainers/testcontainers-go v0.4.0/go.mod h1:BXwe1JilTOLT8cmVyPMDbIw7e+8UCGeAhxjBwguG5wQ=
 github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
 github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
 github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
@@ -305,21 +250,16 @@ github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tidwall/sjson v1.0.4 h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg=
 github.com/tidwall/sjson v1.0.4/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
-github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
-github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
 github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
 github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
 github.com/vmihailenco/msgpack v3.3.3+incompatible h1:wapg9xDUZDzGCNFlwc5SqI1rvcciqcxEHac4CYj89xI=
 github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
 github.com/vmihailenco/msgpack v4.0.1+incompatible h1:RMF1enSPeKTlXrXdOcqjFUElywVZjjC6pqse21bKbEU=
 github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
-github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
 github.com/zclconf/go-cty v1.1.0 h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw=
 github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
 github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8=
 github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=
-github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8=
-github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -339,6 +279,7 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -351,8 +292,7 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191009170851-d66e71096ffb h1:TR699M2v0qoKTOHxeLgp6zPqaQNs74f01a/ob9W0qko=
-golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
 golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
@@ -368,13 +308,10 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190311152110-c8c8c57fd1e1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -387,7 +324,7 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200121082415-34d275377bf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
 golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -400,8 +337,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -412,6 +347,7 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn
 golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -424,6 +360,7 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -435,8 +372,7 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2El
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200310143817-43be25429f5a h1:lRlI5zu6AFy3iU/F8YWyNrAmn/tPCnhiTxfwhWb76eU=
 google.golang.org/genproto v0.0.0-20200310143817-43be25429f5a/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -457,12 +393,13 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
"github.com/hashicorp/terraform-plugin-sdk/v2/plugin" "github.com/iwarapter/terraform-provider-pingaccess/pingaccess" ) diff --git a/pingaccess/config.go b/pingaccess/config.go index 28edb2cf..8e031acd 100644 --- a/pingaccess/config.go +++ b/pingaccess/config.go @@ -2,6 +2,7 @@ package pingaccess import ( "crypto/tls" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "net/http" "net/url" "os" @@ -45,7 +46,7 @@ type Config struct { } // Client configures and returns a fully initialized PAClient -func (c *Config) Client() (interface{}, error) { +func (c *Config) Client() (interface{}, diag.Diagnostics) { http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} url, _ := url.Parse(c.BaseURL) client := pingaccess.NewClient(c.Username, c.Password, url, c.Context, nil) diff --git a/pingaccess/data_source_pingaccess_acme_default.go b/pingaccess/data_source_pingaccess_acme_default.go index 15805f8b..9cfb85cc 100644 --- a/pingaccess/data_source_pingaccess_acme_default.go +++ b/pingaccess/data_source_pingaccess_acme_default.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/data_source_pingaccess_acme_default_test.go b/pingaccess/data_source_pingaccess_acme_default_test.go index 7c952408..803fc434 100644 --- a/pingaccess/data_source_pingaccess_acme_default_test.go +++ b/pingaccess/data_source_pingaccess_acme_default_test.go @@ -3,7 +3,7 @@ package pingaccess import ( "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccPingAccessAcmeDefaultDataSource(t *testing.T) { diff --git a/pingaccess/data_source_pingaccess_certificate.go b/pingaccess/data_source_pingaccess_certificate.go index d006f47a..cec47439 100644 --- a/pingaccess/data_source_pingaccess_certificate.go +++ b/pingaccess/data_source_pingaccess_certificate.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/data_source_pingaccess_certificate_test.go b/pingaccess/data_source_pingaccess_certificate_test.go index 21265709..007191ba 100644 --- a/pingaccess/data_source_pingaccess_certificate_test.go +++ b/pingaccess/data_source_pingaccess_certificate_test.go @@ -5,8 +5,8 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccPingAccessCertificateDataSource(t *testing.T) { diff --git a/pingaccess/data_source_pingaccess_keypair.go b/pingaccess/data_source_pingaccess_keypair.go index 2f5bd78f..eb43b860 100644 --- a/pingaccess/data_source_pingaccess_keypair.go +++ b/pingaccess/data_source_pingaccess_keypair.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git 
a/pingaccess/data_source_pingaccess_keypair_test.go b/pingaccess/data_source_pingaccess_keypair_test.go index 83553c92..889100e7 100644 --- a/pingaccess/data_source_pingaccess_keypair_test.go +++ b/pingaccess/data_source_pingaccess_keypair_test.go @@ -4,8 +4,8 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccPingAccessKeyPairDataSource(t *testing.T) { diff --git a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go index e310aad3..dd5b1a36 100644 --- a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go +++ b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata_test.go b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata_test.go index 322a9d2d..eb8aa556 100644 --- a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata_test.go +++ b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata_test.go @@ -1,9 +1,11 @@ package pingaccess import ( + "fmt" + "os" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccPingAccessPingFederateRuntimeMetadataDataSource(t *testing.T) { @@ -53,9 +55,9 @@ func TestAccPingAccessPingFederateRuntimeMetadataDataSource(t *testing.T) { } func testAccPingAccessPingFederateRuntimeMetadataConfig() string { - return `data "pingaccess_pingfederate_runtime_metadata" "test" {} + return fmt.Sprintf(`data "pingaccess_pingfederate_runtime_metadata" "test" {} resource "pingaccess_pingfederate_runtime" "app_demo_pfr" { - issuer = "https://pf:9031" + issuer = "https://%s:9031" trusted_certificate_group_id = 2 -}` +}`, os.Getenv("PINGFEDERATE_TEST_IP")) } diff --git a/pingaccess/data_source_pingaccess_trusted_certificate_groups.go b/pingaccess/data_source_pingaccess_trusted_certificate_groups.go index 539268d2..62a8b8a2 100644 --- a/pingaccess/data_source_pingaccess_trusted_certificate_groups.go +++ b/pingaccess/data_source_pingaccess_trusted_certificate_groups.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/data_source_pingaccess_trusted_certificate_groups_test.go b/pingaccess/data_source_pingaccess_trusted_certificate_groups_test.go index 2dc6c629..8eefd4b7 100644 --- a/pingaccess/data_source_pingaccess_trusted_certificate_groups_test.go +++ b/pingaccess/data_source_pingaccess_trusted_certificate_groups_test.go @@ -5,8 +5,8 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func 
TestAccPingAccessTrustedCertificateGroupsDataSource(t *testing.T) { diff --git a/pingaccess/diff_suppress_funcs.go b/pingaccess/diff_suppress_funcs.go index 933602ff..ceeeebdf 100644 --- a/pingaccess/diff_suppress_funcs.go +++ b/pingaccess/diff_suppress_funcs.go @@ -3,7 +3,7 @@ package pingaccess import ( "bytes" "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func suppressEquivalentJsonDiffs(k, old, new string, d *schema.ResourceData) bool { diff --git a/pingaccess/provider.go b/pingaccess/provider.go index 9160d686..d5d2ad63 100644 --- a/pingaccess/provider.go +++ b/pingaccess/provider.go @@ -1,13 +1,14 @@ package pingaccess import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) //Provider does stuff // -func Provider() terraform.ResourceProvider { +func Provider() *schema.Provider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "username": { @@ -69,7 +70,7 @@ func Provider() terraform.ResourceProvider { "pingaccess_oauth_server": resourcePingAccessOAuthServer(), "pingaccess_http_config_request_host_source": resourcePingAccessHTTPConfigRequestHostSource(), }, - ConfigureFunc: providerConfigure, + ConfigureContextFunc: providerConfigure, } } @@ -84,7 +85,8 @@ func init() { } } -func providerConfigure(d *schema.ResourceData) (interface{}, error) { +func providerConfigure(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { + config := &Config{ Username: d.Get("username").(string), Password: d.Get("password").(string), diff --git a/pingaccess/provider_test.go b/pingaccess/provider_test.go index 99148ac9..70dcbf46 100644 --- a/pingaccess/provider_test.go +++ b/pingaccess/provider_test.go @@ -6,14 +6,15 @@ import ( "crypto/tls" "errors" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" - "github.com/testcontainers/testcontainers-go" - "github.com/testcontainers/testcontainers-go/wait" + "github.com/ory/dockertest/v3" "github.com/tidwall/sjson" "io/ioutil" "log" + "math/rand" "net/http" "net/url" "os" @@ -21,11 +22,23 @@ import ( "reflect" "runtime" "testing" + "time" ) func TestMain(m *testing.M) { + acctest.UseBinaryDriver("pingaccess", Provider) _, acceptanceTesting := os.LookupEnv("TF_ACC") if acceptanceTesting { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + networkName := "tf-pa-test-network" + network, err := pool.CreateNetwork(networkName) + if err != nil { + log.Fatalf("Could not create docker network: %s", err) + } + defer network.Close() devOpsUser, devOpsUserExists := os.LookupEnv("PING_IDENTITY_DEVOPS_USER") devOpsKey, devOpsKeyExists := os.LookupEnv("PING_IDENTITY_DEVOPS_KEY") @@ -34,124 +47,128 @@ func TestMain(m *testing.M) { log.Fatalf("Both PING_IDENTITY_DEVOPS_USER and PING_IDENTITY_DEVOPS_KEY environment variables must be set for acceptance tests.") } - ctx := context.Background() - networkName := 
"tf-pa-test-network" - gcr := testcontainers.GenericContainerRequest{ - ContainerRequest: testcontainers.ContainerRequest{ - FromDockerfile: testcontainers.FromDockerfile{ - Context: "../", - Dockerfile: "Dockerfile", - }, - Networks: []string{networkName}, - ExposedPorts: []string{"9000/tcp"}, - WaitingFor: wait.ForLog("INFO: No file named /instance/data/data.json found, skipping import."), - Name: "terraform-provider-pingaccess-test", - Env: map[string]string{"PING_IDENTITY_ACCEPT_EULA": "YES", "PING_IDENTITY_DEVOPS_USER": devOpsUser, "PING_IDENTITY_DEVOPS_KEY": devOpsKey}, - }, - Started: true, + randomID := randomString(10) + paOpts := &dockertest.RunOptions{ + Name: fmt.Sprintf("pa-%s", randomID), + Repository: "pingidentity/pingaccess", + Tag: "6.0.1-edge", + NetworkID: network.Network.ID, + Env: []string{"PING_IDENTITY_ACCEPT_EULA=YES", fmt.Sprintf("PING_IDENTITY_DEVOPS_USER=%s", devOpsUser), fmt.Sprintf("PING_IDENTITY_DEVOPS_KEY=%s", devOpsKey)}, } - pfcr := testcontainers.GenericContainerRequest{ - ContainerRequest: testcontainers.ContainerRequest{ - Image: "pingidentity/pingfederate:edge", - Networks: []string{networkName}, - ExposedPorts: []string{"9031/tcp", "9999/tcp"}, - WaitingFor: wait.ForLog("INFO [org.eclipse.jetty.server.AbstractConnector] Started ServerConnector"), - Name: "terraform-provider-pingaccess-test-pf", - Env: map[string]string{ - "PING_IDENTITY_ACCEPT_EULA": "YES", - "PING_IDENTITY_DEVOPS_USER": devOpsUser, - "PING_IDENTITY_DEVOPS_KEY": devOpsKey, - "SERVER_PROFILE_URL": "https://github.com/pingidentity/pingidentity-server-profiles.git", - "SERVER_PROFILE_PATH": "getting-started/pingfederate", - }, - NetworkAliases: map[string][]string{ - networkName: { - "pf", - }, - }, + pfOpts := &dockertest.RunOptions{ + Name: fmt.Sprintf("pf-%s", randomID), + Repository: "pingidentity/pingfederate", + Tag: "10.0.2-edge", + NetworkID: network.Network.ID, + Env: []string{ + "PING_IDENTITY_ACCEPT_EULA=YES", + fmt.Sprintf("PING_IDENTITY_DEVOPS_USER=%s", devOpsUser), + fmt.Sprintf("PING_IDENTITY_DEVOPS_KEY=%s", devOpsKey), + "SERVER_PROFILE_URL=https://github.com/pingidentity/pingidentity-server-profiles.git", + "SERVER_PROFILE_PATH=getting-started/pingfederate", }, - Started: true, } - provider, _ := gcr.ProviderType.GetProvider() - net, _ := provider.CreateNetwork(ctx, testcontainers.NetworkRequest{ - Name: networkName, - CheckDuplicate: true, - }) - - pfContainer, err := testcontainers.GenericContainer(ctx, pfcr) - paContainer, err := testcontainers.GenericContainer(ctx, gcr) + // pulls an image, creates a container based on it and runs it + paCont, err := pool.RunWithOptions(paOpts) + if err != nil { + log.Fatalf("Could not create pingaccess container: %s", err) + } + defer paCont.Close() + pfCont, err := pool.RunWithOptions(pfOpts) if err != nil { - log.Fatal(err) + log.Fatalf("Could not create pingfederate container: %s", err) } - defer net.Remove(ctx) - defer paContainer.Terminate(ctx) - defer pfContainer.Terminate(ctx) + defer pfCont.Close() - port, _ := paContainer.MappedPort(ctx, "9000") - url, _ := url.Parse(fmt.Sprintf("https://localhost:%s", port.Port())) - os.Setenv("PINGACCESS_BASEURL", url.String()) - os.Setenv("PINGACCESS_PASSWORD", "2FederateM0re") + pool.MaxWait = time.Minute * 2 + + // exponential backoff-retry, because the application in the container might not be ready to accept connections yet + url, _ := url.Parse(fmt.Sprintf("https://localhost:%s", paCont.GetPort("9000/tcp"))) log.Printf("Setting PingAccess admin API: %s", url.String()) 
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - client := pingaccess.NewClient("Administrator", "2FederateM0re", url, "/pa-admin-api/v3", nil) + client := pingaccess.NewClient("administrator", "2FederateM0re", url, "/pa-admin-api/v3", nil) + if err = pool.Retry(func() error { + log.Println("Attempting to connect to PingAccess admin API....") + _, _, err = client.Version.VersionCommand() + return err + }); err != nil { + log.Fatalf("Could not connect to pingaccess: %s", err) + } + os.Setenv("PINGACCESS_BASEURL", fmt.Sprintf("https://localhost:%s", paCont.GetPort("9000/tcp"))) + os.Setenv("PINGACCESS_PASSWORD", "2FederateM0re") + os.Setenv("PINGFEDERATE_TEST_IP", pfCont.GetIPInNetwork(network)) + log.Println("Connected to PingAccess admin API....") + + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} version, _, err := client.Version.VersionCommand() if err != nil { log.Fatalf("Failed to retrieve version from server: %v", err) } log.Printf("Connected to PingAccess version: %s", *version.Version) - pfPort, _ := pfContainer.MappedPort(ctx, "9999") - pfUrl, _ := url.Parse(fmt.Sprintf("https://localhost:%s", pfPort.Port())) + pfPort := pfCont.GetPort("9999/tcp") + pfUrl, _ := url.Parse(fmt.Sprintf("https://localhost:%s", pfPort)) + pfClient := &http.Client{} + if err = pool.Retry(func() error { + log.Println("Attempting to connect to PingFederate admin API....") + _, err := connectToPF(pfClient, pfUrl.String()) + return err + }); err != nil { + log.Fatalf("Could not connect to pingfederate: %s", err) + } err = setupPF(pfUrl.String()) if err != nil { log.Fatalf("Failed to setup PF server: %v", err) } log.Printf("Connected to PingFederate setup complete") + + paCont.Expire(360) + pfCont.Expire(360) + //resource.TestMain(m) code := m.Run() - log.Println("Tests complete shutting down container") + paCont.Close() + pfCont.Close() + network.Close() + log.Println("Tests complete, shutting down containers") os.Exit(code) } else { m.Run() + //resource.TestMain(m) } } -var testAccProviders map[string]terraform.ResourceProvider +var testAccProviders map[string]*schema.Provider var testAccProvider *schema.Provider -var testAccProviderFactories func(providers *[]*schema.Provider) map[string]terraform.ResourceProviderFactory -var testAccTemplateProvider *schema.Provider func init() { - testAccProvider = Provider().(*schema.Provider) - //testAccTemplateProvider = template.Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ + testAccProvider = Provider() + testAccProviders = map[string]*schema.Provider{ "pingaccess": testAccProvider, - "template": testAccTemplateProvider, - } - testAccProviderFactories = func(providers *[]*schema.Provider) map[string]terraform.ResourceProviderFactory { - return map[string]terraform.ResourceProviderFactory{ - "pingaccess": func() (terraform.ResourceProvider, error) { - p := Provider() - *providers = append(*providers, p.(*schema.Provider)) - return p, nil - }, - } } } -func setupPF(adminUrl string) error { - client := &http.Client{} +func connectToPF(client *http.Client, adminUrl string) (*http.Response, error) { req, err := http.NewRequest("GET", fmt.Sprintf("%s/pf-admin-api/v1/serverSettings", adminUrl), nil) if err != nil { - return err + return nil, err } req.SetBasicAuth("Administrator", "2FederateM0re") req.Header.Add("X-Xsrf-Header", "pingfederate") resp, err := client.Do(req) if err != nil { - return err + return nil, err } if resp.StatusCode
!= 200 { - return errors.New("Incorrect response code from admin: " + resp.Status) + return nil, errors.New("Incorrect response code from admin: " + resp.Status) + } + return resp, nil +} + +func setupPF(adminUrl string) error { + client := &http.Client{} + resp, err := connectToPF(client, adminUrl) + if err != nil { + return err } bodyText, err := ioutil.ReadAll(resp.Body) s := string(bodyText) @@ -159,7 +176,7 @@ func setupPF(adminUrl string) error { if err != nil { return err } - req, err = http.NewRequest("PUT", fmt.Sprintf("%s/pf-admin-api/v1/serverSettings", adminUrl), bytes.NewBuffer([]byte(s))) + req, err := http.NewRequest("PUT", fmt.Sprintf("%s/pf-admin-api/v1/serverSettings", adminUrl), bytes.NewBuffer([]byte(s))) req.SetBasicAuth("Administrator", "2FederateM0re") req.Header.Add("X-Xsrf-Header", "pingfederate") req.Header.Set("Content-Type", "application/json") @@ -174,12 +191,22 @@ func setupPF(adminUrl string) error { } func testAccPreCheck(t *testing.T) { - err := testAccProvider.Configure(terraform.NewResourceConfigRaw(nil)) + err := testAccProvider.Configure(context.TODO(), terraform.NewResourceConfigRaw(nil)) if err != nil { t.Fatal(err) } } +func randomString(length int) string { + seededRand := rand.New(rand.NewSource(time.Now().UnixNano())) + charset := "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + b := make([]byte, length) + for i := range b { + b[i] = charset[seededRand.Intn(len(charset))] + } + return string(b) +} + // assert fails the test if the condition is false. func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { if !condition { diff --git a/pingaccess/resource_pingaccess_access_token_validator.go b/pingaccess/resource_pingaccess_access_token_validator.go index ef6f2f45..85018450 100644 --- a/pingaccess/resource_pingaccess_access_token_validator.go +++ b/pingaccess/resource_pingaccess_access_token_validator.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_access_token_validator_test.go b/pingaccess/resource_pingaccess_access_token_validator_test.go index 9d100199..e2ecd08c 100644 --- a/pingaccess/resource_pingaccess_access_token_validator_test.go +++ b/pingaccess/resource_pingaccess_access_token_validator_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_acme_server.go b/pingaccess/resource_pingaccess_acme_server.go index b7482e00..87447a44 100644 --- a/pingaccess/resource_pingaccess_acme_server.go +++ b/pingaccess/resource_pingaccess_acme_server.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -47,7 +47,7 @@ func resourcePingAccessAcmeServerCreate(d *schema.ResourceData, m interface{}) e if err != nil { return fmt.Errorf("Error creating AcmeServer: %s", err) } - d.SetId(result.Id.String()) + d.SetId(*result.Id) return 
resourcePingAccessAcmeServerReadResult(d, &input.Body) } diff --git a/pingaccess/resource_pingaccess_acme_server_test.go b/pingaccess/resource_pingaccess_acme_server_test.go index 24a95241..32ab5933 100644 --- a/pingaccess/resource_pingaccess_acme_server_test.go +++ b/pingaccess/resource_pingaccess_acme_server_test.go @@ -5,9 +5,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_application.go b/pingaccess/resource_pingaccess_application.go index 0ca3ea8f..5ced95c2 100644 --- a/pingaccess/resource_pingaccess_application.go +++ b/pingaccess/resource_pingaccess_application.go @@ -5,7 +5,7 @@ import ( "log" "strconv" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_application_resource.go b/pingaccess/resource_pingaccess_application_resource.go index fb0eec46..d1a15472 100644 --- a/pingaccess/resource_pingaccess_application_resource.go +++ b/pingaccess/resource_pingaccess_application_resource.go @@ -5,7 +5,7 @@ import ( "strconv" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_application_resource_test.go b/pingaccess/resource_pingaccess_application_resource_test.go index 061b885b..6fe9fd39 100644 --- a/pingaccess/resource_pingaccess_application_resource_test.go +++ b/pingaccess/resource_pingaccess_application_resource_test.go @@ -11,9 +11,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_application_test.go b/pingaccess/resource_pingaccess_application_test.go index be22ed1a..98745422 100644 --- a/pingaccess/resource_pingaccess_application_test.go +++ b/pingaccess/resource_pingaccess_application_test.go @@ -2,13 +2,14 @@ package pingaccess import ( "fmt" + "os" "reflect" "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -145,7 +146,7 @@ func testAccPingAccessApplicationConfig(name, context, appType string) string { } resource "pingaccess_pingfederate_runtime" "app_demo_pfr" { - issuer = "https://pf:9031" + issuer = "https://%s:9031" trusted_certificate_group_id = 2 } @@ -166,7 +167,7 @@ func testAccPingAccessApplicationConfig(name, context, appType string) string { "phone" ] } - `, name, context, appType, appType) + `, name, context, appType, appType, os.Getenv("PINGFEDERATE_TEST_IP")) } func testAccCheckPingAccessApplicationExists(n string, c int64, out *pingaccess.ApplicationView) resource.TestCheckFunc { diff --git a/pingaccess/resource_pingaccess_auth_token_management.go b/pingaccess/resource_pingaccess_auth_token_management.go index dd455d3c..028fde46 100644 --- a/pingaccess/resource_pingaccess_auth_token_management.go +++ b/pingaccess/resource_pingaccess_auth_token_management.go @@ -5,7 +5,7 @@ import ( pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourcePingAccessAuthTokenManagement() *schema.Resource { diff --git a/pingaccess/resource_pingaccess_auth_token_management_test.go b/pingaccess/resource_pingaccess_auth_token_management_test.go index bd1523f6..18ac9894 100644 --- a/pingaccess/resource_pingaccess_auth_token_management_test.go +++ b/pingaccess/resource_pingaccess_auth_token_management_test.go @@ -5,9 +5,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_authn_req_list.go b/pingaccess/resource_pingaccess_authn_req_list.go index d59fc8a9..698ffe60 100644 --- a/pingaccess/resource_pingaccess_authn_req_list.go +++ b/pingaccess/resource_pingaccess_authn_req_list.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_authn_req_list_test.go b/pingaccess/resource_pingaccess_authn_req_list_test.go index 9142908a..af96c0bc 100644 --- a/pingaccess/resource_pingaccess_authn_req_list_test.go +++ b/pingaccess/resource_pingaccess_authn_req_list_test.go @@ -5,9 +5,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git 
a/pingaccess/resource_pingaccess_certificate.go b/pingaccess/resource_pingaccess_certificate.go index 1e1c3c66..65bd7b63 100644 --- a/pingaccess/resource_pingaccess_certificate.go +++ b/pingaccess/resource_pingaccess_certificate.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_certificate_test.go b/pingaccess/resource_pingaccess_certificate_test.go index 639be11c..25a3a7fc 100644 --- a/pingaccess/resource_pingaccess_certificate_test.go +++ b/pingaccess/resource_pingaccess_certificate_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_engine_listener.go b/pingaccess/resource_pingaccess_engine_listener.go index 0a4d3a80..c810f496 100644 --- a/pingaccess/resource_pingaccess_engine_listener.go +++ b/pingaccess/resource_pingaccess_engine_listener.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_engine_listener_test.go b/pingaccess/resource_pingaccess_engine_listener_test.go index 7c2eb596..90986a95 100644 --- a/pingaccess/resource_pingaccess_engine_listener_test.go +++ b/pingaccess/resource_pingaccess_engine_listener_test.go @@ -5,9 +5,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_hsm_provider.go b/pingaccess/resource_pingaccess_hsm_provider.go index 46858f5c..52771715 100644 --- a/pingaccess/resource_pingaccess_hsm_provider.go +++ b/pingaccess/resource_pingaccess_hsm_provider.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "encoding/json" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" "net/http" ) diff --git a/pingaccess/resource_pingaccess_hsm_provider_test.go b/pingaccess/resource_pingaccess_hsm_provider_test.go index b787656d..a98b37c7 100644 --- a/pingaccess/resource_pingaccess_hsm_provider_test.go +++ b/pingaccess/resource_pingaccess_hsm_provider_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" 
"github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_http_config_request_host_source.go b/pingaccess/resource_pingaccess_http_config_request_host_source.go index 5b5d1c68..0adb6de7 100644 --- a/pingaccess/resource_pingaccess_http_config_request_host_source.go +++ b/pingaccess/resource_pingaccess_http_config_request_host_source.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_http_config_request_host_source_test.go b/pingaccess/resource_pingaccess_http_config_request_host_source_test.go index f7e7b67e..58672973 100644 --- a/pingaccess/resource_pingaccess_http_config_request_host_source_test.go +++ b/pingaccess/resource_pingaccess_http_config_request_host_source_test.go @@ -5,9 +5,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_https_listener.go b/pingaccess/resource_pingaccess_https_listener.go index 2830a9c9..487a86e0 100644 --- a/pingaccess/resource_pingaccess_https_listener.go +++ b/pingaccess/resource_pingaccess_https_listener.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_https_listener_test.go b/pingaccess/resource_pingaccess_https_listener_test.go index bd583c4d..e44d9297 100644 --- a/pingaccess/resource_pingaccess_https_listener_test.go +++ b/pingaccess/resource_pingaccess_https_listener_test.go @@ -5,9 +5,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_identity_mapping.go b/pingaccess/resource_pingaccess_identity_mapping.go index a2f0336b..f868656a 100644 --- a/pingaccess/resource_pingaccess_identity_mapping.go +++ b/pingaccess/resource_pingaccess_identity_mapping.go @@ -7,7 +7,7 @@ import ( "log" "net/http" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_identity_mapping_test.go b/pingaccess/resource_pingaccess_identity_mapping_test.go index 
b1674919..77ca2b9b 100644 --- a/pingaccess/resource_pingaccess_identity_mapping_test.go +++ b/pingaccess/resource_pingaccess_identity_mapping_test.go @@ -5,8 +5,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_keypair.go b/pingaccess/resource_pingaccess_keypair.go index 3feb0803..609b0ff0 100644 --- a/pingaccess/resource_pingaccess_keypair.go +++ b/pingaccess/resource_pingaccess_keypair.go @@ -4,7 +4,7 @@ import ( "fmt" "strconv" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -63,6 +63,7 @@ func resourcePingAccessKeyPairSchema() map[string]*schema.Schema { "hsm_provider_id": { Type: schema.TypeString, Optional: true, + Default: "0", }, "issuer_dn": &schema.Schema{ Type: schema.TypeString, @@ -187,8 +188,8 @@ func resourcePingAccessKeyPairReadResult(d *schema.ResourceData, rv *pa.KeyPairV if err := d.Set("expires", rv.Expires); err != nil { return err } - if err := d.Set("hsm_provider_id", rv.HsmProviderId); err != nil { - + if err := d.Set("hsm_provider_id", strconv.Itoa(*rv.HsmProviderId)); err != nil { + return err } if err := d.Set("issuer_dn", rv.IssuerDn); err != nil { return err diff --git a/pingaccess/resource_pingaccess_keypair_test.go b/pingaccess/resource_pingaccess_keypair_test.go index 92096491..87a48569 100644 --- a/pingaccess/resource_pingaccess_keypair_test.go +++ b/pingaccess/resource_pingaccess_keypair_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_oauth_server.go b/pingaccess/resource_pingaccess_oauth_server.go index e12959a3..14d9f776 100644 --- a/pingaccess/resource_pingaccess_oauth_server.go +++ b/pingaccess/resource_pingaccess_oauth_server.go @@ -4,7 +4,7 @@ import ( "fmt" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourcePingAccessOAuthServer() *schema.Resource { diff --git a/pingaccess/resource_pingaccess_oauth_server_test.go b/pingaccess/resource_pingaccess_oauth_server_test.go index efbbdfb5..0a5d031c 100644 --- a/pingaccess/resource_pingaccess_oauth_server_test.go +++ b/pingaccess/resource_pingaccess_oauth_server_test.go @@ -6,9 +6,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git 
a/pingaccess/resource_pingaccess_pingfederate_admin.go b/pingaccess/resource_pingaccess_pingfederate_admin.go index c2c22623..aba2e785 100644 --- a/pingaccess/resource_pingaccess_pingfederate_admin.go +++ b/pingaccess/resource_pingaccess_pingfederate_admin.go @@ -5,7 +5,7 @@ import ( pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourcePingAccessPingFederateAdmin() *schema.Resource { diff --git a/pingaccess/resource_pingaccess_pingfederate_admin_test.go b/pingaccess/resource_pingaccess_pingfederate_admin_test.go index ad1d1aea..d90ecd21 100644 --- a/pingaccess/resource_pingaccess_pingfederate_admin_test.go +++ b/pingaccess/resource_pingaccess_pingfederate_admin_test.go @@ -2,12 +2,13 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "os" "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -18,13 +19,13 @@ func TestAccPingAccessPingFederateAdmin(t *testing.T) { CheckDestroy: testAccCheckPingAccessPingFederateAdminDestroy, Steps: []resource.TestStep{ { - Config: testAccPingAccessPingFederateAdminConfig("pf", true), + Config: testAccPingAccessPingFederateAdminConfig(os.Getenv("PINGFEDERATE_TEST_IP"), true), Check: resource.ComposeTestCheckFunc( testAccCheckPingAccessPingFederateAdminExists("pingaccess_pingfederate_admin.demo"), ), }, { - Config: testAccPingAccessPingFederateAdminConfig("pf", true), + Config: testAccPingAccessPingFederateAdminConfig(os.Getenv("PINGFEDERATE_TEST_IP"), true), Check: resource.ComposeTestCheckFunc( testAccCheckPingAccessPingFederateAdminExists("pingaccess_pingfederate_admin.demo"), ), diff --git a/pingaccess/resource_pingaccess_pingfederate_oauth.go b/pingaccess/resource_pingaccess_pingfederate_oauth.go index c71db4ac..ad7833eb 100644 --- a/pingaccess/resource_pingaccess_pingfederate_oauth.go +++ b/pingaccess/resource_pingaccess_pingfederate_oauth.go @@ -5,7 +5,7 @@ import ( pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourcePingAccessPingFederateOAuth() *schema.Resource { diff --git a/pingaccess/resource_pingaccess_pingfederate_oauth_test.go b/pingaccess/resource_pingaccess_pingfederate_oauth_test.go index 1eb5c02c..bedf6fcf 100644 --- a/pingaccess/resource_pingaccess_pingfederate_oauth_test.go +++ b/pingaccess/resource_pingaccess_pingfederate_oauth_test.go @@ -2,12 +2,12 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git 
a/pingaccess/resource_pingaccess_pingfederate_runtime.go b/pingaccess/resource_pingaccess_pingfederate_runtime.go index a7539a2f..8da25a22 100644 --- a/pingaccess/resource_pingaccess_pingfederate_runtime.go +++ b/pingaccess/resource_pingaccess_pingfederate_runtime.go @@ -5,7 +5,7 @@ import ( pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourcePingAccessPingFederateRuntime() *schema.Resource { diff --git a/pingaccess/resource_pingaccess_pingfederate_runtime_test.go b/pingaccess/resource_pingaccess_pingfederate_runtime_test.go index 68d1a277..f45e406d 100644 --- a/pingaccess/resource_pingaccess_pingfederate_runtime_test.go +++ b/pingaccess/resource_pingaccess_pingfederate_runtime_test.go @@ -2,12 +2,13 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "os" "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -18,13 +19,13 @@ func TestAccPingAccessPingFederateRuntime(t *testing.T) { CheckDestroy: testAccCheckPingAccessPingFederateRuntimeDestroy, Steps: []resource.TestStep{ { - Config: testAccPingAccessPingFederateRuntimeConfig("https://pf:9031"), + Config: testAccPingAccessPingFederateRuntimeConfig(fmt.Sprintf("https://%s:9031", os.Getenv("PINGFEDERATE_TEST_IP"))), Check: resource.ComposeTestCheckFunc( testAccCheckPingAccessPingFederateRuntimeExists("pingaccess_pingfederate_runtime.demo_pfr"), ), }, { - Config: testAccPingAccessPingFederateRuntimeConfig("https://pf:9031"), + Config: testAccPingAccessPingFederateRuntimeConfig(fmt.Sprintf("https://%s:9031", os.Getenv("PINGFEDERATE_TEST_IP"))), Check: resource.ComposeTestCheckFunc( testAccCheckPingAccessPingFederateRuntimeExists("pingaccess_pingfederate_runtime.demo_pfr"), ), diff --git a/pingaccess/resource_pingaccess_rule.go b/pingaccess/resource_pingaccess_rule.go index db7c023c..3782f123 100644 --- a/pingaccess/resource_pingaccess_rule.go +++ b/pingaccess/resource_pingaccess_rule.go @@ -1,10 +1,11 @@ package pingaccess import ( + "context" "encoding/json" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -42,7 +43,7 @@ func resourcePingAccessRule() *schema.Resource { }, }, }, - CustomizeDiff: customdiff.ComputedIf("configuration", func(diff *schema.ResourceDiff, meta interface{}) bool { + CustomizeDiff: customdiff.ComputedIf("configuration", func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool { return diff.HasChange("configuration") }), } diff --git a/pingaccess/resource_pingaccess_rule_test.go b/pingaccess/resource_pingaccess_rule_test.go index 92c9e47f..cbc056f9 100644 --- a/pingaccess/resource_pingaccess_rule_test.go +++ b/pingaccess/resource_pingaccess_rule_test.go @@ -7,8 +7,8 @@ import ( "net/url" "testing" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_rulesets.go b/pingaccess/resource_pingaccess_rulesets.go index cbf7f30e..ac49bf86 100644 --- a/pingaccess/resource_pingaccess_rulesets.go +++ b/pingaccess/resource_pingaccess_rulesets.go @@ -4,7 +4,7 @@ import ( "fmt" "strconv" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_rulesets_test.go b/pingaccess/resource_pingaccess_rulesets_test.go index 968347f5..c79392ee 100644 --- a/pingaccess/resource_pingaccess_rulesets_test.go +++ b/pingaccess/resource_pingaccess_rulesets_test.go @@ -7,8 +7,8 @@ import ( "net/url" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_site.go b/pingaccess/resource_pingaccess_site.go index f4c7b818..991b40ff 100644 --- a/pingaccess/resource_pingaccess_site.go +++ b/pingaccess/resource_pingaccess_site.go @@ -5,7 +5,7 @@ import ( "fmt" "net/http" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_site_authenticator.go b/pingaccess/resource_pingaccess_site_authenticator.go index da715798..2c87f9a5 100644 --- a/pingaccess/resource_pingaccess_site_authenticator.go +++ b/pingaccess/resource_pingaccess_site_authenticator.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "encoding/json" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" "net/http" ) diff --git a/pingaccess/resource_pingaccess_site_authenticator_test.go b/pingaccess/resource_pingaccess_site_authenticator_test.go index 4c7d67e2..3c24bc0a 100644 --- a/pingaccess/resource_pingaccess_site_authenticator_test.go +++ b/pingaccess/resource_pingaccess_site_authenticator_test.go @@ -12,9 +12,9 @@ import ( "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_site_test.go b/pingaccess/resource_pingaccess_site_test.go index 3320c304..2da45734 100644 --- a/pingaccess/resource_pingaccess_site_test.go +++ b/pingaccess/resource_pingaccess_site_test.go @@ -11,9 +11,9 @@ import ( "github.com/google/go-cmp/cmp" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_third_party_service.go b/pingaccess/resource_pingaccess_third_party_service.go index 8936f3ef..64fc645c 100644 --- a/pingaccess/resource_pingaccess_third_party_service.go +++ b/pingaccess/resource_pingaccess_third_party_service.go @@ -5,7 +5,7 @@ import ( "fmt" "net/http" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -93,7 +93,7 @@ func resourcePingAccessThirdPartyServiceCreate(d *schema.ResourceData, m interfa return fmt.Errorf("Error creating third party service: %s", err) } - d.SetId(result.Id.String()) + d.SetId(*result.Id) return resourcePingAccessThirdPartyServiceReadResult(d, result) } diff --git a/pingaccess/resource_pingaccess_third_party_service_test.go b/pingaccess/resource_pingaccess_third_party_service_test.go index fb0aff5e..cde341f5 100644 --- a/pingaccess/resource_pingaccess_third_party_service_test.go +++ b/pingaccess/resource_pingaccess_third_party_service_test.go @@ -5,9 +5,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_trusted_certificate_groups.go b/pingaccess/resource_pingaccess_trusted_certificate_groups.go index 23c6ad9c..9fe9e4a1 100644 --- a/pingaccess/resource_pingaccess_trusted_certificate_groups.go +++ b/pingaccess/resource_pingaccess_trusted_certificate_groups.go @@ -6,7 +6,7 @@ import ( "net/http" "strconv" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_trusted_certificate_groups_test.go b/pingaccess/resource_pingaccess_trusted_certificate_groups_test.go index c9ef412d..f98fc20d 100644 --- a/pingaccess/resource_pingaccess_trusted_certificate_groups_test.go +++ b/pingaccess/resource_pingaccess_trusted_certificate_groups_test.go @@ -5,9 +5,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git 
a/pingaccess/resource_pingaccess_virtualhost.go b/pingaccess/resource_pingaccess_virtualhost.go index c08d8e4e..768b4930 100644 --- a/pingaccess/resource_pingaccess_virtualhost.go +++ b/pingaccess/resource_pingaccess_virtualhost.go @@ -3,7 +3,7 @@ package pingaccess import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_virtualhost_test.go b/pingaccess/resource_pingaccess_virtualhost_test.go index 8feb4f20..5630b083 100644 --- a/pingaccess/resource_pingaccess_virtualhost_test.go +++ b/pingaccess/resource_pingaccess_virtualhost_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_websession.go b/pingaccess/resource_pingaccess_websession.go index 5ba691d1..5307b399 100644 --- a/pingaccess/resource_pingaccess_websession.go +++ b/pingaccess/resource_pingaccess_websession.go @@ -5,7 +5,7 @@ import ( "fmt" "net/http" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/resource_pingaccess_websession_test.go b/pingaccess/resource_pingaccess_websession_test.go index c47dce2e..ca0b5899 100644 --- a/pingaccess/resource_pingaccess_websession_test.go +++ b/pingaccess/resource_pingaccess_websession_test.go @@ -8,9 +8,9 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/pingaccess/structures.go b/pingaccess/structures.go index 243cb15e..c1ad5a9c 100644 --- a/pingaccess/structures.go +++ b/pingaccess/structures.go @@ -9,8 +9,8 @@ import ( "log" "strconv" - "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) diff --git a/vendor/github.com/mitchellh/colorstring/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE similarity index 96% rename from vendor/github.com/mitchellh/colorstring/LICENSE rename to vendor/github.com/Azure/go-ansiterm/LICENSE index 22985159..e3d9a64d 100644 --- a/vendor/github.com/mitchellh/colorstring/LICENSE +++ b/vendor/github.com/Azure/go-ansiterm/LICENSE @@ -1,6 +1,6 @@ The MIT 
License (MIT) -Copyright (c) 2014 Mitchell Hashimoto +Copyright (c) 2015 Microsoft Corporation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md new file mode 100644 index 00000000..261c041e --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/README.md @@ -0,0 +1,12 @@ +# go-ansiterm + +This is a cross-platform ANSI terminal emulation library. It reads a stream of ANSI characters and produces the appropriate function calls. The results of the function calls are platform-dependent. + +For example, the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform-specific work must be done to cause the cursor to move up one position. + +The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations: one for tests (test_event_handler.go) to validate that the expected events are being produced and called; the other is a Windows implementation (winterm/win_event_handler.go). + +See parser_test.go for examples exercising the state machine and generating appropriate function calls. + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
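+
+A minimal usage sketch (illustrative only; it assumes an `AnsiEventHandler` implementation named `handler`, which the library does not provide for you):
+
+```go
+parser := ansiterm.CreateParser("Ground", handler)
+
+// "ESC [ 2 ; 5 H" is CUP (CUrsor Position); parsing it ends in a
+// handler.CUP(2, 5) call on the event handler.
+if _, err := parser.Parse([]byte("\x1b[2;5H")); err != nil {
+    log.Fatal(err)
+}
+```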
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go new file mode 100644 index 00000000..96504a33 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/constants.go @@ -0,0 +1,188 @@ +package ansiterm + +const LogEnv = "DEBUG_TERMINAL" + +// ANSI constants +// References: +// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm +// -- http://man7.org/linux/man-pages/man4/console_codes.4.html +// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html +// -- http://en.wikipedia.org/wiki/ANSI_escape_code +// -- http://vt100.net/emu/dec_ansi_parser +// -- http://vt100.net/emu/vt500_parser.svg +// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html +// -- http://www.inwap.com/pdp10/ansicode.txt +const ( + // ECMA-48 Set Graphics Rendition + // Note: + // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved + // -- Fonts could possibly be supported via SetCurrentConsoleFontEx + // -- Windows does not expose the per-window cursor (i.e., caret) blink times + ANSI_SGR_RESET = 0 + ANSI_SGR_BOLD = 1 + ANSI_SGR_DIM = 2 + _ANSI_SGR_ITALIC = 3 + ANSI_SGR_UNDERLINE = 4 + _ANSI_SGR_BLINKSLOW = 5 + _ANSI_SGR_BLINKFAST = 6 + ANSI_SGR_REVERSE = 7 + _ANSI_SGR_INVISIBLE = 8 + _ANSI_SGR_LINETHROUGH = 9 + _ANSI_SGR_FONT_00 = 10 + _ANSI_SGR_FONT_01 = 11 + _ANSI_SGR_FONT_02 = 12 + _ANSI_SGR_FONT_03 = 13 + _ANSI_SGR_FONT_04 = 14 + _ANSI_SGR_FONT_05 = 15 + _ANSI_SGR_FONT_06 = 16 + _ANSI_SGR_FONT_07 = 17 + _ANSI_SGR_FONT_08 = 18 + _ANSI_SGR_FONT_09 = 19 + _ANSI_SGR_FONT_10 = 20 + _ANSI_SGR_DOUBLEUNDERLINE = 21 + ANSI_SGR_BOLD_DIM_OFF = 22 + _ANSI_SGR_ITALIC_OFF = 23 + ANSI_SGR_UNDERLINE_OFF = 24 + _ANSI_SGR_BLINK_OFF = 25 + _ANSI_SGR_RESERVED_00 = 26 + ANSI_SGR_REVERSE_OFF = 27 + _ANSI_SGR_INVISIBLE_OFF = 28 + _ANSI_SGR_LINETHROUGH_OFF = 29 + ANSI_SGR_FOREGROUND_BLACK = 30 + ANSI_SGR_FOREGROUND_RED = 31 + ANSI_SGR_FOREGROUND_GREEN = 32 + ANSI_SGR_FOREGROUND_YELLOW = 33 + ANSI_SGR_FOREGROUND_BLUE = 34 + ANSI_SGR_FOREGROUND_MAGENTA = 35 + ANSI_SGR_FOREGROUND_CYAN = 36 + ANSI_SGR_FOREGROUND_WHITE = 37 + _ANSI_SGR_RESERVED_01 = 38 + ANSI_SGR_FOREGROUND_DEFAULT = 39 + ANSI_SGR_BACKGROUND_BLACK = 40 + ANSI_SGR_BACKGROUND_RED = 41 + ANSI_SGR_BACKGROUND_GREEN = 42 + ANSI_SGR_BACKGROUND_YELLOW = 43 + ANSI_SGR_BACKGROUND_BLUE = 44 + ANSI_SGR_BACKGROUND_MAGENTA = 45 + ANSI_SGR_BACKGROUND_CYAN = 46 + ANSI_SGR_BACKGROUND_WHITE = 47 + _ANSI_SGR_RESERVED_02 = 48 + ANSI_SGR_BACKGROUND_DEFAULT = 49 + // 50 - 65: Unsupported + + ANSI_MAX_CMD_LENGTH = 4096 + + MAX_INPUT_EVENTS = 128 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 + + ANSI_BEL = 0x07 + ANSI_BACKSPACE = 0x08 + ANSI_TAB = 0x09 + ANSI_LINE_FEED = 0x0A + ANSI_VERTICAL_TAB = 0x0B + ANSI_FORM_FEED = 0x0C + ANSI_CARRIAGE_RETURN = 0x0D + ANSI_ESCAPE_PRIMARY = 0x1B + ANSI_ESCAPE_SECONDARY = 0x5B + ANSI_OSC_STRING_ENTRY = 0x5D + ANSI_COMMAND_FIRST = 0x40 + ANSI_COMMAND_LAST = 0x7E + DCS_ENTRY = 0x90 + CSI_ENTRY = 0x9B + OSC_STRING = 0x9D + ANSI_PARAMETER_SEP = ";" + ANSI_CMD_G0 = '(' + ANSI_CMD_G1 = ')' + ANSI_CMD_G2 = '*' + ANSI_CMD_G3 = '+' + ANSI_CMD_DECPNM = '>' + ANSI_CMD_DECPAM = '=' + ANSI_CMD_OSC = ']' + ANSI_CMD_STR_TERM = '\\' + + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = "\x1BO" + + FILL_CHARACTER = ' ' +) 
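+// For example, the Set Graphics Rendition sequence is ESC [ <param> ; ... ; <param> m,
+// where each parameter is one of the decimal SGR values above joined by
+// ANSI_PARAMETER_SEP. A sketch of emitting bold red text on an ANSI-capable
+// terminal (assuming only the standard library's fmt package):
+//
+//	fmt.Printf("\x1b[%d;%dmbold red\x1b[%dm\n",
+//		ANSI_SGR_BOLD, ANSI_SGR_FOREGROUND_RED, ANSI_SGR_RESET)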
+ +func getByteRange(start byte, end byte) []byte { + bytes := make([]byte, 0, 32) + for i := start; i <= end; i++ { + bytes = append(bytes, byte(i)) + } + + return bytes +} + +var toGroundBytes = getToGroundBytes() +var executors = getExecuteBytes() + +// SPACE 20+A0 hex Always and everywhere a blank space +// Intermediate 20-2F hex !"#$%&'()*+,-./ +var intermeds = getByteRange(0x20, 0x2F) + +// Parameters 30-3F hex 0123456789:;<=>? +// CSI Parameters 30-39, 3B hex 0123456789; +var csiParams = getByteRange(0x30, 0x3F) + +var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) + +// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ +var upperCase = getByteRange(0x40, 0x5F) + +// Lowercase 60-7E hex `abcdefghijklmnopqrstuvwxyz{|}~ +var lowerCase = getByteRange(0x60, 0x7E) + +// Alphabetics 40-7E hex (all of upper and lower case) +var alphabetics = append(upperCase, lowerCase...) + +var printables = getByteRange(0x20, 0x7F) + +var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) +var escapeToGroundBytes = getEscapeToGroundBytes() + +// See http://www.vt100.net/emu/vt500_parser.png for a description of the complex +// byte ranges below + +func getEscapeToGroundBytes() []byte { + escapeToGroundBytes := getByteRange(0x30, 0x4F) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) + escapeToGroundBytes = append(escapeToGroundBytes, 0x59) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) + return escapeToGroundBytes +} + +func getExecuteBytes() []byte { + executeBytes := getByteRange(0x00, 0x17) + executeBytes = append(executeBytes, 0x19) + executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) + return executeBytes +} + +func getToGroundBytes() []byte { + groundBytes := []byte{0x18} + groundBytes = append(groundBytes, 0x1A) + groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) + groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
+ groundBytes = append(groundBytes, 0x99) + groundBytes = append(groundBytes, 0x9A) + groundBytes = append(groundBytes, 0x9C) + return groundBytes +} + +// Delete 7F hex Always and everywhere ignored +// C1 Control 80-9F hex 32 additional control characters +// G1 Displayable A1-FE hex 94 additional displayable characters +// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go new file mode 100644 index 00000000..8d66e777 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/context.go @@ -0,0 +1,7 @@ +package ansiterm + +type ansiContext struct { + currentChar byte + paramBuffer []byte + interBuffer []byte +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go new file mode 100644 index 00000000..bcbe00d0 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -0,0 +1,49 @@ +package ansiterm + +type csiEntryState struct { + baseState +} + +func (csiState csiEntryState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiEntry::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + return csiState.parser.csiParam, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiEntryState) Transition(s state) error { + csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + case csiState.parser.csiParam: + switch { + case sliceContains(csiParams, csiState.parser.context.currentChar): + csiState.parser.collectParam() + case sliceContains(intermeds, csiState.parser.context.currentChar): + csiState.parser.collectInter() + } + } + + return nil +} + +func (csiState csiEntryState) Enter() error { + csiState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go new file mode 100644 index 00000000..7ed5e01c --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -0,0 +1,38 @@ +package ansiterm + +type csiParamState struct { + baseState +} + +func (csiState csiParamState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiParam::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + csiState.parser.collectParam() + return csiState, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiParamState) Transition(s state) error { + csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go new file 
mode 100644 index 00000000..1c719db9 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -0,0 +1,36 @@ +package ansiterm + +type escapeIntermediateState struct { + baseState +} + +func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeIntermediateState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(intermeds, b): + return escState, escState.parser.collectInter() + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeIntermediateToGroundBytes, b): + return escState.parser.ground, nil + } + + return escState, nil +} + +func (escState escapeIntermediateState) Transition(s state) error { + escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go new file mode 100644 index 00000000..6390abd2 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -0,0 +1,47 @@ +package ansiterm + +type escapeState struct { + baseState +} + +func (escState escapeState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case b == ANSI_ESCAPE_SECONDARY: + return escState.parser.csiEntry, nil + case b == ANSI_OSC_STRING_ENTRY: + return escState.parser.oscString, nil + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeToGroundBytes, b): + return escState.parser.ground, nil + case sliceContains(intermeds, b): + return escState.parser.escapeIntermediate, nil + } + + return escState, nil +} + +func (escState escapeState) Transition(s state) error { + escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + case escState.parser.escapeIntermediate: + return escState.parser.collectInter() + } + + return nil +} + +func (escState escapeState) Enter() error { + escState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go new file mode 100644 index 00000000..98087b38 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go @@ -0,0 +1,90 @@ +package ansiterm + +type AnsiEventHandler interface { + // Print + Print(b byte) error + + // Execute C0 commands + Execute(b byte) error + + // CUrsor Up + CUU(int) error + + // CUrsor Down + CUD(int) error + + // CUrsor Forward + CUF(int) error + + // CUrsor Backward + CUB(int) error + + // Cursor to Next Line + CNL(int) error + + // Cursor to Previous Line + CPL(int) error + + // Cursor Horizontal position Absolute + CHA(int) error + + // Vertical line Position Absolute + VPA(int) error + + // CUrsor Position + CUP(int, int) error + + // Horizontal and Vertical Position (depends on PUM) + HVP(int, int) error + + // Text Cursor Enable Mode + DECTCEM(bool) error + + // Origin Mode + DECOM(bool) error + + // 132 
Column Mode + DECCOLM(bool) error + + // Erase in Display + ED(int) error + + // Erase in Line + EL(int) error + + // Insert Line + IL(int) error + + // Delete Line + DL(int) error + + // Insert Character + ICH(int) error + + // Delete Character + DCH(int) error + + // Set Graphics Rendition + SGR([]int) error + + // Pan Down + SU(int) error + + // Pan Up + SD(int) error + + // Device Attributes + DA([]string) error + + // Set Top and Bottom Margins + DECSTBM(int, int) error + + // Index + IND() error + + // Reverse Index + RI() error + + // Flush updates from previous commands + Flush() error +} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go new file mode 100644 index 00000000..52451e94 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -0,0 +1,24 @@ +package ansiterm + +type groundState struct { + baseState +} + +func (gs groundState) Handle(b byte) (s state, e error) { + gs.parser.context.currentChar = b + + nextState, err := gs.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(printables, b): + return gs, gs.parser.print() + + case sliceContains(executors, b): + return gs, gs.parser.execute() + } + + return gs, nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go new file mode 100644 index 00000000..593b10ab --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -0,0 +1,31 @@ +package ansiterm + +type oscStringState struct { + baseState +} + +func (oscState oscStringState) Handle(b byte) (s state, e error) { + oscState.parser.logf("OscString::Handle %#x", b) + nextState, err := oscState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case isOscStringTerminator(b): + return oscState.parser.ground, nil + } + + return oscState, nil +} + +// See below for OSC string terminators for linux +// http://man7.org/linux/man-pages/man4/console_codes.4.html +func isOscStringTerminator(b byte) bool { + + if b == ANSI_BEL || b == 0x5C { + return true + } + + return false +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go new file mode 100644 index 00000000..03cec7ad --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -0,0 +1,151 @@ +package ansiterm + +import ( + "errors" + "log" + "os" +) + +type AnsiParser struct { + currState state + eventHandler AnsiEventHandler + context *ansiContext + csiEntry state + csiParam state + dcsEntry state + escape state + escapeIntermediate state + error state + ground state + oscString state + stateMap []state + + logf func(string, ...interface{}) +} + +type Option func(*AnsiParser) + +func WithLogf(f func(string, ...interface{})) Option { + return func(ap *AnsiParser) { + ap.logf = f + } +} + +func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { + ap := &AnsiParser{ + eventHandler: evtHandler, + context: &ansiContext{}, + } + for _, o := range opts { + o(ap) + } + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("ansiParser.log") + logger := log.New(logFile, "", log.LstdFlags) + if ap.logf != nil { + l := ap.logf + ap.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) 
+ } + } else { + ap.logf = logger.Printf + } + } + + if ap.logf == nil { + ap.logf = func(string, ...interface{}) {} + } + + ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} + ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} + ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} + ap.escape = escapeState{baseState{name: "Escape", parser: ap}} + ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} + ap.error = errorState{baseState{name: "Error", parser: ap}} + ap.ground = groundState{baseState{name: "Ground", parser: ap}} + ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} + + ap.stateMap = []state{ + ap.csiEntry, + ap.csiParam, + ap.dcsEntry, + ap.escape, + ap.escapeIntermediate, + ap.error, + ap.ground, + ap.oscString, + } + + ap.currState = getState(initialState, ap.stateMap) + + ap.logf("CreateParser: parser %p", ap) + return ap +} + +func getState(name string, states []state) state { + for _, el := range states { + if el.Name() == name { + return el + } + } + + return nil +} + +func (ap *AnsiParser) Parse(bytes []byte) (int, error) { + for i, b := range bytes { + if err := ap.handle(b); err != nil { + return i, err + } + } + + return len(bytes), ap.eventHandler.Flush() +} + +func (ap *AnsiParser) handle(b byte) error { + ap.context.currentChar = b + newState, err := ap.currState.Handle(b) + if err != nil { + return err + } + + if newState == nil { + ap.logf("WARNING: newState is nil") + return errors.New("New state of 'nil' is invalid.") + } + + if newState != ap.currState { + if err := ap.changeState(newState); err != nil { + return err + } + } + + return nil +} + +func (ap *AnsiParser) changeState(newState state) error { + ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) + + // Exit old state + if err := ap.currState.Exit(); err != nil { + ap.logf("Exit state '%s' failed with: '%v'", ap.currState.Name(), err) + return err + } + + // Perform transition action + if err := ap.currState.Transition(newState); err != nil { + ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name(), err) + return err + } + + // Enter new state + if err := newState.Enter(); err != nil { + ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) + return err + } + + ap.currState = newState + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go new file mode 100644 index 00000000..de0a1f9c --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go @@ -0,0 +1,99 @@ +package ansiterm + +import ( + "strconv" +) + +func parseParams(bytes []byte) ([]string, error) { + paramBuff := make([]byte, 0, 0) + params := []string{} + + for _, v := range bytes { + if v == ';' { + if len(paramBuff) > 0 { + // Completed parameter, append it to the list + s := string(paramBuff) + params = append(params, s) + paramBuff = make([]byte, 0, 0) + } + } else { + paramBuff = append(paramBuff, v) + } + } + + // Last parameter may not be terminated with ';' + if len(paramBuff) > 0 { + s := string(paramBuff) + params = append(params, s) + } + + return params, nil +} + +func parseCmd(context ansiContext) (string, error) { + return string(context.currentChar), nil +} + +func getInt(params []string, dflt int) int { + i := getInts(params, 1, dflt)[0] + return i +} + +func getInts(params []string, minCount int, dflt int) 
[]int { + ints := []int{} + + for _, v := range params { + i, _ := strconv.Atoi(v) + // Zero is mapped to the default value in VT100. + if i == 0 { + i = dflt + } + ints = append(ints, i) + } + + if len(ints) < minCount { + remaining := minCount - len(ints) + for i := 0; i < remaining; i++ { + ints = append(ints, dflt) + } + } + + return ints +} + +func (ap *AnsiParser) modeDispatch(param string, set bool) error { + switch param { + case "?3": + return ap.eventHandler.DECCOLM(set) + case "?6": + return ap.eventHandler.DECOM(set) + case "?25": + return ap.eventHandler.DECTCEM(set) + } + return nil +} + +func (ap *AnsiParser) hDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], true) + } + + return nil +} + +func (ap *AnsiParser) lDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], false) + } + + return nil +} + +func getEraseParam(params []string) int { + param := getInt(params, 0) + if param < 0 || 3 < param { + param = 0 + } + + return param +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go new file mode 100644 index 00000000..0bb5e51e --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go @@ -0,0 +1,119 @@ +package ansiterm + +func (ap *AnsiParser) collectParam() error { + currChar := ap.context.currentChar + ap.logf("collectParam %#x", currChar) + ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) + return nil +} + +func (ap *AnsiParser) collectInter() error { + currChar := ap.context.currentChar + ap.logf("collectInter %#x", currChar) + ap.context.interBuffer = append(ap.context.interBuffer, currChar) + return nil +} + +func (ap *AnsiParser) escDispatch() error { + cmd, _ := parseCmd(*ap.context) + intermeds := ap.context.interBuffer + ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) + ap.logf("escDispatch: %v(%v)", cmd, intermeds) + + switch cmd { + case "D": // IND + return ap.eventHandler.IND() + case "E": // NEL, equivalent to CRLF + err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) + if err == nil { + err = ap.eventHandler.Execute(ANSI_LINE_FEED) + } + return err + case "M": // RI + return ap.eventHandler.RI() + } + + return nil +} + +func (ap *AnsiParser) csiDispatch() error { + cmd, _ := parseCmd(*ap.context) + params, _ := parseParams(ap.context.paramBuffer) + ap.logf("Parsed params: %v with length: %d", params, len(params)) + + ap.logf("csiDispatch: %v(%v)", cmd, params) + + switch cmd { + case "@": + return ap.eventHandler.ICH(getInt(params, 1)) + case "A": + return ap.eventHandler.CUU(getInt(params, 1)) + case "B": + return ap.eventHandler.CUD(getInt(params, 1)) + case "C": + return ap.eventHandler.CUF(getInt(params, 1)) + case "D": + return ap.eventHandler.CUB(getInt(params, 1)) + case "E": + return ap.eventHandler.CNL(getInt(params, 1)) + case "F": + return ap.eventHandler.CPL(getInt(params, 1)) + case "G": + return ap.eventHandler.CHA(getInt(params, 1)) + case "H": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.CUP(x, y) + case "J": + param := getEraseParam(params) + return ap.eventHandler.ED(param) + case "K": + param := getEraseParam(params) + return ap.eventHandler.EL(param) + case "L": + return ap.eventHandler.IL(getInt(params, 1)) + case "M": + return ap.eventHandler.DL(getInt(params, 1)) + case "P": + return ap.eventHandler.DCH(getInt(params, 1)) + case "S": + return ap.eventHandler.SU(getInt(params, 1)) + 
case "T": + return ap.eventHandler.SD(getInt(params, 1)) + case "c": + return ap.eventHandler.DA(params) + case "d": + return ap.eventHandler.VPA(getInt(params, 1)) + case "f": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.HVP(x, y) + case "h": + return ap.hDispatch(params) + case "l": + return ap.lDispatch(params) + case "m": + return ap.eventHandler.SGR(getInts(params, 1, 0)) + case "r": + ints := getInts(params, 2, 1) + top, bottom := ints[0], ints[1] + return ap.eventHandler.DECSTBM(top, bottom) + default: + ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) + return nil + } + +} + +func (ap *AnsiParser) print() error { + return ap.eventHandler.Print(ap.context.currentChar) +} + +func (ap *AnsiParser) clear() error { + ap.context = &ansiContext{} + return nil +} + +func (ap *AnsiParser) execute() error { + return ap.eventHandler.Execute(ap.context.currentChar) +} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go new file mode 100644 index 00000000..f2ea1fcd --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/states.go @@ -0,0 +1,71 @@ +package ansiterm + +type stateID int + +type state interface { + Enter() error + Exit() error + Handle(byte) (state, error) + Name() string + Transition(state) error +} + +type baseState struct { + name string + parser *AnsiParser +} + +func (base baseState) Enter() error { + return nil +} + +func (base baseState) Exit() error { + return nil +} + +func (base baseState) Handle(b byte) (s state, e error) { + + switch { + case b == CSI_ENTRY: + return base.parser.csiEntry, nil + case b == DCS_ENTRY: + return base.parser.dcsEntry, nil + case b == ANSI_ESCAPE_PRIMARY: + return base.parser.escape, nil + case b == OSC_STRING: + return base.parser.oscString, nil + case sliceContains(toGroundBytes, b): + return base.parser.ground, nil + } + + return nil, nil +} + +func (base baseState) Name() string { + return base.name +} + +func (base baseState) Transition(s state) error { + if s == base.parser.ground { + execBytes := []byte{0x18} + execBytes = append(execBytes, 0x1A) + execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) + execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
+ execBytes = append(execBytes, 0x99) + execBytes = append(execBytes, 0x9A) + + if sliceContains(execBytes, base.parser.context.currentChar) { + return base.parser.execute() + } + } + + return nil +} + +type dcsEntryState struct { + baseState +} + +type errorState struct { + baseState +} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go new file mode 100644 index 00000000..39211449 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/utilities.go @@ -0,0 +1,21 @@ +package ansiterm + +import ( + "strconv" +) + +func sliceContains(bytes []byte, b byte) bool { + for _, v := range bytes { + if v == b { + return true + } + } + + return false +} + +func convertBytesToInteger(bytes []byte) int { + s := string(bytes) + i, _ := strconv.Atoi(s) + return i +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go new file mode 100644 index 00000000..a6732797 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -0,0 +1,182 @@ +// +build windows + +package winterm + +import ( + "fmt" + "os" + "strconv" + "strings" + "syscall" + + "github.com/Azure/go-ansiterm" +) + +// Windows keyboard constants +// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. +const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key + + RIGHT_ALT_PRESSED = 0x0001 + LEFT_ALT_PRESSED = 0x0002 + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + SHIFT_PRESSED = 0x0010 + NUMLOCK_ON = 0x0020 + SCROLLLOCK_ON = 0x0040 + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 +) + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func newAnsiCommand(command []byte) *ansiCommand { + + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + + // last char is command character + lastCharIndex := len(command) - 1 + + ac := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) + } + + return ac +} + +func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { + if index < 0 || index >= len(ac.Parameters) { + return defaultValue + } + + param, err := 
strconv.ParseInt(ac.Parameters[index], 10, 16) + if err != nil { + return defaultValue + } + + return int16(param) +} + +func (ac *ansiCommand) String() string { + return fmt.Sprintf("0x%v \"%v\" (\"%v\")", + bytesToHex(ac.CommandBytes), + ac.Command, + strings.Join(ac.Parameters, "\",\"")) +} + +// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. +// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. +func isAnsiCommandChar(b byte) bool { + switch { + case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: + return true + case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: + // non-CSI escape sequence terminator + return true + case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: + // String escape sequence terminator + return true + } + return false +} + +func isXtermOscSequence(command []byte, current byte) bool { + return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) +} + +func isCharacterSelectionCmdChar(b byte) bool { + return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) +} + +// bytesToHex converts a slice of bytes to a human-readable string. +func bytesToHex(b []byte) string { + hex := make([]string, len(b)) + for i, ch := range b { + hex[i] = fmt.Sprintf("%X", ch) + } + return strings.Join(hex, "") +} + +// ensureInRange adjusts the passed value, if necessary, to ensure it is within +// the passed min / max range. +func ensureInRange(n int16, min int16, max int16) int16 { + if n < min { + return min + } else if n > max { + return max + } else { + return n + } +} + +func GetStdFile(nFile int) (*os.File, uintptr) { + var file *os.File + switch nFile { + case syscall.STD_INPUT_HANDLE: + file = os.Stdin + case syscall.STD_OUTPUT_HANDLE: + file = os.Stdout + case syscall.STD_ERROR_HANDLE: + file = os.Stderr + default: + panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) + } + + fd, err := syscall.GetStdHandle(nFile) + if err != nil { + panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) + } + + return file, uintptr(fd) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go new file mode 100644 index 00000000..6055e33b --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -0,0 +1,327 @@ +// +build windows + +package winterm + +import ( + "fmt" + "syscall" + "unsafe" +) + +//=========================================================================================================== +// IMPORTANT NOTE: +// +// The methods below make extensive use of the "unsafe" package to obtain the required pointers. +// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack +// variables) the pointers reference *before* the API completes. +// +// As a result, in those cases, the code must hint that the variables remain active by invoking the +// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer +// require unsafe pointers.
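+//
+// On toolchains newer than this note contemplates (Go 1.7+), the same
+// liveness hint can be written with runtime.KeepAlive; a sketch of that
+// equivalent idiom (assumes the runtime package is imported):
+//
+//	info := &CONSOLE_CURSOR_INFO{}
+//	r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(info)), 0)
+//	runtime.KeepAlive(info) // info stays reachable until the call returns
+//
+// The dummy "use" method below predates that API.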
+// +// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform +// the garbage collector the variables remain in use if: +// +// -- The value is not a pointer (e.g., int32, struct) +// -- The value is not referenced by the method after passing the pointer to Windows +// +// See http://golang.org/doc/go1.3. +//=========================================================================================================== + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") + scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") +) + +// Windows Console constants +const ( + // Console modes + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + ENABLE_AUTO_POSITION = 0x0100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 + + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + DISABLE_NEWLINE_AUTO_RETURN = 0x0008 + ENABLE_LVB_GRID_WORLDWIDE = 0x0010 + + // Character attributes + // Note: + // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). + // Clearing all foreground or background colors results in black; setting all creates white. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. + FOREGROUND_BLUE uint16 = 0x0001 + FOREGROUND_GREEN uint16 = 0x0002 + FOREGROUND_RED uint16 = 0x0004 + FOREGROUND_INTENSITY uint16 = 0x0008 + FOREGROUND_MASK uint16 = 0x000F + + BACKGROUND_BLUE uint16 = 0x0010 + BACKGROUND_GREEN uint16 = 0x0020 + BACKGROUND_RED uint16 = 0x0040 + BACKGROUND_INTENSITY uint16 = 0x0080 + BACKGROUND_MASK uint16 = 0x00F0 + + COMMON_LVB_MASK uint16 = 0xFF00 + COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 + COMMON_LVB_UNDERSCORE uint16 = 0x8000 + + // Input event types + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
+ KEY_EVENT = 0x0001 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 + MENU_EVENT = 0x0008 + FOCUS_EVENT = 0x0010 + + // WaitForSingleObject return codes + WAIT_ABANDONED = 0x00000080 + WAIT_FAILED = 0xFFFFFFFF + WAIT_SIGNALED = 0x0000000 + WAIT_TIMEOUT = 0x00000102 + + // WaitForSingleObject wait duration + WAIT_INFINITE = 0xFFFFFFFF + WAIT_ONE_SECOND = 1000 + WAIT_HALF_SECOND = 500 + WAIT_QUARTER_SECOND = 250 +) + +// Windows API Console types +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) +// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment +type ( + CHAR_INFO struct { + UnicodeChar uint16 + Attributes uint16 + } + + CONSOLE_CURSOR_INFO struct { + Size uint32 + Visible int32 + } + + CONSOLE_SCREEN_BUFFER_INFO struct { + Size COORD + CursorPosition COORD + Attributes uint16 + Window SMALL_RECT + MaximumWindowSize COORD + } + + COORD struct { + X int16 + Y int16 + } + + SMALL_RECT struct { + Left int16 + Top int16 + Right int16 + Bottom int16 + } + + // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case; it is also the largest. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. + INPUT_RECORD struct { + EventType uint16 + KeyEvent KEY_EVENT_RECORD + } + + KEY_EVENT_RECORD struct { + KeyDown int32 + RepeatCount uint16 + VirtualKeyCode uint16 + VirtualScanCode uint16 + UnicodeChar uint16 + ControlKeyState uint32 + } + + WINDOW_BUFFER_SIZE struct { + Size COORD + } +) + +// boolToBOOL converts a Go bool into a Windows int32. +func boolToBOOL(f bool) int32 { + if f { + return int32(1) + } else { + return int32(0) + } +} + +// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. +func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorInfo sets the size and visibility of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. +func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorPosition sets the location of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. +func SetConsoleCursorPosition(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// GetConsoleMode gets the console mode for the given file descriptor. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. +func GetConsoleMode(handle uintptr) (mode uint32, err error) { + err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) + return mode, err +} + +// SetConsoleMode sets the console mode for the given file descriptor. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
+func SetConsoleMode(handle uintptr, mode uint32) error { + r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) + use(mode) + return checkError(r1, r2, err) +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. +func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + info := CONSOLE_SCREEN_BUFFER_INFO{} + err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) + if err != nil { + return nil, err + } + return &info, nil +} + +func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { + r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) + use(scrollRect) + use(clipRect) + use(destOrigin) + use(char) + return checkError(r1, r2, err) +} + +// SetConsoleScreenBufferSize sets the size of the console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. +func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// SetConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { + r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) + use(attribute) + return checkError(r1, r2, err) +} + +// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. +// Note that the size and location must be within and no larger than the backing console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. +func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { + r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) + use(isAbsolute) + use(rect) + return checkError(r1, r2, err) +} + +// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. +func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { + r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) + use(buffer) + use(bufferSize) + use(bufferCoord) + return checkError(r1, r2, err) +} + +// ReadConsoleInput reads (and removes) data from the console input buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { + r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) + use(buffer) + return checkError(r1, r2, err) +} + +// WaitForSingleObject waits for the passed handle to be signaled. +// It returns true if the handle was signaled; false otherwise. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. +func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) + switch r1 { + case WAIT_ABANDONED, WAIT_TIMEOUT: + return false, nil + case WAIT_SIGNALED: + return true, nil + } + use(msWait) + return false, err +} + +// String helpers +func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { + return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) +} + +func (coord COORD) String() string { + return fmt.Sprintf("%v,%v", coord.X, coord.Y) +} + +func (rect SMALL_RECT) String() string { + return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) +} + +// checkError evaluates the results of a Windows API call and returns the error if it failed. +func checkError(r1, r2 uintptr, err error) error { + // Windows APIs return non-zero to indicate success + if r1 != 0 { + return nil + } + + // Return the error if provided, otherwise default to EINVAL + if err != nil { + return err + } + return syscall.EINVAL +} + +// coordToPointer converts a COORD into a uintptr (by fooling the type system). +func coordToPointer(c COORD) uintptr { + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. + return uintptr(*((*uint32)(unsafe.Pointer(&c)))) +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go new file mode 100644 index 00000000..cbec8f72 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -0,0 +1,100 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +const ( + FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE +) + +// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the +// request represented by the passed ANSI mode. 
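+// For example, applying ansiterm.ANSI_SGR_FOREGROUND_RED to a mode whose
+// foreground is currently green clears FOREGROUND_COLOR_MASK and sets
+// FOREGROUND_RED, leaving the inverted flag untouched:
+//
+//	mode, inv := collectAnsiIntoWindowsAttributes(FOREGROUND_GREEN, false, 0, ansiterm.ANSI_SGR_FOREGROUND_RED)
+//	// mode == FOREGROUND_RED, inv == false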
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { + switch ansiMode { + + // Mode styles + case ansiterm.ANSI_SGR_BOLD: + windowsMode = windowsMode | FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: + windowsMode &^= FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_UNDERLINE: + windowsMode = windowsMode | COMMON_LVB_UNDERSCORE + + case ansiterm.ANSI_SGR_REVERSE: + inverted = true + + case ansiterm.ANSI_SGR_REVERSE_OFF: + inverted = false + + case ansiterm.ANSI_SGR_UNDERLINE_OFF: + windowsMode &^= COMMON_LVB_UNDERSCORE + + // Foreground colors + case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: + windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_BLACK: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_RED: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED + + case ansiterm.ANSI_SGR_FOREGROUND_GREEN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_BLUE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_CYAN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_WHITE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background colors + case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: + // Black with no intensity + windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_BLACK: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_RED: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED + + case ansiterm.ANSI_SGR_BACKGROUND_GREEN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_BLUE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_CYAN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_WHITE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + + return windowsMode, inverted +} + +// invertAttributes inverts the foreground and background colors of a Windows attributes value +func invertAttributes(windowsMode uint16) uint16 { + return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go new file mode 100644 index 00000000..3ee06ea7 --- /dev/null +++ 
b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -0,0 +1,101 @@ +// +build windows + +package winterm + +const ( + horizontal = iota + vertical +) + +func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { + if h.originMode { + sr := h.effectiveSr(info.Window) + return SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + } else { + return SMALL_RECT{ + Top: info.Window.Top, + Bottom: info.Window.Bottom, + Left: 0, + Right: info.Size.X - 1, + } + } +} + +// setCursorPosition sets the cursor to the specified position, bounded to the screen size +func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { + position.X = ensureInRange(position.X, window.Left, window.Right) + position.Y = ensureInRange(position.Y, window.Top, window.Bottom) + err := SetConsoleCursorPosition(h.fd, position) + if err != nil { + return err + } + h.logf("Cursor position set: (%d, %d)", position.X, position.Y) + return err +} + +func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(vertical, param) +} + +func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(horizontal, param) +} + +func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + switch moveMode { + case horizontal: + position.X += int16(param) + case vertical: + position.Y += int16(param) + } + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = 0 + position.Y += int16(param) + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = int16(param) - 1 + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go new file mode 100644 index 00000000..244b5fa2 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -0,0 +1,84 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { + // Ignore an invalid (negative area) request + if toCoord.Y < fromCoord.Y { + return nil + } + + var err error + + var coordStart = COORD{} + var coordEnd = COORD{} + + xCurrent, yCurrent := fromCoord.X, fromCoord.Y + xEnd, yEnd := toCoord.X, toCoord.Y + + // Clear any partial initial line + if xCurrent > 0 { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yCurrent + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent += 1 + } + + // Clear intervening rectangular section + if yCurrent < yEnd { + coordStart.X, coordStart.Y = xCurrent, yCurrent 
+ coordEnd.X, coordEnd.Y = xEnd, yEnd-1 + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent = yEnd + } + + // Clear remaining partial ending line + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { + region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} + width := toCoord.X - fromCoord.X + 1 + height := toCoord.Y - fromCoord.Y + 1 + size := uint32(width) * uint32(height) + + if size <= 0 { + return nil + } + + buffer := make([]CHAR_INFO, size) + + char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} + for i := 0; i < int(size); i++ { + buffer[i] = char + } + + err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go new file mode 100644 index 00000000..2d27fa1d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -0,0 +1,118 @@ +// +build windows + +package winterm + +// effectiveSr gets the current effective scroll region in buffer coordinates +func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { + top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) + bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) + if top >= bottom { + top = window.Top + bottom = window.Bottom + } + return scrollRegion{top: top, bottom: bottom} +} + +func (h *windowsAnsiEventHandler) scrollUp(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + return h.scroll(param, sr, info) +} + +func (h *windowsAnsiEventHandler) scrollDown(param int) error { + return h.scrollUp(-param) +} + +func (h *windowsAnsiEventHandler) deleteLines(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + start := info.CursorPosition.Y + sr := h.effectiveSr(info.Window) + // Lines cannot be inserted or deleted outside the scrolling region. + if start >= sr.top && start <= sr.bottom { + sr.top = start + return h.scroll(param, sr, info) + } else { + return nil + } +} + +func (h *windowsAnsiEventHandler) insertLines(param int) error { + return h.deleteLines(-param) +} + +// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
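+// For example, with a scroll region covering rows 5-10, scroll(1, sr, info)
+// copies rows 5-10 up by one line (clipped back to the region, so rows 5-9
+// receive the old rows 6-10), and the console fills the vacated row 10 with
+// blank cells carrying the current attributes.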
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { + h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) + h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) + + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: 0, + Y: sr.top - int16(param), + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} + +func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + return h.scrollLine(param, info.CursorPosition, info) +} + +func (h *windowsAnsiEventHandler) insertCharacters(param int) error { + return h.deleteCharacters(-param) +} + +// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. +func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: position.Y, + Bottom: position.Y, + Left: position.X, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: position.X - int16(columns), + Y: position.Y, + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go new file mode 100644 index 00000000..afa7635d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go @@ -0,0 +1,9 @@ +// +build windows + +package winterm + +// AddInRange increments a value by the passed quantity while ensuring the values +// always remain within the supplied min / max range. 
+func addInRange(n int16, increment int16, min int16, max int16) int16 { + return ensureInRange(n+increment, min, max) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go new file mode 100644 index 00000000..2d40fb75 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -0,0 +1,743 @@ +// +build windows + +package winterm + +import ( + "bytes" + "log" + "os" + "strconv" + + "github.com/Azure/go-ansiterm" +) + +type windowsAnsiEventHandler struct { + fd uintptr + file *os.File + infoReset *CONSOLE_SCREEN_BUFFER_INFO + sr scrollRegion + buffer bytes.Buffer + attributes uint16 + inverted bool + wrapNext bool + drewMarginByte bool + originMode bool + marginByte byte + curInfo *CONSOLE_SCREEN_BUFFER_INFO + curPos COORD + logf func(string, ...interface{}) +} + +type Option func(*windowsAnsiEventHandler) + +func WithLogf(f func(string, ...interface{})) Option { + return func(w *windowsAnsiEventHandler) { + w.logf = f + } +} + +func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { + infoReset, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + h := &windowsAnsiEventHandler{ + fd: fd, + file: file, + infoReset: infoReset, + attributes: infoReset.Attributes, + } + for _, o := range opts { + o(h) + } + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("winEventHandler.log") + logger := log.New(logFile, "", log.LstdFlags) + if h.logf != nil { + l := h.logf + h.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + h.logf = logger.Printf + } + } + + if h.logf == nil { + h.logf = func(string, ...interface{}) {} + } + + return h +} + +type scrollRegion struct { + top int16 + bottom int16 +} + +// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the +// current cursor position and scroll region settings, in which case it returns +// true. If no special handling is necessary, then it does nothing and returns +// false. +// +// In the false case, the caller should ensure that a carriage return +// and line feed are inserted or that the text is otherwise wrapped. +func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { + if h.wrapNext { + if err := h.Flush(); err != nil { + return false, err + } + h.clearWrap() + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return false, err + } + sr := h.effectiveSr(info.Window) + if pos.Y == sr.bottom { + // Scrolling is necessary. Let Windows automatically scroll if the scrolling region + // is the full window. + if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { + if includeCR { + pos.X = 0 + h.updatePos(pos) + } + return false, nil + } + + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { + return false, err + } + h.logf("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + + } else if pos.Y < info.Window.Bottom { + // Let Windows handle the LF. + pos.Y++ + if includeCR { + pos.X = 0 + } + h.updatePos(pos) + return false, nil + } else { + // The cursor is at the bottom of the screen but outside the scroll + // region. Skip the LF. 
+ h.logf("Simulating LF outside scroll region") + if includeCR { + if err := h.Flush(); err != nil { + return false, err + } + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } +} + +// executeLF executes a LF without a CR. +func (h *windowsAnsiEventHandler) executeLF() error { + handled, err := h.simulateLF(false) + if err != nil { + return err + } + if !handled { + // Windows LF will reset the cursor column position. Write the LF + // and restore the cursor position. + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + if pos.X != 0 { + if err := h.Flush(); err != nil { + return err + } + h.logf("Resetting cursor position for LF without CR") + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + } + return nil +} + +func (h *windowsAnsiEventHandler) Print(b byte) error { + if h.wrapNext { + h.buffer.WriteByte(h.marginByte) + h.clearWrap() + if _, err := h.simulateLF(true); err != nil { + return err + } + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X == info.Size.X-1 { + h.wrapNext = true + h.marginByte = b + } else { + pos.X++ + h.updatePos(pos) + h.buffer.WriteByte(b) + } + return nil +} + +func (h *windowsAnsiEventHandler) Execute(b byte) error { + switch b { + case ansiterm.ANSI_TAB: + h.logf("Execute(TAB)") + // Move to the next tab stop, but preserve auto-wrap if already set. + if !h.wrapNext { + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + pos.X = (pos.X + 8) - pos.X%8 + if pos.X >= info.Size.X { + pos.X = info.Size.X - 1 + } + if err := h.Flush(); err != nil { + return err + } + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + return nil + + case ansiterm.ANSI_BEL: + h.buffer.WriteByte(ansiterm.ANSI_BEL) + return nil + + case ansiterm.ANSI_BACKSPACE: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X > 0 { + pos.X-- + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) + } + return nil + + case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: + // Treat as true LF. + return h.executeLF() + + case ansiterm.ANSI_LINE_FEED: + // Simulate a CR and LF for now since there is no way in go-ansiterm + // to tell if the LF should include CR (and more things break when it's + // missing than when it's incorrectly added). 
+ handled, err := h.simulateLF(true) + if handled || err != nil { + return err + } + return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + + case ansiterm.ANSI_CARRIAGE_RETURN: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X != 0 { + pos.X = 0 + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) + } + return nil + + default: + return nil + } +} + +func (h *windowsAnsiEventHandler) CUU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(-param) +} + +func (h *windowsAnsiEventHandler) CUD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(param) +} + +func (h *windowsAnsiEventHandler) CUF(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(param) +} + +func (h *windowsAnsiEventHandler) CUB(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(-param) +} + +func (h *windowsAnsiEventHandler) CNL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(param) +} + +func (h *windowsAnsiEventHandler) CPL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(-param) +} + +func (h *windowsAnsiEventHandler) CHA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorColumn(param) +} + +func (h *windowsAnsiEventHandler) VPA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("VPA: [[%d]]", param) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + window := h.getCursorWindow(info) + position := info.CursorPosition + position.Y = window.Top + int16(param) - 1 + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) CUP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUP: [[%d %d]]", row, col) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + window := h.getCursorWindow(info) + position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) HVP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("HVP: [[%d %d]]", row, col) + h.clearWrap() + return h.CUP(row, col) +} + +func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.clearWrap() + return nil +} + +func (h *windowsAnsiEventHandler) DECOM(enable bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.clearWrap() + h.originMode = enable + return h.CUP(1, 1) +} + +func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.clearWrap() + if err := h.ED(2); err != nil { + return err + } + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + targetWidth := int16(80) + if use132 { + targetWidth = 132 + } + if info.Size.X < targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + window := info.Window + window.Left = 0 + window.Right = targetWidth - 1 + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + h.logf("set window failed: %v", err) + return err + } + if info.Size.X > targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + return SetConsoleCursorPosition(h.fd, COORD{0, 0}) +} + +func (h *windowsAnsiEventHandler) ED(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ED: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + + // [J -- Erases from the cursor to the end of the screen, including the cursor position. + // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J -- Erases the complete display. The cursor does not move. + // Notes: + // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X - 1, info.Size.Y - 1} + + case 1: + start = COORD{0, 0} + end = info.CursorPosition + + case 2: + start = COORD{0, 0} + end = COORD{info.Size.X - 1, info.Size.Y - 1} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + // If the whole buffer was cleared, move the window to the top while preserving + // the window-relative cursor position. + if param == 2 { + pos := info.CursorPosition + window := info.Window + pos.Y -= window.Top + window.Bottom -= window.Top + window.Top = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + return err + } + } + + return nil +} + +func (h *windowsAnsiEventHandler) EL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("EL: [%v]", strconv.Itoa(param)) + h.clearWrap() + + // [K -- Erases from the cursor to the end of the line, including the cursor position. + // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. + // [2K -- Erases the complete line. 
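+	// In all three cases the cursor position is left unchanged.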
+ + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X, info.CursorPosition.Y} + + case 1: + start = COORD{0, info.CursorPosition.Y} + end = info.CursorPosition + + case 2: + start = COORD{0, info.CursorPosition.Y} + end = COORD{info.Size.X, info.CursorPosition.Y} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) IL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("IL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertLines(param) +} + +func (h *windowsAnsiEventHandler) DL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteLines(param) +} + +func (h *windowsAnsiEventHandler) ICH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ICH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertCharacters(param) +} + +func (h *windowsAnsiEventHandler) DCH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DCH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteCharacters(param) +} + +func (h *windowsAnsiEventHandler) SGR(params []int) error { + if err := h.Flush(); err != nil { + return err + } + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + h.logf("SGR: [%v]", strings) + + if len(params) <= 0 { + h.attributes = h.infoReset.Attributes + h.inverted = false + } else { + for _, attr := range params { + + if attr == ansiterm.ANSI_SGR_RESET { + h.attributes = h.infoReset.Attributes + h.inverted = false + continue + } + + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) + } + } + + attributes := h.attributes + if h.inverted { + attributes = invertAttributes(attributes) + } + err := SetConsoleTextAttribute(h.fd, attributes) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) SU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollUp(param) +} + +func (h *windowsAnsiEventHandler) SD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollDown(param) +} + +func (h *windowsAnsiEventHandler) DA(params []string) error { + h.logf("DA: [%v]", params) + // DA cannot be implemented because it must send data on the VT100 input stream, + // which is not available to go-ansiterm. + return nil +} + +func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECSTBM: [%d, %d]", top, bottom) + + // Windows is 0 indexed, Linux is 1 indexed + h.sr.top = int16(top - 1) + h.sr.bottom = int16(bottom - 1) + + // This command also moves the cursor to the origin. 
+	h.clearWrap()
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) RI() error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("RI: []")
+	h.clearWrap()
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	if info.CursorPosition.Y == sr.top {
+		return h.scrollDown(1)
+	}
+
+	return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+	h.logf("IND: []")
+	return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+	h.curInfo = nil
+	if h.buffer.Len() > 0 {
+		h.logf("Flush: [%s]", h.buffer.Bytes())
+		if _, err := h.buffer.WriteTo(h.file); err != nil {
+			return err
+		}
+	}
+
+	if h.wrapNext && !h.drewMarginByte {
+		h.logf("Flush: drawing margin byte '%c'", h.marginByte)
+
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return err
+		}
+
+		charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+		size := COORD{1, 1}
+		position := COORD{0, 0}
+		region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+		if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+			return err
+		}
+		h.drewMarginByte = true
+	}
+	return nil
+}
+
+// getCurrentInfo ensures that the current console screen information has been queried
+// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+	if h.curInfo == nil {
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return COORD{}, nil, err
+		}
+		h.curInfo = info
+		h.curPos = info.CursorPosition
+	}
+	return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+	if h.curInfo == nil {
+		panic("failed to call getCurrentInfo before calling updatePos")
+	}
+	h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+	h.wrapNext = false
+	h.drewMarginByte = false
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE
deleted file mode 100644
index 49d21669..00000000
--- a/vendor/github.com/Microsoft/hcsshim/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Microsoft
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go deleted file mode 100644 index 916950c0..00000000 --- a/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go +++ /dev/null @@ -1,51 +0,0 @@ -package osversion - -import ( - "fmt" - - "golang.org/x/sys/windows" -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - -// Get gets the operating system version on Windows. -// The calling application must be manifested to get the correct version information. -func Get() OSVersion { - var err error - osv := OSVersion{} - osv.Version, err = windows.GetVersion() - if err != nil { - // GetVersion never fails. - panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv -} - -func (osv OSVersion) ToString() string { - return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) -} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go deleted file mode 100644 index 2d9567f6..00000000 --- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ /dev/null @@ -1,10 +0,0 @@ -package osversion - -const ( - - // RS2 was a client-only release in case you're asking why it's not in the list. - RS1 = 14393 - RS3 = 16299 - RS4 = 17134 - RS5 = 17763 -) diff --git a/vendor/github.com/Nvveen/Gotty/LICENSE b/vendor/github.com/Nvveen/Gotty/LICENSE new file mode 100644 index 00000000..0b71c973 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com) +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those +of the authors and should not be interpreted as representing official policies, +either expressed or implied, of the FreeBSD Project. diff --git a/vendor/github.com/Nvveen/Gotty/README b/vendor/github.com/Nvveen/Gotty/README new file mode 100644 index 00000000..a6b0d9a8 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/README @@ -0,0 +1,5 @@ +Gotty is a library written in Go that determines and reads termcap database +files to produce an interface for interacting with the capabilities of a +terminal. +See the godoc documentation or the source code for more information about +function usage. diff --git a/vendor/github.com/Nvveen/Gotty/TODO b/vendor/github.com/Nvveen/Gotty/TODO new file mode 100644 index 00000000..47046053 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/TODO @@ -0,0 +1,3 @@ +gotty.go:// TODO add more concurrency to name lookup, look for more opportunities. +all:// TODO add more documentation, with function usage in a doc.go file. +all:// TODO add more testing/benchmarking with go test. diff --git a/vendor/github.com/Nvveen/Gotty/attributes.go b/vendor/github.com/Nvveen/Gotty/attributes.go new file mode 100644 index 00000000..a4c005fa --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/attributes.go @@ -0,0 +1,514 @@ +// Copyright 2012 Neal van Veen. All rights reserved. +// Usage of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package gotty + +// Boolean capabilities +var BoolAttr = [...]string{ + "auto_left_margin", "bw", + "auto_right_margin", "am", + "no_esc_ctlc", "xsb", + "ceol_standout_glitch", "xhp", + "eat_newline_glitch", "xenl", + "erase_overstrike", "eo", + "generic_type", "gn", + "hard_copy", "hc", + "has_meta_key", "km", + "has_status_line", "hs", + "insert_null_glitch", "in", + "memory_above", "da", + "memory_below", "db", + "move_insert_mode", "mir", + "move_standout_mode", "msgr", + "over_strike", "os", + "status_line_esc_ok", "eslok", + "dest_tabs_magic_smso", "xt", + "tilde_glitch", "hz", + "transparent_underline", "ul", + "xon_xoff", "nxon", + "needs_xon_xoff", "nxon", + "prtr_silent", "mc5i", + "hard_cursor", "chts", + "non_rev_rmcup", "nrrmc", + "no_pad_char", "npc", + "non_dest_scroll_region", "ndscr", + "can_change", "ccc", + "back_color_erase", "bce", + "hue_lightness_saturation", "hls", + "col_addr_glitch", "xhpa", + "cr_cancels_micro_mode", "crxm", + "has_print_wheel", "daisy", + "row_addr_glitch", "xvpa", + "semi_auto_right_margin", "sam", + "cpi_changes_res", "cpix", + "lpi_changes_res", "lpix", + "backspaces_with_bs", "", + "crt_no_scrolling", "", + "no_correctly_working_cr", "", + "gnu_has_meta_key", "", + "linefeed_is_newline", "", + "has_hardware_tabs", "", + "return_does_clr_eol", "", +} + +// Numerical capabilities +var NumAttr = [...]string{ + "columns", "cols", + "init_tabs", "it", + "lines", "lines", + "lines_of_memory", "lm", + "magic_cookie_glitch", "xmc", + "padding_baud_rate", "pb", + "virtual_terminal", "vt", + "width_status_line", "wsl", + "num_labels", "nlab", + "label_height", "lh", + "label_width", "lw", + "max_attributes", "ma", + "maximum_windows", "wnum", + "max_colors", "colors", + "max_pairs", "pairs", + "no_color_video", "ncv", + "buffer_capacity", "bufsz", + "dot_vert_spacing", "spinv", + "dot_horz_spacing", "spinh", + "max_micro_address", "maddr", + "max_micro_jump", "mjump", + "micro_col_size", "mcs", + "micro_line_size", "mls", + "number_of_pins", "npins", + "output_res_char", "orc", + "output_res_line", "orl", + "output_res_horz_inch", "orhi", + "output_res_vert_inch", "orvi", + "print_rate", "cps", + "wide_char_size", "widcs", + "buttons", "btns", + "bit_image_entwining", "bitwin", + "bit_image_type", "bitype", + "magic_cookie_glitch_ul", "", + "carriage_return_delay", "", + "new_line_delay", "", + "backspace_delay", "", + "horizontal_tab_delay", "", + "number_of_function_keys", "", +} + +// String capabilities +var StrAttr = [...]string{ + "back_tab", "cbt", + "bell", "bel", + "carriage_return", "cr", + "change_scroll_region", "csr", + "clear_all_tabs", "tbc", + "clear_screen", "clear", + "clr_eol", "el", + "clr_eos", "ed", + "column_address", "hpa", + "command_character", "cmdch", + "cursor_address", "cup", + "cursor_down", "cud1", + "cursor_home", "home", + "cursor_invisible", "civis", + "cursor_left", "cub1", + "cursor_mem_address", "mrcup", + "cursor_normal", "cnorm", + "cursor_right", "cuf1", + "cursor_to_ll", "ll", + "cursor_up", "cuu1", + "cursor_visible", "cvvis", + "delete_character", "dch1", + "delete_line", "dl1", + "dis_status_line", "dsl", + "down_half_line", "hd", + "enter_alt_charset_mode", "smacs", + "enter_blink_mode", "blink", + "enter_bold_mode", "bold", + "enter_ca_mode", "smcup", + "enter_delete_mode", "smdc", + "enter_dim_mode", "dim", + "enter_insert_mode", "smir", + "enter_secure_mode", "invis", + "enter_protected_mode", "prot", + "enter_reverse_mode", "rev", + "enter_standout_mode", "smso", + "enter_underline_mode", "smul", + 
"erase_chars", "ech", + "exit_alt_charset_mode", "rmacs", + "exit_attribute_mode", "sgr0", + "exit_ca_mode", "rmcup", + "exit_delete_mode", "rmdc", + "exit_insert_mode", "rmir", + "exit_standout_mode", "rmso", + "exit_underline_mode", "rmul", + "flash_screen", "flash", + "form_feed", "ff", + "from_status_line", "fsl", + "init_1string", "is1", + "init_2string", "is2", + "init_3string", "is3", + "init_file", "if", + "insert_character", "ich1", + "insert_line", "il1", + "insert_padding", "ip", + "key_backspace", "kbs", + "key_catab", "ktbc", + "key_clear", "kclr", + "key_ctab", "kctab", + "key_dc", "kdch1", + "key_dl", "kdl1", + "key_down", "kcud1", + "key_eic", "krmir", + "key_eol", "kel", + "key_eos", "ked", + "key_f0", "kf0", + "key_f1", "kf1", + "key_f10", "kf10", + "key_f2", "kf2", + "key_f3", "kf3", + "key_f4", "kf4", + "key_f5", "kf5", + "key_f6", "kf6", + "key_f7", "kf7", + "key_f8", "kf8", + "key_f9", "kf9", + "key_home", "khome", + "key_ic", "kich1", + "key_il", "kil1", + "key_left", "kcub1", + "key_ll", "kll", + "key_npage", "knp", + "key_ppage", "kpp", + "key_right", "kcuf1", + "key_sf", "kind", + "key_sr", "kri", + "key_stab", "khts", + "key_up", "kcuu1", + "keypad_local", "rmkx", + "keypad_xmit", "smkx", + "lab_f0", "lf0", + "lab_f1", "lf1", + "lab_f10", "lf10", + "lab_f2", "lf2", + "lab_f3", "lf3", + "lab_f4", "lf4", + "lab_f5", "lf5", + "lab_f6", "lf6", + "lab_f7", "lf7", + "lab_f8", "lf8", + "lab_f9", "lf9", + "meta_off", "rmm", + "meta_on", "smm", + "newline", "_glitch", + "pad_char", "npc", + "parm_dch", "dch", + "parm_delete_line", "dl", + "parm_down_cursor", "cud", + "parm_ich", "ich", + "parm_index", "indn", + "parm_insert_line", "il", + "parm_left_cursor", "cub", + "parm_right_cursor", "cuf", + "parm_rindex", "rin", + "parm_up_cursor", "cuu", + "pkey_key", "pfkey", + "pkey_local", "pfloc", + "pkey_xmit", "pfx", + "print_screen", "mc0", + "prtr_off", "mc4", + "prtr_on", "mc5", + "repeat_char", "rep", + "reset_1string", "rs1", + "reset_2string", "rs2", + "reset_3string", "rs3", + "reset_file", "rf", + "restore_cursor", "rc", + "row_address", "mvpa", + "save_cursor", "row_address", + "scroll_forward", "ind", + "scroll_reverse", "ri", + "set_attributes", "sgr", + "set_tab", "hts", + "set_window", "wind", + "tab", "s_magic_smso", + "to_status_line", "tsl", + "underline_char", "uc", + "up_half_line", "hu", + "init_prog", "iprog", + "key_a1", "ka1", + "key_a3", "ka3", + "key_b2", "kb2", + "key_c1", "kc1", + "key_c3", "kc3", + "prtr_non", "mc5p", + "char_padding", "rmp", + "acs_chars", "acsc", + "plab_norm", "pln", + "key_btab", "kcbt", + "enter_xon_mode", "smxon", + "exit_xon_mode", "rmxon", + "enter_am_mode", "smam", + "exit_am_mode", "rmam", + "xon_character", "xonc", + "xoff_character", "xoffc", + "ena_acs", "enacs", + "label_on", "smln", + "label_off", "rmln", + "key_beg", "kbeg", + "key_cancel", "kcan", + "key_close", "kclo", + "key_command", "kcmd", + "key_copy", "kcpy", + "key_create", "kcrt", + "key_end", "kend", + "key_enter", "kent", + "key_exit", "kext", + "key_find", "kfnd", + "key_help", "khlp", + "key_mark", "kmrk", + "key_message", "kmsg", + "key_move", "kmov", + "key_next", "knxt", + "key_open", "kopn", + "key_options", "kopt", + "key_previous", "kprv", + "key_print", "kprt", + "key_redo", "krdo", + "key_reference", "kref", + "key_refresh", "krfr", + "key_replace", "krpl", + "key_restart", "krst", + "key_resume", "kres", + "key_save", "ksav", + "key_suspend", "kspd", + "key_undo", "kund", + "key_sbeg", "kBEG", + "key_scancel", "kCAN", + "key_scommand", "kCMD", 
+ "key_scopy", "kCPY", + "key_screate", "kCRT", + "key_sdc", "kDC", + "key_sdl", "kDL", + "key_select", "kslt", + "key_send", "kEND", + "key_seol", "kEOL", + "key_sexit", "kEXT", + "key_sfind", "kFND", + "key_shelp", "kHLP", + "key_shome", "kHOM", + "key_sic", "kIC", + "key_sleft", "kLFT", + "key_smessage", "kMSG", + "key_smove", "kMOV", + "key_snext", "kNXT", + "key_soptions", "kOPT", + "key_sprevious", "kPRV", + "key_sprint", "kPRT", + "key_sredo", "kRDO", + "key_sreplace", "kRPL", + "key_sright", "kRIT", + "key_srsume", "kRES", + "key_ssave", "kSAV", + "key_ssuspend", "kSPD", + "key_sundo", "kUND", + "req_for_input", "rfi", + "key_f11", "kf11", + "key_f12", "kf12", + "key_f13", "kf13", + "key_f14", "kf14", + "key_f15", "kf15", + "key_f16", "kf16", + "key_f17", "kf17", + "key_f18", "kf18", + "key_f19", "kf19", + "key_f20", "kf20", + "key_f21", "kf21", + "key_f22", "kf22", + "key_f23", "kf23", + "key_f24", "kf24", + "key_f25", "kf25", + "key_f26", "kf26", + "key_f27", "kf27", + "key_f28", "kf28", + "key_f29", "kf29", + "key_f30", "kf30", + "key_f31", "kf31", + "key_f32", "kf32", + "key_f33", "kf33", + "key_f34", "kf34", + "key_f35", "kf35", + "key_f36", "kf36", + "key_f37", "kf37", + "key_f38", "kf38", + "key_f39", "kf39", + "key_f40", "kf40", + "key_f41", "kf41", + "key_f42", "kf42", + "key_f43", "kf43", + "key_f44", "kf44", + "key_f45", "kf45", + "key_f46", "kf46", + "key_f47", "kf47", + "key_f48", "kf48", + "key_f49", "kf49", + "key_f50", "kf50", + "key_f51", "kf51", + "key_f52", "kf52", + "key_f53", "kf53", + "key_f54", "kf54", + "key_f55", "kf55", + "key_f56", "kf56", + "key_f57", "kf57", + "key_f58", "kf58", + "key_f59", "kf59", + "key_f60", "kf60", + "key_f61", "kf61", + "key_f62", "kf62", + "key_f63", "kf63", + "clr_bol", "el1", + "clear_margins", "mgc", + "set_left_margin", "smgl", + "set_right_margin", "smgr", + "label_format", "fln", + "set_clock", "sclk", + "display_clock", "dclk", + "remove_clock", "rmclk", + "create_window", "cwin", + "goto_window", "wingo", + "hangup", "hup", + "dial_phone", "dial", + "quick_dial", "qdial", + "tone", "tone", + "pulse", "pulse", + "flash_hook", "hook", + "fixed_pause", "pause", + "wait_tone", "wait", + "user0", "u0", + "user1", "u1", + "user2", "u2", + "user3", "u3", + "user4", "u4", + "user5", "u5", + "user6", "u6", + "user7", "u7", + "user8", "u8", + "user9", "u9", + "orig_pair", "op", + "orig_colors", "oc", + "initialize_color", "initc", + "initialize_pair", "initp", + "set_color_pair", "scp", + "set_foreground", "setf", + "set_background", "setb", + "change_char_pitch", "cpi", + "change_line_pitch", "lpi", + "change_res_horz", "chr", + "change_res_vert", "cvr", + "define_char", "defc", + "enter_doublewide_mode", "swidm", + "enter_draft_quality", "sdrfq", + "enter_italics_mode", "sitm", + "enter_leftward_mode", "slm", + "enter_micro_mode", "smicm", + "enter_near_letter_quality", "snlq", + "enter_normal_quality", "snrmq", + "enter_shadow_mode", "sshm", + "enter_subscript_mode", "ssubm", + "enter_superscript_mode", "ssupm", + "enter_upward_mode", "sum", + "exit_doublewide_mode", "rwidm", + "exit_italics_mode", "ritm", + "exit_leftward_mode", "rlm", + "exit_micro_mode", "rmicm", + "exit_shadow_mode", "rshm", + "exit_subscript_mode", "rsubm", + "exit_superscript_mode", "rsupm", + "exit_upward_mode", "rum", + "micro_column_address", "mhpa", + "micro_down", "mcud1", + "micro_left", "mcub1", + "micro_right", "mcuf1", + "micro_row_address", "mvpa", + "micro_up", "mcuu1", + "order_of_pins", "porder", + "parm_down_micro", "mcud", + 
"parm_left_micro", "mcub", + "parm_right_micro", "mcuf", + "parm_up_micro", "mcuu", + "select_char_set", "scs", + "set_bottom_margin", "smgb", + "set_bottom_margin_parm", "smgbp", + "set_left_margin_parm", "smglp", + "set_right_margin_parm", "smgrp", + "set_top_margin", "smgt", + "set_top_margin_parm", "smgtp", + "start_bit_image", "sbim", + "start_char_set_def", "scsd", + "stop_bit_image", "rbim", + "stop_char_set_def", "rcsd", + "subscript_characters", "subcs", + "superscript_characters", "supcs", + "these_cause_cr", "docr", + "zero_motion", "zerom", + "char_set_names", "csnm", + "key_mouse", "kmous", + "mouse_info", "minfo", + "req_mouse_pos", "reqmp", + "get_mouse", "getm", + "set_a_foreground", "setaf", + "set_a_background", "setab", + "pkey_plab", "pfxl", + "device_type", "devt", + "code_set_init", "csin", + "set0_des_seq", "s0ds", + "set1_des_seq", "s1ds", + "set2_des_seq", "s2ds", + "set3_des_seq", "s3ds", + "set_lr_margin", "smglr", + "set_tb_margin", "smgtb", + "bit_image_repeat", "birep", + "bit_image_newline", "binel", + "bit_image_carriage_return", "bicr", + "color_names", "colornm", + "define_bit_image_region", "defbi", + "end_bit_image_region", "endbi", + "set_color_band", "setcolor", + "set_page_length", "slines", + "display_pc_char", "dispc", + "enter_pc_charset_mode", "smpch", + "exit_pc_charset_mode", "rmpch", + "enter_scancode_mode", "smsc", + "exit_scancode_mode", "rmsc", + "pc_term_options", "pctrm", + "scancode_escape", "scesc", + "alt_scancode_esc", "scesa", + "enter_horizontal_hl_mode", "ehhlm", + "enter_left_hl_mode", "elhlm", + "enter_low_hl_mode", "elohlm", + "enter_right_hl_mode", "erhlm", + "enter_top_hl_mode", "ethlm", + "enter_vertical_hl_mode", "evhlm", + "set_a_attributes", "sgr1", + "set_pglen_inch", "slength", + "termcap_init2", "", + "termcap_reset", "", + "linefeed_if_not_lf", "", + "backspace_if_not_bs", "", + "other_non_function_keys", "", + "arrow_key_map", "", + "acs_ulcorner", "", + "acs_llcorner", "", + "acs_urcorner", "", + "acs_lrcorner", "", + "acs_ltee", "", + "acs_rtee", "", + "acs_btee", "", + "acs_ttee", "", + "acs_hline", "", + "acs_vline", "", + "acs_plus", "", + "memory_lock", "", + "memory_unlock", "", + "box_chars_1", "", +} diff --git a/vendor/github.com/Nvveen/Gotty/gotty.go b/vendor/github.com/Nvveen/Gotty/gotty.go new file mode 100644 index 00000000..093cbf37 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/gotty.go @@ -0,0 +1,238 @@ +// Copyright 2012 Neal van Veen. All rights reserved. +// Usage of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Gotty is a Go-package for reading and parsing the terminfo database +package gotty + +// TODO add more concurrency to name lookup, look for more opportunities. + +import ( + "encoding/binary" + "errors" + "fmt" + "os" + "reflect" + "strings" + "sync" +) + +// Open a terminfo file by the name given and construct a TermInfo object. +// If something went wrong reading the terminfo database file, an error is +// returned. 
+func OpenTermInfo(termName string) (*TermInfo, error) {
+	var term *TermInfo
+	var err error
+	// Find the environment variables
+	termloc := os.Getenv("TERMINFO")
+	if len(termloc) == 0 {
+		// Search like ncurses
+		locations := []string{os.Getenv("HOME") + "/.terminfo/", "/etc/terminfo/",
+			"/lib/terminfo/", "/usr/share/terminfo/"}
+		var path string
+		for _, str := range locations {
+			// Construct path
+			path = str + string(termName[0]) + "/" + termName
+			// Check if path can be opened
+			file, _ := os.Open(path)
+			if file != nil {
+				// Path can open, fall out and use current path
+				file.Close()
+				break
+			}
+		}
+		if len(path) > 0 {
+			term, err = readTermInfo(path)
+		} else {
+			err = errors.New(fmt.Sprintf("No terminfo file(-location) found"))
+		}
+	}
+	return term, err
+}
+
+// Open a terminfo file from the environment variable containing the current
+// terminal name and construct a TermInfo object. If something went wrong
+// reading the terminfo database file, an error is returned.
+func OpenTermInfoEnv() (*TermInfo, error) {
+	termenv := os.Getenv("TERM")
+	return OpenTermInfo(termenv)
+}
+
+// Return an attribute by the name attr provided. If none can be found,
+// an error is returned.
+func (term *TermInfo) GetAttribute(attr string) (stacker, error) {
+	// Channel to store the main value in.
+	var value stacker
+	// Add a blocking WaitGroup
+	var block sync.WaitGroup
+	// Keep track of variable being written.
+	written := false
+	// Function to put into goroutine.
+	f := func(ats interface{}) {
+		var ok bool
+		var v stacker
+		// Switch on type of map to use and assign value to it.
+		switch reflect.TypeOf(ats).Elem().Kind() {
+		case reflect.Bool:
+			v, ok = ats.(map[string]bool)[attr]
+		case reflect.Int16:
+			v, ok = ats.(map[string]int16)[attr]
+		case reflect.String:
+			v, ok = ats.(map[string]string)[attr]
+		}
+		// If ok, a value is found, so we can write.
+		if ok {
+			value = v
+			written = true
+		}
+		// Goroutine is done
+		block.Done()
+	}
+	block.Add(3)
+	// Go for all 3 attribute lists.
+	go f(term.boolAttributes)
+	go f(term.numAttributes)
+	go f(term.strAttributes)
+	// Wait until every goroutine is done.
+	block.Wait()
+	// If a value has been written, return it.
+	if written {
+		return value, nil
+	}
+	// Otherwise, error.
+	return nil, fmt.Errorf("Error finding attribute")
+}
+
+// Return an attribute by the name attr provided. If none can be found,
+// an error is returned. A name is first converted to its termcap value.
+func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
+	tc := GetTermcapName(name)
+	return term.GetAttribute(tc)
+}
+
+// A utility function that finds and returns the termcap equivalent of a
+// variable name.
+func GetTermcapName(name string) string {
+	// Termcap name
+	var tc string
+	// Blocking group
+	var wait sync.WaitGroup
+	// Function to put into a goroutine
+	f := func(attrs []string) {
+		// Find the string corresponding to the name
+		for i, s := range attrs {
+			if s == name {
+				tc = attrs[i+1]
+			}
+		}
+		// Goroutine is finished
+		wait.Done()
+	}
+	wait.Add(3)
+	// Go for all 3 attribute lists
+	go f(BoolAttr[:])
+	go f(NumAttr[:])
+	go f(StrAttr[:])
+	// Wait until every goroutine is done
+	wait.Wait()
+	// Return the termcap name
+	return tc
+}
+
+// This function takes a path to a terminfo file and reads it in binary
+// form to construct the actual TermInfo file.
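+// The layout is the standard terminfo format: a six-value header followed by
+// the names, boolean, numeric, string-offset, and string sections.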
+func readTermInfo(path string) (*TermInfo, error) { + // Open the terminfo file + file, err := os.Open(path) + defer file.Close() + if err != nil { + return nil, err + } + + // magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize + // Header is composed of the magic 0432 octal number, size of the name + // section, size of the boolean section, the amount of number values, + // the number of offsets of strings, and the size of the string section. + var header [6]int16 + // Byte array is used to read in byte values + var byteArray []byte + // Short array is used to read in short values + var shArray []int16 + // TermInfo object to store values + var term TermInfo + + // Read in the header + err = binary.Read(file, binary.LittleEndian, &header) + if err != nil { + return nil, err + } + // If magic number isn't there or isn't correct, we have the wrong filetype + if header[0] != 0432 { + return nil, errors.New(fmt.Sprintf("Wrong filetype")) + } + + // Read in the names + byteArray = make([]byte, header[1]) + err = binary.Read(file, binary.LittleEndian, &byteArray) + if err != nil { + return nil, err + } + term.Names = strings.Split(string(byteArray), "|") + + // Read in the booleans + byteArray = make([]byte, header[2]) + err = binary.Read(file, binary.LittleEndian, &byteArray) + if err != nil { + return nil, err + } + term.boolAttributes = make(map[string]bool) + for i, b := range byteArray { + if b == 1 { + term.boolAttributes[BoolAttr[i*2+1]] = true + } + } + // If the number of bytes read is not even, a byte for alignment is added + if len(byteArray)%2 != 0 { + err = binary.Read(file, binary.LittleEndian, make([]byte, 1)) + if err != nil { + return nil, err + } + } + + // Read in shorts + shArray = make([]int16, header[3]) + err = binary.Read(file, binary.LittleEndian, &shArray) + if err != nil { + return nil, err + } + term.numAttributes = make(map[string]int16) + for i, n := range shArray { + if n != 0377 && n > -1 { + term.numAttributes[NumAttr[i*2+1]] = n + } + } + + // Read the offsets into the short array + shArray = make([]int16, header[4]) + err = binary.Read(file, binary.LittleEndian, &shArray) + if err != nil { + return nil, err + } + // Read the actual strings in the byte array + byteArray = make([]byte, header[5]) + err = binary.Read(file, binary.LittleEndian, &byteArray) + if err != nil { + return nil, err + } + term.strAttributes = make(map[string]string) + // We get an offset, and then iterate until the string is null-terminated + for i, offset := range shArray { + if offset > -1 { + r := offset + for ; byteArray[r] != 0; r++ { + } + term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r]) + } + } + return &term, nil +} diff --git a/vendor/github.com/Nvveen/Gotty/parser.go b/vendor/github.com/Nvveen/Gotty/parser.go new file mode 100644 index 00000000..a9d5d23c --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/parser.go @@ -0,0 +1,362 @@ +// Copyright 2012 Neal van Veen. All rights reserved. +// Usage of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+
+package gotty
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var exp = [...]string{
+	"%%",
+	"%c",
+	"%s",
+	"%p(\\d)",
+	"%P([A-z])",
+	"%g([A-z])",
+	"%'(.)'",
+	"%{([0-9]+)}",
+	"%l",
+	"%\\+|%-|%\\*|%/|%m",
+	"%&|%\\||%\\^",
+	"%=|%>|%<",
+	"%A|%O",
+	"%!|%~",
+	"%i",
+	"%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]",
+	"%\\?(.*?);",
+}
+
+var regex *regexp.Regexp
+var staticVar map[byte]stacker
+
+// Parses the attribute that is received with name attr and parameters params.
+func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) {
+	// Get the attribute name first.
+	iface, err := term.GetAttribute(attr)
+	str, ok := iface.(string)
+	if err != nil {
+		return "", err
+	}
+	if !ok {
+		return str, errors.New("Only string capabilities can be parsed.")
+	}
+	// Construct the hidden parser struct so we can use a recursive stack based
+	// parser.
+	ps := &parser{}
+	// Dynamic variables only exist in this context.
+	ps.dynamicVar = make(map[byte]stacker, 26)
+	ps.parameters = make([]stacker, len(params))
+	// Convert the parameters to insert them into the parser struct.
+	for i, x := range params {
+		ps.parameters[i] = x
+	}
+	// Recursively walk and return.
+	result, err := ps.walk(str)
+	return result, err
+}
+
+// Parses the attribute that is received with name attr and parameters params.
+// Only works on full name of a capability that is given, which it uses to
+// search for the termcap name.
+func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) {
+	tc := GetTermcapName(attr)
+	return term.Parse(tc, params...)
+}
+
+// Identify each token in a stack based manner and do the actual parsing.
+func (ps *parser) walk(attr string) (string, error) {
+	// We use a buffer to get the modified string.
+	var buf bytes.Buffer
+	// Next, find and identify all tokens by their indices and strings.
+	tokens := regex.FindAllStringSubmatch(attr, -1)
+	if len(tokens) == 0 {
+		return attr, nil
+	}
+	indices := regex.FindAllStringIndex(attr, -1)
+	q := 0 // q counts the matches of one token
+	// Iterate through the string per character.
+	for i := 0; i < len(attr); i++ {
+		// If the current position is an identified token, execute the following
+		// steps.
+		if q < len(indices) && i >= indices[q][0] && i < indices[q][1] {
+			// Switch on token.
+			switch {
+			case tokens[q][0][:2] == "%%":
+				// Literal percentage character.
+				buf.WriteByte('%')
+			case tokens[q][0][:2] == "%c":
+				// Pop a character.
+				c, err := ps.st.pop()
+				if err != nil {
+					return buf.String(), err
+				}
+				buf.WriteByte(c.(byte))
+			case tokens[q][0][:2] == "%s":
+				// Pop a string.
+				str, err := ps.st.pop()
+				if err != nil {
+					return buf.String(), err
+				}
+				if _, ok := str.(string); !ok {
+					return buf.String(), errors.New("Stack head is not a string")
+				}
+				buf.WriteString(str.(string))
+			case tokens[q][0][:2] == "%p":
+				// Push a parameter on the stack.
+				index, err := strconv.ParseInt(tokens[q][1], 10, 8)
+				index--
+				if err != nil {
+					return buf.String(), err
+				}
+				if int(index) >= len(ps.parameters) {
+					return buf.String(), errors.New("Parameters index out of bound")
+				}
+				ps.st.push(ps.parameters[index])
+			case tokens[q][0][:2] == "%P":
+				// Pop a variable from the stack as a dynamic or static variable.
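+				// Lowercase indices (a-z) name dynamic variables, uppercase
+				// indices (A-Z) name static ones; %g performs the matching push.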
+ val, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + index := tokens[q][2] + if len(index) > 1 { + errorStr := fmt.Sprintf("%s is not a valid dynamic variables index", + index) + return buf.String(), errors.New(errorStr) + } + // Specify either dynamic or static. + if index[0] >= 'a' && index[0] <= 'z' { + ps.dynamicVar[index[0]] = val + } else if index[0] >= 'A' && index[0] <= 'Z' { + staticVar[index[0]] = val + } + case tokens[q][0][:2] == "%g": + // Push a variable from the stack as a dynamic or static variable. + index := tokens[q][3] + if len(index) > 1 { + errorStr := fmt.Sprintf("%s is not a valid static variables index", + index) + return buf.String(), errors.New(errorStr) + } + var val stacker + if index[0] >= 'a' && index[0] <= 'z' { + val = ps.dynamicVar[index[0]] + } else if index[0] >= 'A' && index[0] <= 'Z' { + val = staticVar[index[0]] + } + ps.st.push(val) + case tokens[q][0][:2] == "%'": + // Push a character constant. + con := tokens[q][4] + if len(con) > 1 { + errorStr := fmt.Sprintf("%s is not a valid character constant", con) + return buf.String(), errors.New(errorStr) + } + ps.st.push(con[0]) + case tokens[q][0][:2] == "%{": + // Push an integer constant. + con, err := strconv.ParseInt(tokens[q][5], 10, 32) + if err != nil { + return buf.String(), err + } + ps.st.push(con) + case tokens[q][0][:2] == "%l": + // Push the length of the string that is popped from the stack. + popStr, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + if _, ok := popStr.(string); !ok { + errStr := fmt.Sprintf("Stack head is not a string") + return buf.String(), errors.New(errStr) + } + ps.st.push(len(popStr.(string))) + case tokens[q][0][:2] == "%?": + // If-then-else construct. First, the whole string is identified and + // then inside this substring, we can specify which parts to switch on. + ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);") + ifTokens := ifReg.FindStringSubmatch(tokens[q][0]) + var ( + ifStr string + err error + ) + // Parse the if-part to determine if-else. + if len(ifTokens[1]) > 0 { + ifStr, err = ps.walk(ifTokens[1]) + } else { // else + ifStr, err = ps.walk(ifTokens[4]) + } + // Return any errors + if err != nil { + return buf.String(), err + } else if len(ifStr) > 0 { + // Self-defined limitation, not sure if this is correct, but didn't + // seem like it. + return buf.String(), errors.New("If-clause cannot print statements") + } + var thenStr string + // Pop the first value that is set by parsing the if-clause. + choose, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + // Switch to if or else. + if choose.(int) == 0 && len(ifTokens[1]) > 0 { + thenStr, err = ps.walk(ifTokens[3]) + } else if choose.(int) != 0 { + if len(ifTokens[1]) > 0 { + thenStr, err = ps.walk(ifTokens[2]) + } else { + thenStr, err = ps.walk(ifTokens[5]) + } + } + if err != nil { + return buf.String(), err + } + buf.WriteString(thenStr) + case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits. + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 'x': + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 'X': + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 's': + token := tokens[q][0] + // Remove the : that comes before a flag. 
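+				// For example, "%:-4d" becomes "%-4d", which fmt.Sprintf can
+				// interpret directly.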
+				if token[1] == ':' {
+					token = token[:1] + token[2:]
+				}
+				digit, err := ps.st.pop()
+				if err != nil {
+					return buf.String(), err
+				}
+				// The rest is determined like the normal formatted prints.
+				digitStr := fmt.Sprintf(token, digit.(int))
+				buf.WriteString(digitStr)
+			case tokens[q][0][:2] == "%i":
+				// Increment the parameters by one.
+				if len(ps.parameters) < 2 {
+					return buf.String(), errors.New("Not enough parameters to increment.")
+				}
+				val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int)
+				val1++
+				val2++
+				ps.parameters[0], ps.parameters[1] = val1, val2
+			default:
+				// The rest of the tokens is a special case, where two values are
+				// popped and then operated on by the token that comes after them.
+				op1, err := ps.st.pop()
+				if err != nil {
+					return buf.String(), err
+				}
+				op2, err := ps.st.pop()
+				if err != nil {
+					return buf.String(), err
+				}
+				var result stacker
+				switch tokens[q][0][:2] {
+				case "%+":
+					// Addition
+					result = op2.(int) + op1.(int)
+				case "%-":
+					// Subtraction
+					result = op2.(int) - op1.(int)
+				case "%*":
+					// Multiplication
+					result = op2.(int) * op1.(int)
+				case "%/":
+					// Division
+					result = op2.(int) / op1.(int)
+				case "%m":
+					// Modulo
+					result = op2.(int) % op1.(int)
+				case "%&":
+					// Bitwise AND
+					result = op2.(int) & op1.(int)
+				case "%|":
+					// Bitwise OR
+					result = op2.(int) | op1.(int)
+				case "%^":
+					// Bitwise XOR
+					result = op2.(int) ^ op1.(int)
+				case "%=":
+					// Equals
+					result = op2 == op1
+				case "%>":
+					// Greater-than
+					result = op2.(int) > op1.(int)
+				case "%<":
+					// Lesser-than
+					result = op2.(int) < op1.(int)
+				case "%A":
+					// Logical AND
+					result = op2.(bool) && op1.(bool)
+				case "%O":
+					// Logical OR
+					result = op2.(bool) || op1.(bool)
+				case "%!":
+					// Logical complement
+					result = !op1.(bool)
+				case "%~":
+					// Bitwise complement
+					result = ^(op1.(int))
+				}
+				ps.st.push(result)
+			}
+
+			i = indices[q][1] - 1
+			q++
+		} else {
+			// We are not "inside" a token, so just skip until the end or the next
+			// token, and add all characters to the buffer.
+			j := i
+			if q != len(indices) {
+				for !(j >= indices[q][0] && j < indices[q][1]) {
+					j++
+				}
+			} else {
+				j = len(attr)
+			}
+			buf.WriteString(string(attr[i:j]))
+			i = j
+		}
+	}
+	// Return the buffer as a string.
+	return buf.String(), nil
+}
+
+// Push a stacker-value onto the stack.
+func (st *stack) push(s stacker) {
+	*st = append(*st, s)
+}
+
+// Pop a stacker-value from the stack.
+func (st *stack) pop() (stacker, error) {
+	if len(*st) == 0 {
+		return nil, errors.New("Stack is empty.")
+	}
+	newStack := make(stack, len(*st)-1)
+	val := (*st)[len(*st)-1]
+	copy(newStack, (*st)[:len(*st)-1])
+	*st = newStack
+	return val, nil
+}
+
+// Initialize regexes and the static vars (that don't get changed between
+// calls).
+func init() {
+	// Initialize the main regex.
+	expStr := strings.Join(exp[:], "|")
+	regex, _ = regexp.Compile(expStr)
+	// Initialize the static variables.
+	staticVar = make(map[byte]stacker, 26)
+}
diff --git a/vendor/github.com/Nvveen/Gotty/types.go b/vendor/github.com/Nvveen/Gotty/types.go
new file mode 100644
index 00000000..9bcc65e9
--- /dev/null
+++ b/vendor/github.com/Nvveen/Gotty/types.go
@@ -0,0 +1,23 @@
+// Copyright 2012 Neal van Veen. All rights reserved.
+// Usage of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+ +package gotty + +type TermInfo struct { + boolAttributes map[string]bool + numAttributes map[string]int16 + strAttributes map[string]string + // The various names of the TermInfo file. + Names []string +} + +type stacker interface { +} +type stack []stacker + +type parser struct { + st stack + parameters []stacker + dynamicVar map[byte]stacker +} diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE deleted file mode 100644 index 21253788..00000000 --- a/vendor/github.com/apparentlymart/go-cidr/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Martin Atkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go deleted file mode 100644 index ed749d4c..00000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ /dev/null @@ -1,218 +0,0 @@ -// Package cidr is a collection of assorted utilities for computing -// network and host addresses within network ranges. -// -// It expects a CIDR-type address structure where addresses are divided into -// some number of prefix bits representing the network and then the remaining -// suffix bits represent the host. -// -// For example, it can help to calculate addresses for sub-networks of a -// parent network, or to calculate host addresses within a particular prefix. -// -// At present this package is prioritizing simplicity of implementation and -// de-prioritizing speed and memory usage. Thus caution is advised before -// using this package in performance-critical applications or hot codepaths. -// Patches to improve the speed and memory usage may be accepted as long as -// they do not result in a significant increase in code complexity. -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -// Subnet takes a parent CIDR range and creates a subnet within it -// with the given number of additional prefix bits and the given -// network number. -// -// For example, 10.3.0.0/16, extended by 8 bits, with a network number -// of 5, becomes 10.3.5.0/24 . 
-func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
-	ip := base.IP
-	mask := base.Mask
-
-	parentLen, addrLen := mask.Size()
-	newPrefixLen := parentLen + newBits
-
-	if newPrefixLen > addrLen {
-		return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits)
-	}
-
-	maxNetNum := uint64(1<<uint64(newBits)) - 1
-	if uint64(num) > maxNetNum {
-		return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num)
-	}
-
-	return &net.IPNet{
-		IP:   insertNumIntoIP(ip, big.NewInt(int64(num)), newPrefixLen),
-		Mask: net.CIDRMask(newPrefixLen, addrLen),
-	}, nil
-}
-
-// Host takes a parent CIDR range and turns it into a host IP address with
-// the given host number.
-//
-// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
-func Host(base *net.IPNet, num int) (net.IP, error) {
-	ip := base.IP
-	mask := base.Mask
-	bigNum := big.NewInt(int64(num))
-
-	parentLen, addrLen := mask.Size()
-	hostLen := addrLen - parentLen
-
-	maxHostNum := big.NewInt(int64(1))
-	maxHostNum.Lsh(maxHostNum, uint(hostLen))
-	maxHostNum.Sub(maxHostNum, big.NewInt(1))
-
-	numUint64 := big.NewInt(int64(bigNum.Uint64()))
-	if bigNum.Cmp(big.NewInt(0)) == -1 {
-		numUint64.Neg(bigNum)
-		numUint64.Sub(numUint64, big.NewInt(int64(1)))
-		bigNum.Sub(maxHostNum, numUint64)
-	}
-
-	if numUint64.Cmp(maxHostNum) == 1 {
-		return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
-	}
-	var bitlength int
-	if ip.To4() != nil {
-		bitlength = 32
-	} else {
-		bitlength = 128
-	}
-	return insertNumIntoIP(ip, bigNum, bitlength), nil
-}
-
-// AddressRange returns the first and last addresses in the given CIDR range.
-func AddressRange(network *net.IPNet) (net.IP, net.IP) {
-	// the first IP is easy
-	firstIP := network.IP
-
-	// the last IP is the network address OR NOT the mask address
-	prefixLen, bits := network.Mask.Size()
-	if prefixLen == bits {
-		// Easy!
-		// But make sure that our two slices are distinct, since they
-		// would be in all other cases.
-		lastIP := make([]byte, len(firstIP))
-		copy(lastIP, firstIP)
-		return firstIP, lastIP
-	}
-
-	firstIPInt, bits := ipToInt(firstIP)
-	hostLen := uint(bits) - uint(prefixLen)
-	lastIPInt := big.NewInt(1)
-	lastIPInt.Lsh(lastIPInt, hostLen)
-	lastIPInt.Sub(lastIPInt, big.NewInt(1))
-	lastIPInt.Or(lastIPInt, firstIPInt)
-
-	return firstIP, intToIP(lastIPInt, bits)
-}
-
-// AddressCount returns the number of distinct host addresses within the given
-// CIDR range.
-//
-// Since the result is a uint64, this function returns meaningful information
-// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
-func AddressCount(network *net.IPNet) uint64 { - prefixLen, bits := network.Mask.Size() - return 1 << (uint64(bits) - uint64(prefixLen)) -} - -//VerifyNoOverlap takes a list subnets and supernet (CIDRBlock) and verifies -//none of the subnets overlap and all subnets are in the supernet -//it returns an error if any of those conditions are not satisfied -func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { - firstLastIP := make([][]net.IP, len(subnets)) - for i, s := range subnets { - first, last := AddressRange(s) - firstLastIP[i] = []net.IP{first, last} - } - for i, s := range subnets { - if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { - return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) - } - for j := 0; j < len(subnets); j++ { - if i == j { - continue - } - - first := firstLastIP[j][0] - last := firstLastIP[j][1] - if s.Contains(first) || s.Contains(last) { - return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) - } - } - } - return nil -} - -// PreviousSubnet returns the subnet of the desired mask in the IP space -// just lower than the start of IPNet provided. If the IP space rolls over -// then the second return value is true -func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - startIP := checkIPv4(network.IP) - previousIP := make(net.IP, len(startIP)) - copy(previousIP, startIP) - cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) - previousIP = Dec(previousIP) - previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} - if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { - return previous, true - } - return previous, false -} - -// NextSubnet returns the next available subnet of the desired mask size -// starting for the maximum IP of the offset subnet -// If the IP exceeds the maxium IP then the second return value is true -func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - _, currentLast := AddressRange(network) - mask := net.CIDRMask(prefixLen, 8*len(currentLast)) - currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} - _, last := AddressRange(currentSubnet) - last = Inc(last) - next := &net.IPNet{IP: last.Mask(mask), Mask: mask} - if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { - return next, true - } - return next, false -} - -//Inc increases the IP by one this returns a new []byte for the IP -func Inc(IP net.IP) net.IP { - IP = checkIPv4(IP) - incIP := make([]byte, len(IP)) - copy(incIP, IP) - for j := len(incIP) - 1; j >= 0; j-- { - incIP[j]++ - if incIP[j] > 0 { - break - } - } - return incIP -} - -//Dec decreases the IP by one this returns a new []byte for the IP -func Dec(IP net.IP) net.IP { - IP = checkIPv4(IP) - decIP := make([]byte, len(IP)) - copy(decIP, IP) - decIP = checkIPv4(decIP) - for j := len(decIP) - 1; j >= 0; j-- { - decIP[j]-- - if decIP[j] < 255 { - break - } - } - return decIP -} - -func checkIPv4(ip net.IP) net.IP { - // Go for some reason allocs IPv6len for IPv4 so we have to correct it - if v4 := ip.To4(); v4 != nil { - return v4 - } - return ip -} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go deleted file mode 100644 index e5e6a2cf..00000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go +++ /dev/null @@ -1,37 +0,0 @@ -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -func ipToInt(ip net.IP) (*big.Int, int) { - val := 
&big.Int{} - val.SetBytes([]byte(ip)) - if len(ip) == net.IPv4len { - return val, 32 - } else if len(ip) == net.IPv6len { - return val, 128 - } else { - panic(fmt.Errorf("Unsupported address length %d", len(ip))) - } -} - -func intToIP(ipInt *big.Int, bits int) net.IP { - ipBytes := ipInt.Bytes() - ret := make([]byte, bits/8) - // Pack our IP bytes into the end of the return array, - // since big.Int.Bytes() removes front zero padding. - for i := 1; i <= len(ipBytes); i++ { - ret[len(ret)-i] = ipBytes[len(ipBytes)-i] - } - return net.IP(ret) -} - -func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP { - ipInt, totalBits := ipToInt(ip) - bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) - ipInt.Or(ipInt, bigNum) - return intToIP(ipInt, totalBits) -} diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml deleted file mode 100644 index 1a0bbea6..00000000 --- a/vendor/github.com/armon/go-radix/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: - - tip diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE deleted file mode 100644 index a5df10e6..00000000 --- a/vendor/github.com/armon/go-radix/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md deleted file mode 100644 index 26f42a28..00000000 --- a/vendor/github.com/armon/go-radix/README.md +++ /dev/null @@ -1,38 +0,0 @@ -go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) -========= - -Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). -The package only provides a single `Tree` implementation, optimized for sparse nodes. - -As a radix tree, it provides the following: - * O(k) operations. In many cases, this can be faster than a hash table since - the hash function is an O(k) operation, and hash tables have very poor cache locality. - * Minimum / Maximum value lookups - * Ordered iteration - -For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). 
- -Example -======= - -Below is a simple example of usage - -```go -// Create a tree -r := radix.New() -r.Insert("foo", 1) -r.Insert("bar", 2) -r.Insert("foobar", 2) - -// Find the longest prefix match -m, _, _ := r.LongestPrefix("foozip") -if m != "foo" { - panic("should be foo") -} -``` - diff --git a/vendor/github.com/armon/go-radix/go.mod b/vendor/github.com/armon/go-radix/go.mod deleted file mode 100644 index 4336aa29..00000000 --- a/vendor/github.com/armon/go-radix/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/armon/go-radix diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go deleted file mode 100644 index e2bb22eb..00000000 --- a/vendor/github.com/armon/go-radix/radix.go +++ /dev/null @@ -1,540 +0,0 @@ -package radix - -import ( - "sort" - "strings" -) - -// WalkFn is used when walking the tree. Takes a -// key and value, returning if iteration should -// be terminated. -type WalkFn func(s string, v interface{}) bool - -// leafNode is used to represent a value -type leafNode struct { - key string - val interface{} -} - -// edge is used to represent an edge node -type edge struct { - label byte - node *node -} - -type node struct { - // leaf is used to store possible leaf - leaf *leafNode - - // prefix is the common prefix we ignore - prefix string - - // Edges should be stored in-order for iteration. - // We avoid a fully materialized slice to save memory, - // since in most cases we expect to be sparse - edges edges -} - -func (n *node) isLeaf() bool { - return n.leaf != nil -} - -func (n *node) addEdge(e edge) { - n.edges = append(n.edges, e) - n.edges.Sort() -} - -func (n *node) updateEdge(label byte, node *node) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - n.edges[idx].node = node - return - } - panic("replacing missing edge") -} - -func (n *node) getEdge(label byte) *node { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - return n.edges[idx].node - } - return nil -} - -func (n *node) delEdge(label byte) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - copy(n.edges[idx:], n.edges[idx+1:]) - n.edges[len(n.edges)-1] = edge{} - n.edges = n.edges[:len(n.edges)-1] - } -} - -type edges []edge - -func (e edges) Len() int { - return len(e) -} - -func (e edges) Less(i, j int) bool { - return e[i].label < e[j].label -} - -func (e edges) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e edges) Sort() { - sort.Sort(e) -} - -// Tree implements a radix tree. This can be treated as a -// Dictionary abstract data type. 
The main advantage over -// a standard hash map is prefix-based lookups and -// ordered iteration, -type Tree struct { - root *node - size int -} - -// New returns an empty Tree -func New() *Tree { - return NewFromMap(nil) -} - -// NewFromMap returns a new tree containing the keys -// from an existing map -func NewFromMap(m map[string]interface{}) *Tree { - t := &Tree{root: &node{}} - for k, v := range m { - t.Insert(k, v) - } - return t -} - -// Len is used to return the number of elements in the tree -func (t *Tree) Len() int { - return t.size -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 string) int { - max := len(k1) - if l := len(k2); l < max { - max = l - } - var i int - for i = 0; i < max; i++ { - if k1[i] != k2[i] { - break - } - } - return i -} - -// Insert is used to add a newentry or update -// an existing entry. Returns if updated. -func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { - var parent *node - n := t.root - search := s - for { - // Handle key exhaution - if len(search) == 0 { - if n.isLeaf() { - old := n.leaf.val - n.leaf.val = v - return old, true - } - - n.leaf = &leafNode{ - key: s, - val: v, - } - t.size++ - return nil, false - } - - // Look for the edge - parent = n - n = n.getEdge(search[0]) - - // No edge, create one - if n == nil { - e := edge{ - label: search[0], - node: &node{ - leaf: &leafNode{ - key: s, - val: v, - }, - prefix: search, - }, - } - parent.addEdge(e) - t.size++ - return nil, false - } - - // Determine longest prefix of the search key on match - commonPrefix := longestPrefix(search, n.prefix) - if commonPrefix == len(n.prefix) { - search = search[commonPrefix:] - continue - } - - // Split the node - t.size++ - child := &node{ - prefix: search[:commonPrefix], - } - parent.updateEdge(search[0], child) - - // Restore the existing node - child.addEdge(edge{ - label: n.prefix[commonPrefix], - node: n, - }) - n.prefix = n.prefix[commonPrefix:] - - // Create a new leaf node - leaf := &leafNode{ - key: s, - val: v, - } - - // If the new key is a subset, add to to this node - search = search[commonPrefix:] - if len(search) == 0 { - child.leaf = leaf - return nil, false - } - - // Create a new edge for the node - child.addEdge(edge{ - label: search[0], - node: &node{ - leaf: leaf, - prefix: search, - }, - }) - return nil, false - } -} - -// Delete is used to delete a key, returning the previous -// value and if it was deleted -func (t *Tree) Delete(s string) (interface{}, bool) { - var parent *node - var label byte - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if !n.isLeaf() { - break - } - goto DELETE - } - - // Look for an edge - parent = n - label = search[0] - n = n.getEdge(label) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false - -DELETE: - // Delete the leaf - leaf := n.leaf - n.leaf = nil - t.size-- - - // Check if we should delete this node from the parent - if parent != nil && len(n.edges) == 0 { - parent.delEdge(label) - } - - // Check if we should merge this node - if n != t.root && len(n.edges) == 1 { - n.mergeChild() - } - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - - return leaf.val, true -} - -// DeletePrefix is used to delete the subtree under a prefix -// 
Returns how many nodes were deleted -// Use this to delete large subtrees efficiently -func (t *Tree) DeletePrefix(s string) int { - return t.deletePrefix(nil, t.root, s) -} - -// delete does a recursive deletion -func (t *Tree) deletePrefix(parent, n *node, prefix string) int { - // Check for key exhaustion - if len(prefix) == 0 { - // Remove the leaf node - subTreeSize := 0 - //recursively walk from all edges of the node to be deleted - recursiveWalk(n, func(s string, v interface{}) bool { - subTreeSize++ - return false - }) - if n.isLeaf() { - n.leaf = nil - } - n.edges = nil // deletes the entire subtree - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - t.size -= subTreeSize - return subTreeSize - } - - // Look for an edge - label := prefix[0] - child := n.getEdge(label) - if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) { - return 0 - } - - // Consume the search prefix - if len(child.prefix) > len(prefix) { - prefix = prefix[len(prefix):] - } else { - prefix = prefix[len(child.prefix):] - } - return t.deletePrefix(n, child, prefix) -} - -func (n *node) mergeChild() { - e := n.edges[0] - child := e.node - n.prefix = n.prefix + child.prefix - n.leaf = child.leaf - n.edges = child.edges -} - -// Get is used to lookup a specific key, returning -// the value and if it was found -func (t *Tree) Get(s string) (interface{}, bool) { - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if n.isLeaf() { - return n.leaf.val, true - } - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false -} - -// LongestPrefix is like Get, but instead of an -// exact match, it will return the longest prefix match. 
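As a rough sketch of the lookup and prefix-deletion semantics these doc comments describe (a toy tree using the import path from the README above, not code from this provider):

```go
package main

import (
	"fmt"

	"github.com/armon/go-radix"
)

func main() {
	r := radix.New()
	r.Insert("alpha", 1)
	r.Insert("alphabet", 2)
	r.Insert("beta", 3)

	// Get is an exact-match lookup.
	v, ok := r.Get("alpha")
	fmt.Println(v, ok) // 1 true

	// LongestPrefix returns the longest stored key that prefixes the input.
	k, v, ok := r.LongestPrefix("alphabetical")
	fmt.Println(k, v, ok) // alphabet 2 true

	// DeletePrefix removes the whole subtree and reports how many entries went.
	n := r.DeletePrefix("alpha")
	fmt.Println(n, r.Len()) // 2 1
}
```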
-func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { - var last *leafNode - n := t.root - search := s - for { - // Look for a leaf node - if n.isLeaf() { - last = n.leaf - } - - // Check for key exhaution - if len(search) == 0 { - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - if last != nil { - return last.key, last.val, true - } - return "", nil, false -} - -// Minimum is used to return the minimum value in the tree -func (t *Tree) Minimum() (string, interface{}, bool) { - n := t.root - for { - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - if len(n.edges) > 0 { - n = n.edges[0].node - } else { - break - } - } - return "", nil, false -} - -// Maximum is used to return the maximum value in the tree -func (t *Tree) Maximum() (string, interface{}, bool) { - n := t.root - for { - if num := len(n.edges); num > 0 { - n = n.edges[num-1].node - continue - } - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - break - } - return "", nil, false -} - -// Walk is used to walk the tree -func (t *Tree) Walk(fn WalkFn) { - recursiveWalk(t.root, fn) -} - -// WalkPrefix is used to walk the tree under a prefix -func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { - n := t.root - search := prefix - for { - // Check for key exhaution - if len(search) == 0 { - recursiveWalk(n, fn) - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - - } else if strings.HasPrefix(n.prefix, search) { - // Child may be under our search prefix - recursiveWalk(n, fn) - return - } else { - break - } - } - -} - -// WalkPath is used to walk the tree, but only visiting nodes -// from the root down to a given leaf. Where WalkPrefix walks -// all the entries *under* the given prefix, this walks the -// entries *above* the given prefix. -func (t *Tree) WalkPath(path string, fn WalkFn) { - n := t.root - search := path - for { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return - } - - // Check for key exhaution - if len(search) == 0 { - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - return - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } -} - -// recursiveWalk is used to do a pre-order walk of a node -// recursively. Returns true if the walk should be aborted -func recursiveWalk(n *node, fn WalkFn) bool { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return true - } - - // Recurse on the children - for _, e := range n.edges { - if recursiveWalk(e.node, fn) { - return true - } - } - return false -} - -// ToMap is used to walk the tree and convert it into a map -func (t *Tree) ToMap() map[string]interface{} { - out := make(map[string]interface{}, t.size) - t.Walk(func(k string, v interface{}) bool { - out[k] = v - return false - }) - return out -} diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt index 899129ec..5f14d116 100644 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -1,3 +1,3 @@ AWS SDK for Go -Copyright 2015 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index 99849c0e..56fdfc2b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -138,27 +138,8 @@ type RequestFailure interface { RequestID() string } -// NewRequestFailure returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { return newRequestError(err, statusCode, reqID) } - -// UnmarshalError provides the interface for the SDK failing to unmarshal data. -type UnmarshalError interface { - awsError - Bytes() []byte -} - -// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding -// the bytes that fail to unmarshal to the error. -func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { - return &unmarshalError{ - awsError: New("UnmarshalError", msg, err), - bytes: bytes, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go index 9cf7eaf4..0202a008 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -1,9 +1,6 @@ package awserr -import ( - "encoding/hex" - "fmt" -) +import "fmt" // SprintError returns a string of the formatted error code. // @@ -122,7 +119,6 @@ type requestError struct { awsError statusCode int requestID string - bytes []byte } // newRequestError returns a wrapped error with additional information for @@ -174,29 +170,6 @@ func (r requestError) OrigErrs() []error { return []error{r.OrigErr()} } -type unmarshalError struct { - awsError - bytes []byte -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (e unmarshalError) Error() string { - extra := hex.Dump(e.bytes) - return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (e unmarshalError) String() string { - return e.Error() -} - -// Bytes returns the bytes that failed to unmarshal. -func (e unmarshalError) Bytes() []byte { - return e.bytes -} - // An error list that satisfies the golang interface type errorList []error @@ -208,7 +181,7 @@ func (e errorList) Error() string { // How do we want to handle the array size being zero if size := len(e); size > 0 { for i := 0; i < size; i++ { - msg += e[i].Error() + msg += fmt.Sprintf("%s", e[i].Error()) // We check the next index to see if it is within the slice. // If it is, then we append a newline. 
We do this, because unit tests // could be broken with the additional '\n' diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go index 142a7a01..59fa4a55 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -15,7 +15,7 @@ func DeepEqual(a, b interface{}) bool { rb := reflect.Indirect(reflect.ValueOf(b)) if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type they are equal + // If the elements are both nil, and of the same type the are equal // If they are of different types they are not equal return reflect.TypeOf(a) == reflect.TypeOf(b) } else if raValid != rbValid { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go index 285e54d6..11c52c38 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -185,12 +185,13 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { // SetValueAtPath sets a value at the case insensitive lexical path inside // of a structure. func SetValueAtPath(i interface{}, path string, v interface{}) { - rvals := rValuesAtPath(i, path, true, false, v == nil) - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) } - setValue(rval, v) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go index 645df245..b6432f1a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -23,27 +23,28 @@ func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { case reflect.Struct: buf.WriteString("{\n") + names := []string{} for i := 0; i < v.Type().NumField(); i++ { - ft := v.Type().Field(i) - fv := v.Field(i) - - if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { continue // ignore unexported fields } - if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { continue // ignore unset fields } + names = append(names, name) + } + for i, n := range names { + val := v.FieldByName(n) buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(ft.Name + ": ") + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) - if tag := ft.Tag.Get("sensitive"); tag == "true" { - buf.WriteString("") - } else { - stringValue(fv, indent+2, buf) + if i < len(names)-1 { + buf.WriteString(",\n") } - - buf.WriteString(",\n") } buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index c022407f..212fe25e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -18,7 +18,7 @@ type Config struct { // States that the signing name did not 
come from a modeled source but // was derived based on other data. Used by service client constructors - // to determine if the signin name can be overridden based on metadata the + // to determine if the signin name can be overriden based on metadata the // service has. SigningNameDerived bool } @@ -64,7 +64,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op default: maxRetries := aws.IntValue(cfg.MaxRetries) if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = DefaultRetryerMaxNumRetries + maxRetries = 3 } svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 0fda4251..a397b0d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,7 +1,6 @@ package client import ( - "math" "strconv" "time" @@ -10,142 +9,82 @@ import ( ) // DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, you can implement the -// request.Retryer interface. +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. For example, to override only +// the MaxRetries method: // +// type retryer struct { +// client.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { - // Num max Retries is the number of max retries that will be performed. - // By default, this is zero. - NumMaxRetries int - - // MinRetryDelay is the minimum retry delay after which retry will be performed. - // If not set, the value is 0ns. - MinRetryDelay time.Duration - - // MinThrottleRetryDelay is the minimum retry delay when throttled. - // If not set, the value is 0ns. - MinThrottleDelay time.Duration - - // MaxRetryDelay is the maximum retry delay before which retry must be performed. - // If not set, the value is 0ns. - MaxRetryDelay time.Duration - - // MaxThrottleDelay is the maximum retry delay when throttled. - // If not set, the value is 0ns. - MaxThrottleDelay time.Duration + NumMaxRetries int } -const ( - // DefaultRetryerMaxNumRetries sets maximum number of retries - DefaultRetryerMaxNumRetries = 3 - - // DefaultRetryerMinRetryDelay sets minimum retry delay - DefaultRetryerMinRetryDelay = 30 * time.Millisecond - - // DefaultRetryerMinThrottleDelay sets minimum delay when throttled - DefaultRetryerMinThrottleDelay = 500 * time.Millisecond - - // DefaultRetryerMaxRetryDelay sets maximum retry delay - DefaultRetryerMaxRetryDelay = 300 * time.Second - - // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled - DefaultRetryerMaxThrottleDelay = 300 * time.Second -) - // MaxRetries returns the number of maximum returns the service will use to make // an individual API request. 
 func (d DefaultRetryer) MaxRetries() int {
 	return d.NumMaxRetries
 }
 
-// setRetryerDefaults sets the default values of the retryer if not set
-func (d *DefaultRetryer) setRetryerDefaults() {
-	if d.MinRetryDelay == 0 {
-		d.MinRetryDelay = DefaultRetryerMinRetryDelay
-	}
-	if d.MaxRetryDelay == 0 {
-		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
-	}
-	if d.MinThrottleDelay == 0 {
-		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
-	}
-	if d.MaxThrottleDelay == 0 {
-		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
-	}
-}
-
 // RetryRules returns the delay duration before retrying this request again
 func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
-
-	// if number of max retries is zero, no retries will be performed.
-	if d.NumMaxRetries == 0 {
-		return 0
-	}
-
-	// Sets default value for retryer members
-	d.setRetryerDefaults()
-
-	// minDelay is the minimum retryer delay
-	minDelay := d.MinRetryDelay
-
-	var initialDelay time.Duration
-
-	isThrottle := r.IsErrorThrottle()
-	if isThrottle {
-		if delay, ok := getRetryAfterDelay(r); ok {
-			initialDelay = delay
+	// Set the upper limit of delay in retrying at ~five minutes
+	minTime := 30
+	throttle := d.shouldThrottle(r)
+	if throttle {
+		if delay, ok := getRetryDelay(r); ok {
+			return delay
 		}
-		minDelay = d.MinThrottleDelay
-	}
-
-	retryCount := r.RetryCount
-	// maxDelay the maximum retryer delay
-	maxDelay := d.MaxRetryDelay
-
-	if isThrottle {
-		maxDelay = d.MaxThrottleDelay
+		minTime = 500
 	}
 
-	var delay time.Duration
-
-	// Logic to cap the retry count based on the minDelay provided
-	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
-	if actualRetryCount < 63-retryCount {
-		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
-		if delay > maxDelay {
-			delay = getJitterDelay(maxDelay / 2)
-		}
-	} else {
-		delay = getJitterDelay(maxDelay / 2)
+	retryCount := r.RetryCount
+	if throttle && retryCount > 8 {
+		retryCount = 8
+	} else if retryCount > 13 {
+		retryCount = 13
 	}
 
-	return delay + initialDelay
-}
-
-// getJitterDelay returns a jittered delay for retry
-func getJitterDelay(duration time.Duration) time.Duration {
-	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
+	delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
+	return time.Duration(delay) * time.Millisecond
 }
 
 // ShouldRetry returns true if the request should be retried.
 func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
-
-	// ShouldRetry returns false if number of max retries is 0.
-	if d.NumMaxRetries == 0 {
-		return false
-	}
-
 	// If one of the other handlers already set the retry state
 	// we don't want to override it based on the service's state
 	if r.Retryable != nil {
 		return *r.Retryable
 	}
-	return r.IsErrorRetryable() || r.IsErrorThrottle()
+
+	if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
+		return true
+	}
+	return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// ShouldThrottle returns true if the request should be throttled.
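To make the restored backoff concrete, a small standalone sketch of the delay arithmetic in RetryRules above; retryDelay here is a hypothetical helper that mirrors the in-tree formula, not SDK API:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryDelay mirrors the formula reinstated in RetryRules:
// delay = (1 << retryCount) * (rand(minTime) + minTime) milliseconds.
func retryDelay(retryCount, minTime int) time.Duration {
	delay := (1 << uint(retryCount)) * (rand.Intn(minTime) + minTime)
	return time.Duration(delay) * time.Millisecond
}

func main() {
	// Non-throttled requests use minTime = 30, so attempt N waits between
	// 2^N*30ms and 2^N*59ms (e.g. attempt 3 lands in 240ms-472ms).
	for count := 0; count < 4; count++ {
		fmt.Println(count, retryDelay(count, 30))
	}
}
```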
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 502: + case 503: + case 504: + default: + return r.IsErrorThrottle() + } + + return true } // This will look in the Retry-After header, RFC 7231, for how long // it will wait before attempting another request -func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { +func getRetryDelay(r *request.Request) (time.Duration, bool) { if !canUseRetryAfterHeader(r) { return 0, false } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index 8958c32d..ce9fb896 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -67,14 +67,10 @@ func logRequest(r *request.Request) { if !bodySeekable { r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) } - // Reset the request body because dumpRequest will re-wrap the - // r.HTTPRequest's Body as a NoOpCloser and will not be reset after - // read by the HTTP client reader. - if err := r.Error; err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.ResetBody() } r.Config.Logger.Log(fmt.Sprintf(logReqMsg, @@ -122,12 +118,6 @@ var LogHTTPResponseHandler = request.NamedHandler{ func logResponse(r *request.Request) { lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - if r.HTTPResponse == nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) - return - } - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) if logBody { r.HTTPResponse.Body = &teeReaderCloser{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go deleted file mode 100644 index 881d575f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// NoOpRetryer provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type NoOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d NoOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index fd1e240f..e9695ef2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -20,7 +20,7 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. 
By default, // all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetries configuration to be shared by multiple +// // Create Session with MaxRetry configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), @@ -239,19 +239,12 @@ type Config struct { // Key: aws.String("/foo/bar/moo"), // }) EnableEndpointDiscovery *bool - - // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing - // request endpoint hosts with modeled information. - // - // Disabling this feature is useful when you want to use local endpoints - // for testing that do not support the modeled host prefix pattern. - DisableEndpointHostPrefix *bool } // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetries configuration to be shared by multiple +// // Create Session with MaxRetry configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(aws.NewConfig(). // WithMaxRetries(3), @@ -406,13 +399,6 @@ func (c *Config) WithEndpointDiscovery(t bool) *Config { return c } -// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix -// when making requests. -func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { - c.DisableEndpointHostPrefix = &t - return c -} - // MergeIn merges the passed in configs into the existing config object. func (c *Config) MergeIn(cfgs ...*Config) { for _, other := range cfgs { @@ -516,10 +502,6 @@ func mergeInConfig(dst *Config, other *Config) { if other.EnableEndpointDiscovery != nil { dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery } - - if other.DisableEndpointHostPrefix != nil { - dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix - } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go similarity index 58% rename from vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go rename to vendor/github.com/aws/aws-sdk-go/aws/context.go index 2866f9a7..79f42685 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context.go @@ -1,8 +1,8 @@ -// +build !go1.9 - package aws -import "time" +import ( + "time" +) // Context is an copy of the Go v1.7 stdlib's context.Context interface. // It is represented as a SDK interface to enable you to use the "WithContext" @@ -35,3 +35,37 @@ type Context interface { // functions. Value(key interface{}) interface{} } + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return backgroundCtx +} + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. 
+// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go similarity index 59% rename from vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go rename to vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go index 66c5945d..8fdda530 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go @@ -39,18 +39,3 @@ func (e *emptyCtx) String() string { var ( backgroundCtx = new(emptyCtx) ) - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return backgroundCtx -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go new file mode 100644 index 00000000..064f75c9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go @@ -0,0 +1,9 @@ +// +build go1.7 + +package aws + +import "context" + +var ( + backgroundCtx = context.Background() +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go deleted file mode 100644 index 3718b26e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.9 - -package aws - -import "context" - -// Context is an alias of the Go stdlib's context.Context interface. -// It can be used within the SDK's API operation "WithContext" methods. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go deleted file mode 100644 index 9c29f29a..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build go1.7 - -package aws - -import "context" - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. 
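A minimal usage sketch of the SleepWithContext helper consolidated above; a plain context.Context satisfies the aws.Context interface, so the sleep returns early when the deadline fires:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// The one-second sleep is cut short by the 50ms context deadline.
	if err := aws.SleepWithContext(ctx, time.Second); err != nil {
		fmt.Println("sleep interrupted:", err) // context deadline exceeded
	}
}
```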
-func BackgroundContext() Context { - return context.Background() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go deleted file mode 100644 index 304fd156..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go +++ /dev/null @@ -1,24 +0,0 @@ -package aws - -import ( - "time" -) - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// Expects Context to always return a non-nil error if the Done channel is closed. -func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index 4e076c18..ff5d58e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -179,242 +179,6 @@ func IntValueMap(src map[string]*int) map[string]int { return dst } -// Uint returns a pointer to the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pointer passed in or -// 0 if the pointer is nil. -func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values uinto a slice of -// uint pointers -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pointers uinto a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values uinto a string -// map of uint pointers -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pointers uinto a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int8 returns a pointer to the int8 value passed in. -func Int8(v int8) *int8 { - return &v -} - -// Int8Value returns the value of the int8 pointer passed in or -// 0 if the pointer is nil. 
-func Int8Value(v *int8) int8 { - if v != nil { - return *v - } - return 0 -} - -// Int8Slice converts a slice of int8 values into a slice of -// int8 pointers -func Int8Slice(src []int8) []*int8 { - dst := make([]*int8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int8ValueSlice converts a slice of int8 pointers into a slice of -// int8 values -func Int8ValueSlice(src []*int8) []int8 { - dst := make([]int8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int8Map converts a string map of int8 values into a string -// map of int8 pointers -func Int8Map(src map[string]int8) map[string]*int8 { - dst := make(map[string]*int8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int8ValueMap converts a string map of int8 pointers into a string -// map of int8 values -func Int8ValueMap(src map[string]*int8) map[string]int8 { - dst := make(map[string]int8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int16 returns a pointer to the int16 value passed in. -func Int16(v int16) *int16 { - return &v -} - -// Int16Value returns the value of the int16 pointer passed in or -// 0 if the pointer is nil. -func Int16Value(v *int16) int16 { - if v != nil { - return *v - } - return 0 -} - -// Int16Slice converts a slice of int16 values into a slice of -// int16 pointers -func Int16Slice(src []int16) []*int16 { - dst := make([]*int16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int16ValueSlice converts a slice of int16 pointers into a slice of -// int16 values -func Int16ValueSlice(src []*int16) []int16 { - dst := make([]int16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int16Map converts a string map of int16 values into a string -// map of int16 pointers -func Int16Map(src map[string]int16) map[string]*int16 { - dst := make(map[string]*int16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int16ValueMap converts a string map of int16 pointers into a string -// map of int16 values -func Int16ValueMap(src map[string]*int16) map[string]int16 { - dst := make(map[string]int16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int32 pointer passed in or -// 0 if the pointer is nil. 
-func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int32 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - // Int64 returns a pointer to the int64 value passed in. func Int64(v int64) *int64 { return &v @@ -474,301 +238,6 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } -// Uint8 returns a pointer to the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return &v -} - -// Uint8Value returns the value of the uint8 pointer passed in or -// 0 if the pointer is nil. -func Uint8Value(v *uint8) uint8 { - if v != nil { - return *v - } - return 0 -} - -// Uint8Slice converts a slice of uint8 values into a slice of -// uint8 pointers -func Uint8Slice(src []uint8) []*uint8 { - dst := make([]*uint8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint8ValueSlice converts a slice of uint8 pointers into a slice of -// uint8 values -func Uint8ValueSlice(src []*uint8) []uint8 { - dst := make([]uint8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint8Map converts a string map of uint8 values into a string -// map of uint8 pointers -func Uint8Map(src map[string]uint8) map[string]*uint8 { - dst := make(map[string]*uint8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint8ValueMap converts a string map of uint8 pointers into a string -// map of uint8 values -func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { - dst := make(map[string]uint8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint16 returns a pointer to the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Value returns the value of the uint16 pointer passed in or -// 0 if the pointer is nil. 
-func Uint16Value(v *uint16) uint16 { - if v != nil { - return *v - } - return 0 -} - -// Uint16Slice converts a slice of uint16 values into a slice of -// uint16 pointers -func Uint16Slice(src []uint16) []*uint16 { - dst := make([]*uint16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint16ValueSlice converts a slice of uint16 pointers into a slice of -// uint16 values -func Uint16ValueSlice(src []*uint16) []uint16 { - dst := make([]uint16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint16Map converts a string map of uint16 values into a string -// map of uint16 pointers -func Uint16Map(src map[string]uint16) map[string]*uint16 { - dst := make(map[string]*uint16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint16ValueMap converts a string map of uint16 pointers into a string -// map of uint16 values -func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { - dst := make(map[string]uint16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pointer to the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint32 pointer passed in or -// 0 if the pointer is nil. -func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint32 values into a slice of -// uint32 pointers -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pointers into a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values into a string -// map of uint32 pointers -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pointers into a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pointer to the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pointer passed in or -// 0 if the pointer is nil. 
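Every pair in this file follows the same pointer/value pattern; the surviving Int64 helpers illustrate it (a nil pointer dereferences safely to the zero value):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Wrap a literal so it can be passed where the SDK expects *int64.
	p := aws.Int64(42)

	// Read back with a nil-safe dereference: nil pointers become 0.
	fmt.Println(aws.Int64Value(p))   // 42
	fmt.Println(aws.Int64Value(nil)) // 0
}
```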
-func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values into a slice of -// uint64 pointers -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pointers into a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values into a string -// map of uint64 pointers -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pointers into a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float32 returns a pointer to the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Value returns the value of the float32 pointer passed in or -// 0 if the pointer is nil. -func Float32Value(v *float32) float32 { - if v != nil { - return *v - } - return 0 -} - -// Float32Slice converts a slice of float32 values into a slice of -// float32 pointers -func Float32Slice(src []float32) []*float32 { - dst := make([]*float32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float32ValueSlice converts a slice of float32 pointers into a slice of -// float32 values -func Float32ValueSlice(src []*float32) []float32 { - dst := make([]float32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float32Map converts a string map of float32 values into a string -// map of float32 pointers -func Float32Map(src map[string]float32) map[string]*float32 { - dst := make(map[string]*float32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float32ValueMap converts a string map of float32 pointers into a string -// map of float32 values -func Float32ValueMap(src map[string]*float32) map[string]float32 { - dst := make(map[string]float32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - // Float64 returns a pointer to the float64 value passed in. func Float64(v float64) *float64 { return &v diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 0c60e612..cfcddf3d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -72,9 +72,9 @@ var ValidateReqSigHandler = request.NamedHandler{ signedTime = r.LastSignedAt } - // 5 minutes to allow for some clock skew/delays in transmission. + // 10 minutes to allow for some clock skew/delays in transmission. 
// Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(5 * time.Minute).After(time.Now()) { + if signedTime.Add(10 * time.Minute).After(time.Now()) { return } @@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) { Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } - // Catch all request errors, and let the default retrier determine - // if the error is retryable. + // Catch all other request errors. r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable // Override the error with a context canceled error, if that was canceled. ctx := r.Context() @@ -184,39 +184,37 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{ - Name: "core.AfterRetryHandler", - Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } - r.RetryCount++ - r.Error = nil + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() } - }} + + r.RetryCount++ + r.Error = nil + } +}} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. 
Will set r.Error if the endpoint or diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go index ab69c7a6..a15f496b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -17,7 +17,7 @@ var SDKVersionUserAgentHandler = request.NamedHandler{ } const execEnvVar = `AWS_EXECUTION_ENV` -const execEnvUAKey = `exec-env` +const execEnvUAKey = `exec_env` // AddHostExecEnvUserAgentHander is a request handler appending the SDK's // execution environment to the user agent. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 4af59215..dc82f4c3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -49,11 +49,8 @@ package credentials import ( - "fmt" "sync" "time" - - "github.com/aws/aws-sdk-go/aws/awserr" ) // AnonymousCredentials is an empty Credential object that can be used as @@ -84,12 +81,6 @@ type Value struct { ProviderName string } -// HasKeys returns if the credentials Value has both AccessKeyID and -// SecretAccessKey value set. -func (v Value) HasKeys() bool { - return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 -} - // A Provider is the interface for any component which will provide credentials // Value. A provider is required to manage its own Expired state, and what to // be expired means. @@ -106,14 +97,6 @@ type Provider interface { IsExpired() bool } -// An Expirer is an interface that Providers can implement to expose the expiration -// time, if known. If the Provider cannot accurately provide this info, -// it should not implement this interface. -type Expirer interface { - // The time at which the credentials are no longer valid - ExpiresAt() time.Time -} - // An ErrorProvider is a stub credentials provider that always returns an error // this is used by the SDK when construction a known provider is not possible // due to an error. @@ -180,11 +163,6 @@ func (e *Expiry) IsExpired() bool { return e.expiration.Before(curTime()) } -// ExpiresAt returns the expiration time of the credential -func (e *Expiry) ExpiresAt() time.Time { - return e.expiration -} - // A Credentials provides concurrency safe retrieval of AWS credentials Value. // Credentials will cache the credentials value until they expire. Once the value // expires the next Get will attempt to retrieve valid credentials. @@ -277,23 +255,3 @@ func (c *Credentials) IsExpired() bool { func (c *Credentials) isExpired() bool { return c.forceRefresh || c.provider.IsExpired() } - -// ExpiresAt provides access to the functionality of the Expirer interface of -// the underlying Provider, if it supports that interface. Otherwise, it returns -// an error. 
-func (c *Credentials) ExpiresAt() (time.Time, error) { - c.m.RLock() - defer c.m.RUnlock() - - expirer, ok := c.provider.(Expirer) - if !ok { - return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), - nil) - } - if c.forceRefresh { - // set expiration time to the distant past - return time.Time{}, nil - } - return expirer.ExpiresAt(), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index 43d4ed38..0ed791be 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkuri" ) @@ -143,8 +142,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { } if err := s.Err(); err != nil { - return nil, awserr.New(request.ErrCodeSerialization, - "failed to read EC2 instance role from metadata service", err) + return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) } return credsList, nil @@ -166,7 +164,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred respCreds := ec2RoleCredRespBody{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { return ec2RoleCredRespBody{}, - awserr.New(request.ErrCodeSerialization, + awserr.New("SerializationError", fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index 1a7af53a..ace51313 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -39,7 +39,6 @@ import ( "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" ) // ProviderName is the name of the credentials provider. @@ -98,8 +97,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin return p } -// NewCredentialsClient returns a pointer to a new Credentials object -// wrapping the endpoint credentials Provider. +// NewCredentialsClient returns a Credentials wrapper for retrieving credentials +// from an arbitrary endpoint concurrently. 
The client will request the func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) } @@ -175,7 +174,7 @@ func unmarshalHandler(r *request.Request) { out := r.Data.(*getCredentialsOutput) if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, + r.Error = awserr.New("SerializationError", "failed to decode endpoint credentials", err, ) @@ -186,15 +185,11 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() var errOut errorOutput - err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to decode error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { + r.Error = awserr.New("SerializationError", + "failed to decode endpoint credentials", + err, ) - return } // Response body format is not consistent between metadata endpoints. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go deleted file mode 100644 index 1980c8c1..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ /dev/null @@ -1,425 +0,0 @@ -/* -Package processcreds is a credential Provider to retrieve `credential_process` -credentials. - -WARNING: The following describes a method of sourcing credentials from an external -process. This can potentially be dangerous, so proceed with caution. Other -credential providers should be preferred if at all possible. If using this -option, you should make sure that the config file is as locked down as possible -using security best practices for your operating system. - -You can use credentials from a `credential_process` in a variety of ways. - -One way is to setup your shared config file, located in the default -location, with the `credential_process` key and the command you want to be -called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable -(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. - - [default] - credential_process = /command/to/call - -Creating a new session will use the credential process to retrieve credentials. -NOTE: If there are credentials in the profile you are using, the credential -process will not be used. - - // Initialize a session to load credentials. - sess, _ := session.NewSession(&aws.Config{ - Region: aws.String("us-east-1")}, - ) - - // Create S3 service client to use the credentials. - svc := s3.New(sess) - -Another way to use the `credential_process` method is by using -`credentials.NewCredentials()` and providing a command to be executed to -retrieve credentials: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentials("/path/to/command") - - // Create service client value configured for credentials. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -You can set a non-default timeout for the `credential_process` with another -constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To -set a one minute timeout: - - // Create credentials using the ProcessProvider. 
- creds := processcreds.NewCredentialsTimeout( - "/path/to/command", - time.Duration(500) * time.Millisecond) - -If you need more control, you can set any configurable options in the -credentials using one or more option functions. For example, you can set a two -minute timeout, a credential duration of 60 minutes, and a maximum stdout -buffer size of 2k. - - creds := processcreds.NewCredentials( - "/path/to/command", - func(opt *ProcessProvider) { - opt.Timeout = time.Duration(2) * time.Minute - opt.Duration = time.Duration(60) * time.Minute - opt.MaxBufSize = 2048 - }) - -You can also use your own `exec.Cmd`: - - // Create an exec.Cmd - myCommand := exec.Command("/path/to/command") - - // Create credentials using your exec.Cmd and custom timeout - creds := processcreds.NewCredentialsCommand( - myCommand, - func(opt *processcreds.ProcessProvider) { - opt.Timeout = time.Duration(1) * time.Second - }) -*/ -package processcreds - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "runtime" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" -) - -const ( - // ProviderName is the name this credentials provider will label any - // returned credentials Value with. - ProviderName = `ProcessProvider` - - // ErrCodeProcessProviderParse error parsing process output - ErrCodeProcessProviderParse = "ProcessProviderParseError" - - // ErrCodeProcessProviderVersion version error in output - ErrCodeProcessProviderVersion = "ProcessProviderVersionError" - - // ErrCodeProcessProviderRequired required attribute missing in output - ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" - - // ErrCodeProcessProviderExecution execution of command failed - ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" - - // errMsgProcessProviderTimeout process took longer than allowed - errMsgProcessProviderTimeout = "credential process timed out" - - // errMsgProcessProviderProcess process error - errMsgProcessProviderProcess = "error in credential_process" - - // errMsgProcessProviderParse problem parsing output - errMsgProcessProviderParse = "parse failed of credential_process output" - - // errMsgProcessProviderVersion version error in output - errMsgProcessProviderVersion = "wrong version in process output (not 1)" - - // errMsgProcessProviderMissKey missing access key id in output - errMsgProcessProviderMissKey = "missing AccessKeyId in process output" - - // errMsgProcessProviderMissSecret missing secret acess key in output - errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" - - // errMsgProcessProviderPrepareCmd prepare of command failed - errMsgProcessProviderPrepareCmd = "failed to prepare command" - - // errMsgProcessProviderEmptyCmd command must not be empty - errMsgProcessProviderEmptyCmd = "command must not be empty" - - // errMsgProcessProviderPipe failed to initialize pipe - errMsgProcessProviderPipe = "failed to initialize pipe" - - // DefaultDuration is the default amount of time in minutes that the - // credentials will be valid for. - DefaultDuration = time.Duration(15) * time.Minute - - // DefaultBufSize limits buffer size from growing to an enormous - // amount due to a faulty process. - DefaultBufSize = 1024 - - // DefaultTimeout default limit on time a process can run. 
- DefaultTimeout = time.Duration(1) * time.Minute -) - -// ProcessProvider satisfies the credentials.Provider interface, and is a -// client to retrieve credentials from a process. -type ProcessProvider struct { - staticCreds bool - credentials.Expiry - originalCommand []string - - // Expiry duration of the credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // A string representing an os command that should return a JSON with - // credential information. - command *exec.Cmd - - // MaxBufSize limits memory usage from growing to an enormous - // amount due to a faulty process. - MaxBufSize int - - // Timeout limits the time a process can run. - Timeout time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// ProcessProvider. The credentials will expire every 15 minutes by default. -func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: exec.Command(command), - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsTimeout returns a pointer to a new Credentials object with -// the specified command and timeout, and default duration and max buffer size. -func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { - p := NewCredentials(command, func(opt *ProcessProvider) { - opt.Timeout = timeout - }) - - return p -} - -// NewCredentialsCommand returns a pointer to a new Credentials object with -// the specified command, and default timeout, duration and max buffer size. -func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: command, - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` - SecretAccessKey string - SessionToken string - Expiration *time.Time -} - -// Retrieve executes the 'credential_process' and returns the credentials. 
-func (p *ProcessProvider) Retrieve() (credentials.Value, error) { - out, err := p.executeCredentialProcess() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // Serialize and validate response - resp := &credentialProcessResponse{} - if err = json.Unmarshal(out, resp); err != nil { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderParse, - fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), - err) - } - - if resp.Version != 1 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderVersion, - errMsgProcessProviderVersion, - nil) - } - - if len(resp.AccessKeyID) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissKey, - nil) - } - - if len(resp.SecretAccessKey) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissSecret, - nil) - } - - // Handle expiration - p.staticCreds = resp.Expiration == nil - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } - - return credentials.Value{ - ProviderName: ProviderName, - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.SessionToken, - }, nil -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *ProcessProvider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// prepareCommand prepares the command to be executed. -func (p *ProcessProvider) prepareCommand() error { - - var cmdArgs []string - if runtime.GOOS == "windows" { - cmdArgs = []string{"cmd.exe", "/C"} - } else { - cmdArgs = []string{"sh", "-c"} - } - - if len(p.originalCommand) == 0 { - p.originalCommand = make([]string, len(p.command.Args)) - copy(p.originalCommand, p.command.Args) - - // check for empty command because it succeeds - if len(strings.TrimSpace(p.originalCommand[0])) < 1 { - return awserr.New( - ErrCodeProcessProviderExecution, - fmt.Sprintf( - "%s: %s", - errMsgProcessProviderPrepareCmd, - errMsgProcessProviderEmptyCmd), - nil) - } - } - - cmdArgs = append(cmdArgs, p.originalCommand...) - p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) - p.command.Env = os.Environ() - - return nil -} - -// executeCredentialProcess starts the credential process on the OS and -// returns the results or an error. 
-func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { - - if err := p.prepareCommand(); err != nil { - return nil, err - } - - // Setup the pipes - outReadPipe, outWritePipe, err := os.Pipe() - if err != nil { - return nil, awserr.New( - ErrCodeProcessProviderExecution, - errMsgProcessProviderPipe, - err) - } - - p.command.Stderr = os.Stderr // display stderr on console for MFA - p.command.Stdout = outWritePipe // get creds json on process's stdout - p.command.Stdin = os.Stdin // enable stdin for MFA - - output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) - - stdoutCh := make(chan error, 1) - go readInput( - io.LimitReader(outReadPipe, int64(p.MaxBufSize)), - output, - stdoutCh) - - execCh := make(chan error, 1) - go executeCommand(*p.command, execCh) - - finished := false - var errors []error - for !finished { - select { - case readError := <-stdoutCh: - errors = appendError(errors, readError) - finished = true - case execError := <-execCh: - err := outWritePipe.Close() - errors = appendError(errors, err) - errors = appendError(errors, execError) - if errors != nil { - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderProcess, - errors) - } - case <-time.After(p.Timeout): - finished = true - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderTimeout, - errors) // errors can be nil - } - } - - out := output.Bytes() - - if runtime.GOOS == "windows" { - // windows adds slashes to quotes - out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) - } - - return out, nil -} - -// appendError conveniently checks for nil before appending slice -func appendError(errors []error, err error) []error { - if err != nil { - return append(errors, err) - } - return errors -} - -func executeCommand(cmd exec.Cmd, exec chan error) { - // Start the command - err := cmd.Start() - if err == nil { - err = cmd.Wait() - } - - exec <- err -} - -func readInput(r io.Reader, w io.Writer, read chan error) { - tee := io.TeeReader(r, w) - - _, err := ioutil.ReadAll(tee) - - if err == io.EOF { - err = nil - } - - read <- err // will only arrive here when write end of pipe is closed -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 2e528d13..4108e433 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -80,18 +80,16 @@ package stscreds import ( "fmt" - "os" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/internal/sdkrand" "github.com/aws/aws-sdk-go/service/sts" ) -// StdinTokenProvider will prompt on stderr and read from stdin for a string value. +// StdinTokenProvider will prompt on stdout and read from stdin for a string value. // An error is returned if reading from stdin fails. // // Use this function go read MFA tokens from stdin. The function makes no attempt @@ -104,7 +102,7 @@ import ( // Will wait forever until something is provided on the stdin. 
func StdinTokenProvider() (string, error) { var v string - fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") + fmt.Printf("Assume Role MFA token code: ") _, err := fmt.Scanln(&v) return v, err @@ -195,18 +193,6 @@ type AssumeRoleProvider struct { // // If ExpiryWindow is 0 or less it will be ignored. ExpiryWindow time.Duration - - // MaxJitterFrac reduces the effective Duration of each credential requested - // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must - // have a value between 0 and 1. Any other value may lead to expected behavior. - // With a MaxJitterFrac value of 0, default) will no jitter will be used. - // - // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the - // AssumeRole call will be made with an arbitrary Duration between 27m and - // 30m. - // - // MaxJitterFrac should not be negative. - MaxJitterFrac float64 } // NewCredentials returns a pointer to a new Credentials object wrapping the @@ -258,6 +244,7 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(* // Retrieve generates a new set of temporary credentials using STS. func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + // Apply defaults where parameters are not set. if p.RoleSessionName == "" { // Try to work out a role name that will hopefully end up unique. @@ -267,9 +254,8 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { // Expire as often as AWS permits. p.Duration = DefaultDuration } - jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), RoleArn: aws.String(p.RoleARN), RoleSessionName: aws.String(p.RoleSessionName), ExternalId: p.ExternalID, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go deleted file mode 100644 index b20b6339..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ /dev/null @@ -1,100 +0,0 @@ -package stscreds - -import ( - "fmt" - "io/ioutil" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" -) - -const ( - // ErrCodeWebIdentity will be used as an error code when constructing - // a new error to be returned during session creation or retrieval. - ErrCodeWebIdentity = "WebIdentityErr" - - // WebIdentityProviderName is the web identity provider name - WebIdentityProviderName = "WebIdentityCredentials" -) - -// now is used to return a time.Time object representing -// the current time. This can be used to easily test and -// compare test values. -var now = time.Now - -// WebIdentityRoleProvider is used to retrieve credentials using -// an OIDC token. -type WebIdentityRoleProvider struct { - credentials.Expiry - - client stsiface.STSAPI - ExpiryWindow time.Duration - - tokenFilePath string - roleARN string - roleSessionName string -} - -// NewWebIdentityCredentials will return a new set of credentials with a given -// configuration, role arn, and token file path. 
-func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
-	svc := sts.New(c)
-	p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
-	return credentials.NewCredentials(p)
-}
-
-// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
-// provided stsiface.STSAPI
-func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
-	return &WebIdentityRoleProvider{
-		client:          svc,
-		tokenFilePath:   path,
-		roleARN:         roleARN,
-		roleSessionName: roleSessionName,
-	}
-}
-
-// Retrieve will attempt to assume a role from a token which is located at
-// 'WebIdentityTokenFilePath' specified destination and if that is empty an
-// error will be returned.
-func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
-	b, err := ioutil.ReadFile(p.tokenFilePath)
-	if err != nil {
-		errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
-		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
-	}
-
-	sessionName := p.roleSessionName
-	if len(sessionName) == 0 {
-		// session name is used to uniquely identify a session. This simply
-		// uses unix time in nanoseconds to uniquely identify sessions.
-		sessionName = strconv.FormatInt(now().UnixNano(), 10)
-	}
-	req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
-		RoleArn:          &p.roleARN,
-		RoleSessionName:  &sessionName,
-		WebIdentityToken: aws.String(string(b)),
-	})
-	// InvalidIdentityToken error is a temporary error that can occur
-	// when assuming an Role with a JWT web identity token.
-	req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
-	if err := req.Send(); err != nil {
-		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
-	}
-
-	p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
-
-	value := credentials.Value{
-		AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
-		SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
-		SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
-		ProviderName:    WebIdentityProviderName,
-	}
-	return value, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
index 25a66d1d..152d785b 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -1,61 +1,30 @@
-// Package csm provides the Client Side Monitoring (CSM) client which enables
-// sending metrics via UDP connection to the CSM agent. This package provides
-// control options, and configuration for the CSM client. The client can be
-// controlled manually, or automatically via the SDK's Session configuration.
+// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
+// via UDP connection. Using the Start function will enable the reporting of
+// metrics on a given port. If Start is called again with different parameters,
+// a panic will occur.
 //
-// Enabling CSM client via SDK's Session configuration
-//
-// The CSM client can be enabled automatically via SDK's Session configuration.
-// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
-// environment variable is set to a non-empty value.
-// -// The configuration options for the CSM client via the SDK's session -// configuration are: -// -// * AWS_CSM_PORT= -// The port number the CSM agent will receive metrics on. -// -// * AWS_CSM_HOST= -// The hostname, or IP address the CSM agent will receive metrics on. -// Without port number. -// -// Manually enabling the CSM client -// -// The CSM client can be started, paused, and resumed manually. The Start -// function will enable the CSM client to publish metrics to the CSM agent. It -// is safe to call Start concurrently, but if Start is called additional times -// with different ClientID or address it will panic. +// Pause can be called to pause any metrics publishing on a given port. Sessions +// that have had their handlers modified via InjectHandlers may still be used. +// However, the handlers will act as a no-op meaning no metrics will be published. // +// Example: // r, err := csm.Start("clientID", ":31000") // if err != nil { // panic(fmt.Errorf("failed starting CSM: %v", err)) // } // -// When controlling the CSM client manually, you must also inject its request -// handlers into the SDK's Session configuration for the SDK's API clients to -// publish metrics. -// // sess, err := session.NewSession(&aws.Config{}) // if err != nil { // panic(fmt.Errorf("failed loading session: %v", err)) // } // -// // Add CSM client's metric publishing request handlers to the SDK's -// // Session Configuration. // r.InjectHandlers(&sess.Handlers) // -// Controlling CSM client -// -// Once the CSM client has been enabled the Get function will return a Reporter -// value that you can use to pause and resume the metrics published to the CSM -// agent. If Get function is called before the reporter is enabled with the -// Start function or via SDK's Session configuration nil will be returned. -// -// The Pause method can be called to stop the CSM client publishing metrics to -// the CSM agent. The Continue method will resume metric publishing. -// -// // Get the CSM client Reporter. -// r := csm.Get() +// client := s3.New(sess) +// resp, err := client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) // // // Will pause monitoring // r.Pause() @@ -66,4 +35,12 @@ // // // Resume monitoring // r.Continue() +// +// Start returns a Reporter that is used to enable or disable monitoring. If +// access to the Reporter is required later, calling Get will return the Reporter +// singleton. +// +// Example: +// r := csm.Get() +// r.Continue() package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go index 4b19e280..2f0c6eac 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -2,7 +2,6 @@ package csm import ( "fmt" - "strings" "sync" ) @@ -10,40 +9,19 @@ var ( lock sync.Mutex ) +// Client side metric handler names const ( - // DefaultPort is used when no port is specified. - DefaultPort = "31000" - - // DefaultHost is the host that will be used when none is specified. - DefaultHost = "127.0.0.1" + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" ) -// AddressWithDefaults returns a CSM address built from the host and port -// values. If the host or port is not set, default values will be used -// instead. If host is "localhost" it will be replaced with "127.0.0.1". 
-func AddressWithDefaults(host, port string) string {
-	if len(host) == 0 || strings.EqualFold(host, "localhost") {
-		host = DefaultHost
-	}
-
-	if len(port) == 0 {
-		port = DefaultPort
-	}
-
-	// Only IP6 host can contain a colon
-	if strings.Contains(host, ":") {
-		return "[" + host + "]:" + port
-	}
-
-	return host + ":" + port
-}
-
 // Start will start a long running go routine to capture
 // client side metrics. Calling start multiple time will only
 // start the metric listener once and will panic if a different
 // client ID or port is passed in.
 //
-//	r, err := csm.Start("clientID", "127.0.0.1:31000")
+// Example:
+//	r, err := csm.Start("clientID", "127.0.0.1:8094")
 // if err != nil {
 // 	panic(fmt.Errorf("expected no error, but received %v", err))
 // }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
index 5bacc791..6f57024d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
@@ -3,8 +3,6 @@ package csm
 import (
 	"strconv"
 	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
 )
 
 type metricTime time.Time
@@ -41,12 +39,6 @@ type metric struct {
 	SDKException        *string `json:"SdkException,omitempty"`
 	SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
 
-	FinalHTTPStatusCode      *int    `json:"FinalHttpStatusCode,omitempty"`
-	FinalAWSException        *string `json:"FinalAwsException,omitempty"`
-	FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
-	FinalSDKException        *string `json:"FinalSdkException,omitempty"`
-	FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
-
 	DestinationIP    *string `json:"DestinationIp,omitempty"`
 	ConnectionReused *int    `json:"ConnectionReused,omitempty"`
@@ -59,51 +51,3 @@ type metric struct {
 
 	MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
 }
-
-func (m *metric) TruncateFields() {
-	m.ClientID = truncateString(m.ClientID, 255)
-	m.UserAgent = truncateString(m.UserAgent, 256)
-
-	m.AWSException = truncateString(m.AWSException, 128)
-	m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
-
-	m.SDKException = truncateString(m.SDKException, 128)
-	m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
-
-	m.FinalAWSException = truncateString(m.FinalAWSException, 128)
-	m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
-
-	m.FinalSDKException = truncateString(m.FinalSDKException, 128)
-	m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
-}
-
-func truncateString(v *string, l int) *string {
-	if v != nil && len(*v) > l {
-		nv := (*v)[:l]
-		return &nv
-	}
-
-	return v
-}
-
-func (m *metric) SetException(e metricException) {
-	switch te := e.(type) {
-	case awsException:
-		m.AWSException = aws.String(te.exception)
-		m.AWSExceptionMessage = aws.String(te.message)
-	case sdkException:
-		m.SDKException = aws.String(te.exception)
-		m.SDKExceptionMessage = aws.String(te.message)
-	}
-}
-
-func (m *metric) SetFinalException(e metricException) {
-	switch te := e.(type) {
-	case awsException:
-		m.FinalAWSException = aws.String(te.exception)
-		m.FinalAWSExceptionMessage = aws.String(te.message)
-	case sdkException:
-		m.FinalSDKException = aws.String(te.exception)
-		m.FinalSDKExceptionMessage = aws.String(te.message)
-	}
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go index 82a3e345..514fc373 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -16,26 +16,25 @@ var ( type metricChan struct { ch chan metric - paused *int64 + paused int64 } func newMetricChan(size int) metricChan { return metricChan{ - ch: make(chan metric, size), - paused: new(int64), + ch: make(chan metric, size), } } func (ch *metricChan) Pause() { - atomic.StoreInt64(ch.paused, pausedEnum) + atomic.StoreInt64(&ch.paused, pausedEnum) } func (ch *metricChan) Continue() { - atomic.StoreInt64(ch.paused, runningEnum) + atomic.StoreInt64(&ch.paused, runningEnum) } func (ch *metricChan) IsPaused() bool { - v := atomic.LoadInt64(ch.paused) + v := atomic.LoadInt64(&ch.paused) return v == pausedEnum } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go deleted file mode 100644 index 54a99280..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go +++ /dev/null @@ -1,26 +0,0 @@ -package csm - -type metricException interface { - Exception() string - Message() string -} - -type requestException struct { - exception string - message string -} - -func (e requestException) Exception() string { - return e.exception -} -func (e requestException) Message() string { - return e.message -} - -type awsException struct { - requestException -} - -type sdkException struct { - requestException -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go index c7008d8c..11861844 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -10,6 +10,11 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) +const ( + // DefaultPort is used when no port is specified + DefaultPort = "31000" +) + // Reporter will gather metrics of API requests made and // send those metrics to the CSM endpoint. 
type Reporter struct { @@ -77,29 +82,26 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { if r.Error != nil { if awserr, ok := r.Error.(awserr.Error); ok { - m.SetException(getMetricException(awserr)) + setError(&m, awserr) } } - m.TruncateFields() rep.metricsCh.Push(m) } -func getMetricException(err awserr.Error) metricException { +func setError(m *metric, err awserr.Error) { msg := err.Error() code := err.Code() switch code { case "RequestError", - request.ErrCodeSerialization, + "SerializationError", request.CanceledErrorCode: - return sdkException{ - requestException{exception: code, message: msg}, - } + m.SDKException = &code + m.SDKExceptionMessage = &msg default: - return awsException{ - requestException{exception: code, message: msg}, - } + m.AWSException = &code + m.AWSExceptionMessage = &msg } } @@ -114,27 +116,14 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) { API: aws.String(r.Operation.Name), Service: aws.String(r.ClientInfo.ServiceID), Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), Type: aws.String("ApiCall"), AttemptCount: aws.Int(r.RetryCount + 1), Region: r.Config.Region, - Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), XAmzRequestID: aws.String(r.RequestID), MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), } - if r.HTTPResponse != nil { - m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetFinalException(getMetricException(awserr)) - } - } - - m.TruncateFields() - // TODO: Probably want to figure something out for logging dropped // metrics rep.metricsCh.Push(m) @@ -185,9 +174,8 @@ func (rep *Reporter) start() { } } -// Pause will pause the metric channel preventing any new metrics from being -// added. It is safe to call concurrently with other calls to Pause, but if -// called concurently with Continue can lead to unexpected state. +// Pause will pause the metric channel preventing any new metrics from +// being added. func (rep *Reporter) Pause() { lock.Lock() defer lock.Unlock() @@ -199,9 +187,8 @@ func (rep *Reporter) Pause() { rep.close() } -// Continue will reopen the metric channel and allow for monitoring to be -// resumed. It is safe to call concurrently with other calls to Continue, but -// if called concurently with Pause can lead to unexpected state. +// Continue will reopen the metric channel and allow for monitoring +// to be resumed. func (rep *Reporter) Continue() { lock.Lock() defer lock.Unlock() @@ -216,18 +203,10 @@ func (rep *Reporter) Continue() { rep.metricsCh.Continue() } -// Client side metric handler names -const ( - APICallMetricHandlerName = "awscsm.SendAPICallMetric" - APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" -) - // InjectHandlers will will enable client side metrics and inject the proper // handlers to handle how metrics are sent. // -// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers -// multiple times may lead to unexpected behavior, (e.g. duplicate metrics). 
-// +// Example: // // Start must be called in order to inject the correct handlers // r, err := csm.Start("clientID", "127.0.0.1:8094") // if err != nil { @@ -244,15 +223,13 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { return } - handlers.Complete.PushFrontNamed(request.NamedHandler{ - Name: APICallMetricHandlerName, - Fn: rep.sendAPICallMetric, - }) + apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric} + apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric} + + handlers.Complete.PushFrontNamed(apiCallHandler) + handlers.Complete.PushFrontNamed(apiCallAttemptHandler) - handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ - Name: APICallAttemptMetricHandlerName, - Fn: rep.sendAPICallAttemptMetric, - }) + handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler) } // boolIntValue return 1 for true and 0 for false. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index d126764c..c215cd3f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -24,9 +24,8 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) - err := req.Send() - return output.Content, err + return output.Content, req.Send() } // GetUserData returns the userdata that was configured for the service. If @@ -46,9 +45,8 @@ func (c *EC2Metadata) GetUserData() (string, error) { r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) } }) - err := req.Send() - return output.Content, err + return output.Content, req.Send() } // GetDynamicData uses the path provided to request information from the EC2 @@ -63,9 +61,8 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) - err := req.Send() - return output.Content, err + return output.Content, req.Send() } // GetInstanceIdentityDocument retrieves an identity document describing an @@ -82,7 +79,7 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument doc := EC2InstanceIdentityDocument{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { return EC2InstanceIdentityDocument{}, - awserr.New(request.ErrCodeSerialization, + awserr.New("SerializationError", "failed to decode EC2 instance identity document", err) } @@ -101,7 +98,7 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { info := EC2IAMInfo{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { return EC2IAMInfo{}, - awserr.New(request.ErrCodeSerialization, + awserr.New("SerializationError", "failed to decode EC2 IAM info", err) } @@ -121,10 +118,6 @@ func (c *EC2Metadata) Region() (string, error) { return "", err } - if len(resp) == 0 { - return "", awserr.New("EC2MetadataError", "invalid Region response", nil) - } - // returns region without the suffix. 
Eg: us-west-2a becomes us-west-2
 	return resp[:len(resp)-1], nil
 }
 
@@ -152,19 +145,18 @@ type EC2IAMInfo struct {
 // An EC2InstanceIdentityDocument provides the shape for unmarshaling
 // an instance identity document
 type EC2InstanceIdentityDocument struct {
-	DevpayProductCodes      []string  `json:"devpayProductCodes"`
-	MarketplaceProductCodes []string  `json:"marketplaceProductCodes"`
-	AvailabilityZone        string    `json:"availabilityZone"`
-	PrivateIP               string    `json:"privateIp"`
-	Version                 string    `json:"version"`
-	Region                  string    `json:"region"`
-	InstanceID              string    `json:"instanceId"`
-	BillingProducts         []string  `json:"billingProducts"`
-	InstanceType            string    `json:"instanceType"`
-	AccountID               string    `json:"accountId"`
-	PendingTime             time.Time `json:"pendingTime"`
-	ImageID                 string    `json:"imageId"`
-	KernelID                string    `json:"kernelId"`
-	RamdiskID               string    `json:"ramdiskId"`
-	Architecture            string    `json:"architecture"`
+	DevpayProductCodes []string  `json:"devpayProductCodes"`
+	AvailabilityZone   string    `json:"availabilityZone"`
+	PrivateIP          string    `json:"privateIp"`
+	Version            string    `json:"version"`
+	Region             string    `json:"region"`
+	InstanceID         string    `json:"instanceId"`
+	BillingProducts    []string  `json:"billingProducts"`
+	InstanceType       string    `json:"instanceType"`
+	AccountID          string    `json:"accountId"`
+	PendingTime        time.Time `json:"pendingTime"`
+	ImageID            string    `json:"imageId"`
+	KernelID           string    `json:"kernelId"`
+	RamdiskID          string    `json:"ramdiskId"`
+	Architecture       string    `json:"architecture"`
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index 4c5636e3..53457cac 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -4,7 +4,7 @@
 // This package's client can be disabled completely by setting the environment
 // variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
 // true instructs the SDK to disable the EC2 Metadata client. The client cannot
 // be used while the environment variable is set to true, (case insensitive).
package ec2metadata
 
 import (
@@ -92,9 +92,6 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
 	svc.Handlers.Send.SwapNamed(request.NamedHandler{
 		Name: corehandlers.SendHandler.Name,
 		Fn: func(r *request.Request) {
-			r.HTTPResponse = &http.Response{
-				Header: http.Header{},
-			}
 			r.Error = awserr.New(
 				request.CanceledErrorCode,
 				"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
@@ -123,7 +120,7 @@ func unmarshalHandler(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
 	b := &bytes.Buffer{}
 	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
-		r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
 		return
 	}
 
@@ -136,7 +133,7 @@ func unmarshalError(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
 	b := &bytes.Buffer{}
 	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
-		r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
 		return
 	}
 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
index 87b9ff3f..1ddeae10 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -85,7 +85,6 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
 		custAddS3DualStack(p)
 		custRmIotDataService(p)
 		custFixAppAutoscalingChina(p)
-		custFixAppAutoscalingUsGov(p)
 	}
 
 	return ps, nil
@@ -150,33 +149,6 @@ func custFixAppAutoscalingChina(p *partition) {
 	p.Services[serviceName] = s
 }
 
-func custFixAppAutoscalingUsGov(p *partition) {
-	if p.ID != "aws-us-gov" {
-		return
-	}
-
-	const serviceName = "application-autoscaling"
-	s, ok := p.Services[serviceName]
-	if !ok {
-		return
-	}
-
-	if a := s.Defaults.CredentialScope.Service; a != "" {
-		fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
-		return
-	}
-
-	if a := s.Defaults.Hostname; a != "" {
-		fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
-		return
-	}
-
-	s.Defaults.CredentialScope.Service = "application-autoscaling"
-	s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com"
-
-	p.Services[serviceName] = s
-}
-
 type decodeModelError struct {
 	awsError
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 452cefda..c01c9018 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -11,13 +11,10 @@ const (
 	AwsPartitionID      = "aws"        // AWS Standard partition.
 	AwsCnPartitionID    = "aws-cn"     // AWS China partition.
 	AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
-	AwsIsoPartitionID   = "aws-iso"    // AWS ISO (US) partition.
-	AwsIsoBPartitionID  = "aws-iso-b"  // AWS ISOB (US) partition.
 )
 
 // AWS Standard partition's regions.
 const (
-	ApEast1RegionID      = "ap-east-1"      // Asia Pacific (Hong Kong).
 	ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
 	ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
 	ApSouth1RegionID     = "ap-south-1"     // Asia Pacific (Mumbai).
@@ -25,11 +22,9 @@ const ( ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). CaCentral1RegionID = "ca-central-1" // Canada (Central). EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). - EuNorth1RegionID = "eu-north-1" // EU (Stockholm). EuWest1RegionID = "eu-west-1" // EU (Ireland). EuWest2RegionID = "eu-west-2" // EU (London). EuWest3RegionID = "eu-west-3" // EU (Paris). - MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -49,18 +44,143 @@ const ( UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). ) -// AWS ISO (US) partition's regions. +// Service identifiers const ( - UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. -) - -// AWS ISOB (US) partition's regions. -const ( - UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. 
+ ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. 
+	TaggingServiceID = "tagging" // Tagging.
+	TranslateServiceID = "translate" // Translate.
+	WafServiceID = "waf" // Waf.
+	WafRegionalServiceID = "waf-regional" // WafRegional.
+	WorkdocsServiceID = "workdocs" // Workdocs.
+	WorkmailServiceID = "workmail" // Workmail.
+	WorkspacesServiceID = "workspaces" // Workspaces.
+	XrayServiceID = "xray" // Xray.
 )
 
 // DefaultResolver returns an Endpoint resolver that will be able
-// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
 //
 // Use DefaultPartitions() to get the list of the default partitions.
 func DefaultResolver() Resolver {
@@ -68,7 +188,7 @@ func DefaultResolver() Resolver {
 }
 
 // DefaultPartitions returns a list of the partitions the SDK is bundled
-// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
 //
 //    partitions := endpoints.DefaultPartitions
 //    for _, p := range partitions {
@@ -82,8 +202,6 @@ var defaultPartitions = partitions{
 	awsPartition,
 	awscnPartition,
 	awsusgovPartition,
-	awsisoPartition,
-	awsisobPartition,
 }
 
 // AwsPartition returns the Resolver for AWS Standard.
@@ -97,7 +215,7 @@ var awsPartition = partition{
 	DNSSuffix: "amazonaws.com",
 	RegionRegex: regionRegex{
 		Regexp: func() *regexp.Regexp {
-			reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$")
+			reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$")
 			return reg
 		}(),
 	},
@@ -107,9 +225,6 @@ var awsPartition = partition{
 		SignatureVersions: []string{"v4"},
 	},
 	Regions: regions{
-		"ap-east-1": region{
-			Description: "Asia Pacific (Hong Kong)",
-		},
 		"ap-northeast-1": region{
 			Description: "Asia Pacific (Tokyo)",
 		},
@@ -131,9 +246,6 @@
 		"eu-central-1": region{
 			Description: "EU (Frankfurt)",
 		},
-		"eu-north-1": region{
-			Description: "EU (Stockholm)",
-		},
 		"eu-west-1": region{
 			Description: "EU (Ireland)",
 		},
@@ -143,9 +255,6 @@
 		"eu-west-3": region{
 			Description: "EU (Paris)",
 		},
-		"me-south-1": region{
-			Description: "Middle East (Bahrain)",
-		},
 		"sa-east-1": region{
 			Description: "South America (Sao Paulo)",
 		},
@@ -172,7 +281,6 @@
 	"acm": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -180,11 +288,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -197,147 +303,26 @@
 			Protocols: []string{"https"},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
 			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
 			"us-west-2": endpoint{},
 		},
 	},
-	"api.ecr": service{
-
-		Endpoints: endpoints{
-			"ap-east-1": endpoint{
-				Hostname: "api.ecr.ap-east-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-east-1",
-				},
-			},
-			"ap-northeast-1": endpoint{
-				Hostname: "api.ecr.ap-northeast-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-northeast-1",
-				},
-			},
-			"ap-northeast-2": endpoint{
-				Hostname: "api.ecr.ap-northeast-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-northeast-2",
-				},
-			},
-			"ap-south-1": endpoint{
-				Hostname: "api.ecr.ap-south-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-south-1",
-				},
-			},
-			"ap-southeast-1": endpoint{
-				Hostname: "api.ecr.ap-southeast-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-southeast-1",
-				},
-			},
-			"ap-southeast-2": endpoint{
-				Hostname: "api.ecr.ap-southeast-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-southeast-2",
-				},
-			},
-			"ca-central-1": endpoint{
-				Hostname: "api.ecr.ca-central-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ca-central-1",
-				},
-			},
-			"eu-central-1": endpoint{
-				Hostname: "api.ecr.eu-central-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-central-1",
-				},
-			},
-			"eu-north-1": endpoint{
-				Hostname: "api.ecr.eu-north-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-north-1",
-				},
-			},
-			"eu-west-1": endpoint{
-				Hostname: "api.ecr.eu-west-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-west-1",
-				},
-			},
-			"eu-west-2": endpoint{
-				Hostname: "api.ecr.eu-west-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-west-2",
-				},
-			},
-			"eu-west-3": endpoint{
-				Hostname: "api.ecr.eu-west-3.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-west-3",
-				},
-			},
-			"me-south-1": endpoint{
-				Hostname: "api.ecr.me-south-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "me-south-1",
-				},
-			},
-			"sa-east-1": endpoint{
-				Hostname: "api.ecr.sa-east-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "sa-east-1",
-				},
-			},
-			"us-east-1": endpoint{
-				Hostname: "api.ecr.us-east-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-1",
-				},
-			},
-			"us-east-2": endpoint{
-				Hostname: "api.ecr.us-east-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-2",
-				},
-			},
-			"us-west-1": endpoint{
-				Hostname: "api.ecr.us-west-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-1",
-				},
-			},
-			"us-west-2": endpoint{
-				Hostname: "api.ecr.us-west-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-2",
-				},
-			},
-		},
-	},
 	"api.mediatailor": service{
 
 		Endpoints: endpoints{
 			"ap-northeast-1": endpoint{},
 			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"us-east-1": endpoint{},
-			"us-west-2": endpoint{},
 		},
 	},
 	"api.pricing": service{
@@ -354,7 +339,6 @@
 	"api.sagemaker": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -362,45 +346,17 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
-			"us-east-1-fips": endpoint{
-				Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-1",
-				},
-			},
-			"us-east-2": endpoint{},
-			"us-east-2-fips": endpoint{
-				Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-2",
-				},
-			},
-			"us-west-1": endpoint{},
-			"us-west-1-fips": endpoint{
-				Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-1",
-				},
-			},
-			"us-west-2": endpoint{},
-			"us-west-2-fips": endpoint{
-				Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-2",
-				},
-			},
+			"us-east-2": endpoint{},
+			"us-west-1": endpoint{},
+			"us-west-2": endpoint{},
 		},
 	},
 	"apigateway": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -408,11 +364,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -429,7 +383,6 @@
 		},
 	},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -437,11 +390,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -449,24 +400,6 @@
 			"us-west-2": endpoint{},
 		},
 	},
-	"appmesh": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
 	"appstream2": service{
 		Defaults: endpoint{
 			Protocols: []string{"https"},
@@ -476,10 +409,6 @@
 		},
 		Endpoints: endpoints{
 			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-west-2": endpoint{},
@@ -495,7 +424,6 @@
 			"ap-southeast-2": endpoint{},
 			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
 			"us-west-2": endpoint{},
@@ -504,15 +432,12 @@
 	"athena": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
 			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"us-east-1": endpoint{},
@@ -525,7 +450,6 @@
 			Protocols: []string{"http", "https"},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -533,11 +457,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -554,42 +476,16 @@
 		},
 	},
 		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
-	"backup": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
 			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
-			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
 			"us-west-2": endpoint{},
 		},
 	},
 	"batch": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -597,11 +493,8 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -655,9 +548,7 @@
 	"cloud9": service{
 
 		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
 			"ap-southeast-1": endpoint{},
-			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -669,7 +560,6 @@
 		Endpoints: endpoints{
 			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
@@ -681,7 +571,6 @@
 	"cloudformation": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -689,11 +578,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -737,7 +624,6 @@
 		},
 	},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -745,11 +631,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
 			"us-west-1": endpoint{},
@@ -774,7 +658,6 @@
 	"cloudtrail": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -782,11 +665,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -797,7 +678,6 @@
 	"codebuild": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -805,11 +685,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-1-fips": endpoint{
@@ -851,7 +729,6 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
@@ -871,7 +748,6 @@
 	"codedeploy": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -879,11 +755,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-1-fips": endpoint{
@@ -925,7 +799,6 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
@@ -1007,25 +880,9 @@
 		Defaults: endpoint{
 			Protocols: []string{"https"},
 		},
-		Endpoints: endpoints{
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
-	"comprehendmedical": service{
-
 		Endpoints: endpoints{
 			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
 			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
 			"us-west-2": endpoint{},
@@ -1034,7 +891,6 @@
 	"config": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1042,11 +898,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -1054,35 +908,12 @@
 			"us-west-2": endpoint{},
 		},
 	},
-	"connect": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
 	"cur": service{
 
 		Endpoints: endpoints{
 			"us-east-1": endpoint{},
 		},
 	},
-	"data.mediastore": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
 	"datapipeline": service{
 
 		Endpoints: endpoints{
@@ -1093,46 +924,6 @@
 			"us-west-2": endpoint{},
 		},
 	},
-	"datasync": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"fips-us-east-1": endpoint{
-				Hostname: "datasync-fips.us-east-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-1",
-				},
-			},
-			"fips-us-east-2": endpoint{
-				Hostname: "datasync-fips.us-east-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-2",
-				},
-			},
-			"fips-us-west-1": endpoint{
-				Hostname: "datasync-fips.us-west-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-1",
-				},
-			},
-			"fips-us-west-2": endpoint{
-				Hostname: "datasync-fips.us-west-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-2",
-				},
-			},
-			"me-south-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
 	"dax": service{
 
 		Endpoints: endpoints{
@@ -1140,7 +931,6 @@
 			"ap-south-1": endpoint{},
 			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
@@ -1158,7 +948,6 @@
 	"directconnect": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1166,11 +955,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -1187,7 +974,6 @@
 	"dms": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1195,11 +981,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -1207,66 +991,7 @@
 			"us-west-2": endpoint{},
 		},
 	},
-	"docdb": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{
-				Hostname: "rds.ap-northeast-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-northeast-1",
-				},
-			},
-			"ap-northeast-2": endpoint{
-				Hostname: "rds.ap-northeast-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-northeast-2",
-				},
-			},
-			"ap-southeast-2": endpoint{
-				Hostname: "rds.ap-southeast-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-southeast-2",
-				},
-			},
-			"eu-central-1": endpoint{
-				Hostname: "rds.eu-central-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-central-1",
-				},
-			},
-			"eu-west-1": endpoint{
-				Hostname: "rds.eu-west-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-west-1",
-				},
-			},
-			"eu-west-2": endpoint{
-				Hostname: "rds.eu-west-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-west-2",
-				},
-			},
-			"us-east-1": endpoint{
-				Hostname: "rds.us-east-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-1",
-				},
-			},
-			"us-east-2": endpoint{
-				Hostname: "rds.us-east-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-2",
-				},
-			},
-			"us-west-2": endpoint{
-				Hostname: "rds.us-west-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-2",
-				},
-			},
-		},
-	},
-	"ds": service{
+	"ds": service{
 
 		Endpoints: endpoints{
 			"ap-northeast-1": endpoint{},
@@ -1276,7 +1001,6 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"sa-east-1": endpoint{},
@@ -1291,24 +1015,16 @@
 			Protocols: []string{"http", "https"},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
 			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
-			"ca-central-1-fips": endpoint{
-				Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ca-central-1",
-				},
-			},
-			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
+			"eu-central-1": endpoint{},
+			"eu-west-1": endpoint{},
+			"eu-west-2": endpoint{},
+			"eu-west-3": endpoint{},
 			"local": endpoint{
 				Hostname: "localhost:8000",
 				Protocols: []string{"http"},
@@ -1316,36 +1032,11 @@
 					Region: "us-east-1",
 				},
 			},
-			"me-south-1": endpoint{},
-			"sa-east-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-1-fips": endpoint{
-				Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-1",
-				},
-			},
+			"sa-east-1": endpoint{},
+			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
-			"us-east-2-fips": endpoint{
-				Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-2",
-				},
-			},
 			"us-west-1": endpoint{},
-			"us-west-1-fips": endpoint{
-				Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-1",
-				},
-			},
 			"us-west-2": endpoint{},
-			"us-west-2-fips": endpoint{
-				Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-2",
-				},
-			},
 		},
 	},
 	"ec2": service{
@@ -1353,7 +1044,6 @@
 			Protocols: []string{"http", "https"},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1361,11 +1051,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -1384,10 +1072,29 @@
 			},
 		},
 	},
+	"ecr": service{
+
+		Endpoints: endpoints{
+			"ap-northeast-1": endpoint{},
+			"ap-northeast-2": endpoint{},
+			"ap-south-1": endpoint{},
+			"ap-southeast-1": endpoint{},
+			"ap-southeast-2": endpoint{},
+			"ca-central-1": endpoint{},
+			"eu-central-1": endpoint{},
+			"eu-west-1": endpoint{},
+			"eu-west-2": endpoint{},
+			"eu-west-3": endpoint{},
+			"sa-east-1": endpoint{},
+			"us-east-1": endpoint{},
+			"us-east-2": endpoint{},
+			"us-west-1": endpoint{},
+			"us-west-2": endpoint{},
+		},
+	},
 	"ecs": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1395,11 +1102,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -1410,7 +1115,6 @@
 	"elasticache": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1418,7 +1122,6 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
@@ -1428,18 +1131,16 @@
 					Region: "us-west-1",
 				},
 			},
-			"me-south-1": endpoint{},
-			"sa-east-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
+			"sa-east-1": endpoint{},
+			"us-east-1": endpoint{},
+			"us-east-2": endpoint{},
+			"us-west-1": endpoint{},
+			"us-west-2": endpoint{},
 		},
 	},
 	"elasticbeanstalk": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1447,11 +1148,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -1464,14 +1163,10 @@
 		Endpoints: endpoints{
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
 			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
 			"us-west-1": endpoint{},
@@ -1483,7 +1178,6 @@
 			Protocols: []string{"https"},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1491,11 +1185,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -1509,7 +1201,6 @@
 			Protocols: []string{"https"},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1519,12 +1210,10 @@
 			"eu-central-1": endpoint{
 				SSLCommonName: "{service}.{region}.{dnsSuffix}",
 			},
-			"eu-north-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
-			"sa-east-1": endpoint{},
+			"eu-west-1": endpoint{},
+			"eu-west-2": endpoint{},
+			"eu-west-3": endpoint{},
+			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{
 				SSLCommonName: "{service}.{region}.{dnsSuffix}",
 			},
@@ -1549,12 +1238,9 @@
 	"email": service{
 
 		Endpoints: endpoints{
-			"ap-south-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-west-2": endpoint{},
+			"eu-west-1": endpoint{},
+			"us-east-1": endpoint{},
+			"us-west-2": endpoint{},
 		},
 	},
 	"entitlement.marketplace": service{
@@ -1570,7 +1256,6 @@
 	"es": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1578,28 +1263,19 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"fips": endpoint{
-				Hostname: "es-fips.us-west-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-1",
-				},
-			},
-			"me-south-1": endpoint{},
-			"sa-east-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
+			"sa-east-1": endpoint{},
+			"us-east-1": endpoint{},
+			"us-east-2": endpoint{},
+			"us-west-1": endpoint{},
+			"us-west-2": endpoint{},
 		},
 	},
 	"events": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1607,11 +1283,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -1622,7 +1296,6 @@
 	"firehose": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1630,7 +1303,6 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
@@ -1646,33 +1318,9 @@
 			Protocols: []string{"https"},
 		},
 		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
-	"fsx": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
+			"eu-west-1": endpoint{},
+			"us-east-1": endpoint{},
+			"us-west-2": endpoint{},
 		},
 	},
 	"gamelift": service{
@@ -1699,7 +1347,6 @@
 			Protocols: []string{"http", "https"},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1707,11 +1354,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -1722,7 +1367,6 @@
 	"glue": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1730,15 +1374,10 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
-			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
 			"us-west-2": endpoint{},
 		},
 	},
@@ -1749,32 +1388,19 @@
 		},
 		Endpoints: endpoints{
 			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
 			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
 			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
 			"us-west-2": endpoint{},
 		},
 	},
-	"groundstation": service{
-
-		Endpoints: endpoints{
-			"us-east-2": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
 	"guardduty": service{
 		IsRegionalized: boxedTrue,
 		Defaults: endpoint{
 			Protocols: []string{"https"},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -1782,40 +1408,14 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
-			"us-east-1-fips": endpoint{
-				Hostname: "guardduty-fips.us-east-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-1",
-				},
-			},
-			"us-east-2": endpoint{},
-			"us-east-2-fips": endpoint{
-				Hostname: "guardduty-fips.us-east-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-2",
-				},
-			},
-			"us-west-1": endpoint{},
-			"us-west-1-fips": endpoint{
-				Hostname: "guardduty-fips.us-west-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-1",
-				},
-			},
-			"us-west-2": endpoint{},
-			"us-west-2-fips": endpoint{
-				Hostname: "guardduty-fips.us-west-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-2",
-				},
-			},
+			"us-east-2": endpoint{},
+			"us-west-1": endpoint{},
+			"us-west-2": endpoint{},
 		},
 	},
 	"health": service{
@@ -1860,9 +1460,7 @@
 			"ap-south-1": endpoint{},
 			"ap-southeast-2": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
 			"us-west-1": endpoint{},
@@ -1876,23 +1474,16 @@
 			},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
 			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
-			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
 			"us-west-2": endpoint{},
 		},
 	},
@@ -1907,143 +1498,39 @@ var awsPartition = partition{
 			"us-west-2": endpoint{},
 		},
 	},
-	"iotevents": service{
+	"kinesis": service{
 
 		Endpoints: endpoints{
 			"ap-northeast-1": endpoint{},
+			"ap-northeast-2": endpoint{},
+			"ap-south-1": endpoint{},
+			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
+			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
+			"eu-west-2": endpoint{},
+			"eu-west-3": endpoint{},
+			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
+			"us-west-1": endpoint{},
 			"us-west-2": endpoint{},
 		},
 	},
-	"ioteventsdata": service{
+	"kinesisanalytics": service{
 
 		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{
-				Hostname: "data.iotevents.ap-northeast-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-northeast-1",
-				},
-			},
-			"ap-southeast-2": endpoint{
-				Hostname: "data.iotevents.ap-southeast-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-southeast-2",
-				},
-			},
-			"eu-central-1": endpoint{
-				Hostname: "data.iotevents.eu-central-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-central-1",
-				},
-			},
-			"eu-west-1": endpoint{
-				Hostname: "data.iotevents.eu-west-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-west-1",
-				},
-			},
-			"us-east-1": endpoint{
-				Hostname: "data.iotevents.us-east-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-1",
-				},
-			},
-			"us-east-2": endpoint{
-				Hostname: "data.iotevents.us-east-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-east-2",
-				},
-			},
-			"us-west-2": endpoint{
-				Hostname: "data.iotevents.us-west-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "us-west-2",
-				},
-			},
-		},
-	},
-	"iotthingsgraph": service{
-		Defaults: endpoint{
-			CredentialScope: credentialScope{
-				Service: "iotthingsgraph",
-			},
-		},
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-west-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
-	"kafka": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
-	"kinesis": service{
-
-		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
-			"sa-east-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
-	"kinesisanalytics": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-2": endpoint{},
+			"eu-central-1": endpoint{},
+			"eu-west-1": endpoint{},
+			"us-east-1": endpoint{},
+			"us-west-2": endpoint{},
 		},
 	},
 	"kinesisvideo": service{
 
 		Endpoints: endpoints{
 			"ap-northeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
 			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"us-east-1": endpoint{},
@@ -2053,7 +1540,6 @@
 	"kms": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -2061,11 +1547,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -2073,43 +1557,9 @@
 			"us-west-2": endpoint{},
 		},
 	},
-	"lakeformation": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
 	"lambda": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
-			"sa-east-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
-	"license-manager": service{
-
-		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -2117,11 +1567,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -2150,7 +1598,6 @@
 	"logs": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -2158,11 +1605,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -2183,26 +1628,6 @@
 			"us-east-1": endpoint{},
 		},
 	},
-	"mediaconnect": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"sa-east-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
 	"mediaconvert": service{
 
 		Endpoints: endpoints{
@@ -2215,7 +1640,6 @@
 			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -2232,7 +1656,6 @@
 			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
@@ -2249,7 +1672,6 @@
 			"ap-southeast-2": endpoint{},
 			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
@@ -2264,7 +1686,6 @@
 			"ap-northeast-2": endpoint{},
 			"ap-southeast-2": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-west-2": endpoint{},
@@ -2277,7 +1698,6 @@
 			},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -2285,11 +1705,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -2326,7 +1744,6 @@
 			Protocols: []string{"http", "https"},
 		},
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -2334,11 +1751,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -2346,25 +1761,6 @@
 			"us-west-2": endpoint{},
 		},
 	},
-	"mq": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
 	"mturk-requester": service{
 		IsRegionalized: boxedFalse,
 
@@ -2378,48 +1774,12 @@
 	"neptune": service{
 
 		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{
-				Hostname: "rds.ap-northeast-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-northeast-1",
-				},
-			},
-			"ap-northeast-2": endpoint{
-				Hostname: "rds.ap-northeast-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-northeast-2",
-				},
-			},
-			"ap-south-1": endpoint{
-				Hostname: "rds.ap-south-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-south-1",
-				},
-			},
-			"ap-southeast-1": endpoint{
-				Hostname: "rds.ap-southeast-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-southeast-1",
-				},
-			},
-			"ap-southeast-2": endpoint{
-				Hostname: "rds.ap-southeast-2.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-southeast-2",
-				},
-			},
 			"eu-central-1": endpoint{
 				Hostname: "rds.eu-central-1.amazonaws.com",
 				CredentialScope: credentialScope{
 					Region: "eu-central-1",
 				},
 			},
-			"eu-north-1": endpoint{
-				Hostname: "rds.eu-north-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "eu-north-1",
-				},
-			},
 			"eu-west-1": endpoint{
 				Hostname: "rds.eu-west-1.amazonaws.com",
 				CredentialScope: credentialScope{
@@ -2506,12 +1866,8 @@
 			},
 		},
 		Endpoints: endpoints{
-			"ap-south-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-west-2": endpoint{},
+			"eu-west-1": endpoint{},
+			"us-east-1": endpoint{},
 		},
 	},
 	"polly": service{
@@ -2524,7 +1880,6 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
@@ -2535,52 +1890,9 @@
 			"us-west-2": endpoint{},
 		},
 	},
-	"projects.iot1click": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
-	"qldb": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
-	"ram": service{
-
-		Endpoints: endpoints{
-			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
-			"ap-southeast-2": endpoint{},
-			"ca-central-1": endpoint{},
-			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
-			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
-			"eu-west-3": endpoint{},
-			"us-east-1": endpoint{},
-			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
-			"us-west-2": endpoint{},
-		},
-	},
 	"rds": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -2588,11 +1900,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{
 				SSLCommonName: "{service}.{dnsSuffix}",
@@ -2605,7 +1915,6 @@
 	"redshift": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -2613,11 +1922,9 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
 			"eu-west-2": endpoint{},
 			"eu-west-3": endpoint{},
-			"me-south-1": endpoint{},
 			"sa-east-1": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
@@ -2629,23 +1936,16 @@
 
 		Endpoints: endpoints{
 			"ap-northeast-1": endpoint{},
-			"ap-northeast-2": endpoint{},
-			"ap-south-1": endpoint{},
-			"ap-southeast-1": endpoint{},
 			"ap-southeast-2": endpoint{},
-			"eu-central-1": endpoint{},
 			"eu-west-1": endpoint{},
-			"eu-west-2": endpoint{},
 			"us-east-1": endpoint{},
 			"us-east-2": endpoint{},
-			"us-west-1": endpoint{},
 			"us-west-2": endpoint{},
 		},
 	},
 	"resource-groups": service{
 
 		Endpoints: endpoints{
-			"ap-east-1": endpoint{},
 			"ap-northeast-1": endpoint{},
 			"ap-northeast-2": endpoint{},
 			"ap-south-1": endpoint{},
@@ -2653,51 +1953,13 @@
 			"ap-southeast-2": endpoint{},
 			"ca-central-1": endpoint{},
 			"eu-central-1": endpoint{},
-			"eu-north-1": endpoint{},
 			"eu-west-1": endpoint{},
"eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "robomaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2720,10 +1982,20 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, - "route53resolver": service{ + "runtime.lex": service{ Defaults: endpoint{ - Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, + }, + "runtime.sagemaker": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2734,74 +2006,15 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "s3": service{ - PartitionEndpoint: "us-east-1", - IsRegionalized: boxedTrue, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, Defaults: endpoint{ Protocols: []string{"http", 
"https"}, SignatureVersions: []string{"s3v4"}, @@ -2810,7 +2023,6 @@ var awsPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{ Hostname: "s3.ap-northeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -2827,14 +2039,12 @@ var awsPartition = partition{ }, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -2919,13 +2129,6 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, - "eu-north-1": endpoint{ - Hostname: "s3-control.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, "eu-west-1": endpoint{ Hostname: "s3-control.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, @@ -3040,7 +2243,6 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3075,28 +2277,6 @@ var awsPartition = partition{ }, }, }, - "securityhub": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, "serverlessrepo": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -3123,18 +2303,12 @@ var awsPartition = partition{ "eu-central-1": endpoint{ Protocols: []string{"https"}, }, - "eu-north-1": endpoint{ - Protocols: []string{"https"}, - }, "eu-west-1": endpoint{ Protocols: []string{"https"}, }, "eu-west-2": endpoint{ Protocols: []string{"https"}, }, - "eu-west-3": endpoint{ - Protocols: []string{"https"}, - }, "sa-east-1": endpoint{ Protocols: []string{"https"}, }, @@ -3162,7 +2336,6 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3200,7 +2373,6 @@ var awsPartition = partition{ "servicediscovery": service{ Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3208,11 +2380,9 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3220,20 +2390,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "session.qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": 
endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, "shield": service{ IsRegionalized: boxedFalse, Defaults: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", + SSLCommonName: "Shield.us-east-1.amazonaws.com", Protocols: []string{"https"}, }, Endpoints: endpoints{ @@ -3243,7 +2403,6 @@ var awsPartition = partition{ "sms": service{ Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3251,11 +2410,9 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3267,7 +2424,6 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3288,7 +2444,6 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3296,11 +2451,9 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3314,7 +2467,6 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3322,7 +2474,6 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3350,8 +2501,7 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", }, @@ -3363,7 +2513,6 @@ var awsPartition = partition{ "ssm": service{ Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3371,11 +2520,9 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3386,7 +2533,6 @@ var awsPartition = partition{ "states": service{ Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3394,12 +2540,8 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3409,7 +2551,6 @@ var awsPartition = partition{ "storagegateway": service{ Endpoints: 
endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3417,11 +2558,9 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3443,17 +2582,10 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -3461,36 +2593,11 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, }, }, "sts": service{ @@ -3502,12 +2609,6 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ - "ap-east-1": endpoint{ - Hostname: "sts.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{ Hostname: "sts.ap-northeast-2.amazonaws.com", @@ -3521,18 +2622,11 @@ var awsPartition = partition{ "aws-global": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{ - Hostname: "sts.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "sts-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -3563,21 +2657,14 @@ var awsPartition = partition{ }, }, "support": service{ - PartitionEndpoint: "aws-global", Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "support.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, + "us-east-1": endpoint{}, }, }, "swf": service{ Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3585,11 +2672,9 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": 
endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3599,29 +2684,6 @@ var awsPartition = partition{ }, "tagging": service{ - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transfer": service{ - Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3630,7 +2692,6 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3646,14 +2707,8 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "translate-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -3693,17 +2748,9 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3750,7 +2797,6 @@ var awsPartition = partition{ "xray": service{ Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3758,11 +2804,9 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3802,23 +2846,6 @@ var awscnPartition = partition{ }, }, Services: services{ - "api.ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - "cn-northwest-1": endpoint{ - Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, "apigateway": service{ Endpoints: endpoints{ @@ -3855,20 +2882,6 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, - "cloudfront": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - 
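
The awscnPartition definition beginning above compiles a RegionRegex so region IDs absent from the explicit list can still be attributed to the partition. A small sketch of the public helper that performs this classification:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// PartitionForRegion checks each partition's known regions, falling
	// back to its RegionRegex for region IDs it has never seen.
	partitions := endpoints.DefaultPartitions()
	if p, ok := endpoints.PartitionForRegion(partitions, "cn-north-1"); ok {
		fmt.Println(p.ID()) // aws-cn
	}
}
```
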
CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, "cloudtrail": service{ Endpoints: endpoints{ @@ -3953,6 +2966,13 @@ var awscnPartition = partition{ }, }, }, + "ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "ecs": service{ Endpoints: endpoints{ @@ -4006,19 +3026,6 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, - "firehose": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, "glacier": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -4028,15 +3035,6 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, "iam": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -4057,8 +3055,7 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "cn-north-1": endpoint{}, }, }, "kinesis": service{ @@ -4068,68 +3065,37 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, - "kms": service{ + "lambda": service{ Endpoints: endpoints{ "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, - "lambda": service{ + "logs": service{ Endpoints: endpoints{ "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, - "license-manager": service{ - + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, - "logs": service{ + "rds": service{ Endpoints: endpoints{ "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "redshift": service{ + "redshift": service{ Endpoints: endpoints{ "cn-north-1": endpoint{}, @@ -4207,13 +3173,6 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, - "states": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, "storagegateway": service{ Endpoints: endpoints{ @@ -4239,18 +3198,6 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, - "support": service{ - PartitionEndpoint: "aws-cn-global", - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "support.cn-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, "swf": service{ Endpoints: endpoints{ @@ -4304,32 +3251,6 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - 
"api.ecr": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "api.ecr.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "api.ecr.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, "api.sagemaker": service{ Endpoints: endpoints{ @@ -4344,18 +3265,6 @@ var awsusgovPartition = partition{ }, }, "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "athena": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, @@ -4397,7 +3306,6 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4408,30 +3316,10 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "codebuild": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, "codedeploy": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", @@ -4441,14 +3329,6 @@ var awsusgovPartition = partition{ }, }, }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, "config": service{ Endpoints: endpoints{ @@ -4456,18 +3336,6 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "datasync": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, "directconnect": service{ Endpoints: endpoints{ @@ -4482,23 +3350,10 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "ds": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, "dynamodb": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "dynamodb.us-gov-west-1.amazonaws.com", @@ -4526,6 +3381,13 @@ var awsusgovPartition = partition{ }, }, }, + "ecr": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "ecs": service{ Endpoints: endpoints{ @@ -4553,12 +3415,6 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, "elasticloadbalancing": service{ Endpoints: endpoints{ @@ -4580,13 +3436,6 @@ var awsusgovPartition = partition{ "es": service{ Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ 
- Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4597,13 +3446,6 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "firehose": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, "glacier": service{ Endpoints: endpoints{ @@ -4613,22 +3455,6 @@ var awsusgovPartition = partition{ }, }, }, - "glue": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, "guardduty": service{ IsRegionalized: boxedTrue, Defaults: endpoint{ @@ -4638,12 +3464,6 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "health": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, "iam": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -4684,12 +3504,6 @@ var awsusgovPartition = partition{ "kms": service{ Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4701,13 +3515,6 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "license-manager": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, "logs": service{ Endpoints: endpoints{ @@ -4715,12 +3522,6 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, "metering.marketplace": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -4728,7 +3529,6 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4739,49 +3539,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "neptune": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, "polly": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, - "ram": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, "rds": service{ Endpoints: endpoints{ @@ -4802,19 +3565,6 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "route53": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, "runtime.sagemaker": service{ Endpoints: endpoints{ @@ -4878,54 +3628,16 @@ var awsusgovPartition = partition{ }, }, }, - "secretsmanager": service{ + "sms": 
service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, }, }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + "snowball": service{ + Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-gov-west-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4976,12 +3688,6 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "dynamodb.us-gov-west-1.amazonaws.com", @@ -5026,626 +3732,5 @@ var awsusgovPartition = partition{ }, }, }, - "waf-regional": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - }, -} - -// AwsIsoPartition returns the Resolver for AWS ISO (US). -func AwsIsoPartition() Partition { - return awsisoPartition.Partition() -} - -var awsisoPartition = partition{ - ID: "aws-iso", - Name: "AWS ISO (US)", - DNSSuffix: "c2s.ic.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-iso-east-1": region{ - Description: "US ISO East", - }, - }, - Services: services{ - "api.ecr": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "autoscaling": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: 
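
`AwsIsoPartition()` above follows the same accessor pattern as the standard `AwsPartition()`: each returns a `Partition` value that can be enumerated without resolving an endpoint. For example:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Enumerate a partition's model instead of resolving a single endpoint.
	p := endpoints.AwsPartition()
	fmt.Println(p.ID()) // aws
	if svc, ok := p.Services()["swf"]; ok {
		for regionID := range svc.Regions() {
			fmt.Println(regionID) // regions where swf has a modeled endpoint
		}
	}
}
```
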
endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "iam.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - "us-iso-east-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "route53.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "sns": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sqs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", 
"https"}, - }, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-global", - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ - Hostname: "support.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, - }, - }, - }, -} - -// AwsIsoBPartition returns the Resolver for AWS ISOB (US). -func AwsIsoBPartition() Partition { - return awsisobPartition.Partition() -} - -var awsisobPartition = partition{ - ID: "aws-iso-b", - Name: "AWS ISOB (US)", - DNSSuffix: "sc2s.sgov.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-isob-east-1": region{ - Description: "US ISOB East (Ohio)", - }, - }, - Services: services{ - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-b-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ - Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "kinesis": service{ - 
- Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - "us-isob-east-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-b-global", - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ - Hostname: "support.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, - }, - }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go deleted file mode 100644 index ca8fc828..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go +++ /dev/null @@ -1,141 +0,0 @@ -package endpoints - -// Service identifiers -// -// Deprecated: Use client package's EndpointsID value instead of these -// ServiceIDs. These IDs are not maintained, and are out of date. -const ( - A4bServiceID = "a4b" // A4b. - AcmServiceID = "acm" // Acm. - AcmPcaServiceID = "acm-pca" // AcmPca. - ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. - ApiPricingServiceID = "api.pricing" // ApiPricing. - ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AppsyncServiceID = "appsync" // Appsync. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - CeServiceID = "ce" // Ce. - ChimeServiceID = "chime" // Chime. - Cloud9ServiceID = "cloud9" // Cloud9. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. 
- CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ComprehendServiceID = "comprehend" // Comprehend. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DaxServiceID = "dax" // Dax. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - FmsServiceID = "fms" // Fms. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GlueServiceID = "glue" // Glue. - GreengrassServiceID = "greengrass" // Greengrass. - GuarddutyServiceID = "guardduty" // Guardduty. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - IotanalyticsServiceID = "iotanalytics" // Iotanalytics. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MediaconvertServiceID = "mediaconvert" // Mediaconvert. - MedialiveServiceID = "medialive" // Medialive. - MediapackageServiceID = "mediapackage" // Mediapackage. - MediastoreServiceID = "mediastore" // Mediastore. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MghServiceID = "mgh" // Mgh. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. 
- MturkRequesterServiceID = "mturk-requester" // MturkRequester. - NeptuneServiceID = "neptune" // Neptune. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - ResourceGroupsServiceID = "resource-groups" // ResourceGroups. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. - S3ServiceID = "s3" // S3. - S3ControlServiceID = "s3-control" // S3Control. - SagemakerServiceID = "api.sagemaker" // Sagemaker. - SdbServiceID = "sdb" // Sdb. - SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. - ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - TransferServiceID = "transfer" // Transfer. - TranslateServiceID = "translate" // Translate. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkmailServiceID = "workmail" // Workmail. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 9c936be6..e29c0951 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -35,7 +35,7 @@ type Options struct { // // If resolving an endpoint on the partition list the provided region will // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unknown and resolving + // endpoint ID with. If both the service and region are unkonwn and resolving // the endpoint on partition list an UnknownEndpointError error will be returned. // // If resolving and endpoint on a partition specific resolver that partition's @@ -170,13 +170,10 @@ func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { // A Partition provides the ability to enumerate the partition's regions // and services. type Partition struct { - id, dnsSuffix string - p *partition + id string + p *partition } -// DNSSuffix returns the base domain name of the partition. -func (p Partition) DNSSuffix() string { return p.dnsSuffix } - // ID returns the identifier of the partition. 
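
The deleted dep_service_ids.go above shipped only deprecated constants; its own doc comment directs callers to each client package's `EndpointsID`. A sketch of the replacement usage:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Use the client package's EndpointsID rather than the removed
	// endpoints.S3ServiceID constant; the generated region IDs remain.
	resolved, err := endpoints.DefaultResolver().EndpointFor(
		s3.EndpointsID, endpoints.UsWest2RegionID)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.URL) // e.g. https://s3.us-west-2.amazonaws.com
}
```
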
func (p Partition) ID() string { return p.id } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 523ad79a..ff6f76db 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -54,9 +54,8 @@ type partition struct { func (p partition) Partition() Partition { return Partition{ - dnsSuffix: p.DNSSuffix, - id: p.ID, - p: &p, + id: p.ID, + p: &p, } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go index 0fdfcc56..05e92df2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -16,10 +16,6 @@ import ( type CodeGenOptions struct { // Options for how the model will be decoded. DecodeModelOptions DecodeModelOptions - - // Disables code generation of the service endpoint prefix IDs defined in - // the model. - DisableGenerateServiceIDs bool } // Set combines all of the option functions together @@ -43,16 +39,8 @@ func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGe return err } - v := struct { - Resolver - CodeGenOptions - }{ - Resolver: resolver, - CodeGenOptions: opts, - } - tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { return fmt.Errorf("failed to execute template, %v", err) } @@ -178,17 +166,15 @@ import ( "regexp" ) - {{ template "partition consts" $.Resolver }} + {{ template "partition consts" . }} - {{ range $_, $partition := $.Resolver }} + {{ range $_, $partition := . }} {{ template "partition region consts" $partition }} {{ end }} - {{ if not $.DisableGenerateServiceIDs -}} - {{ template "service consts" $.Resolver }} - {{- end }} + {{ template "service consts" . }} - {{ template "endpoint resolvers" $.Resolver }} + {{ template "endpoint resolvers" . 
}} {{- end }} {{ define "partition consts" }} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go index d9b37f4d..271da432 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -1,17 +1,18 @@ +// +build !appengine,!plan9 + package request import ( - "strings" + "net" + "os" + "syscall" ) func isErrConnectionReset(err error) bool { - if strings.Contains(err.Error(), "read: connection reset") { - return false - } - - if strings.Contains(err.Error(), "connection reset") || - strings.Contains(err.Error(), "broken pipe") { - return true + if opErr, ok := err.(*net.OpError); ok { + if sysErr, ok := opErr.Err.(*os.SyscallError); ok { + return sysErr.Err == syscall.ECONNRESET + } } return false diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go new file mode 100644 index 00000000..daf9eca4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go @@ -0,0 +1,11 @@ +// +build appengine plan9 + +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + return strings.Contains(err.Error(), "connection reset") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 185b0731..605a72d3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -19,11 +19,10 @@ type Handlers struct { UnmarshalError HandlerList Retry HandlerList AfterRetry HandlerList - CompleteAttempt HandlerList Complete HandlerList } -// Copy returns a copy of this handler's lists. +// Copy returns of this handler's lists. func (h *Handlers) Copy() Handlers { return Handlers{ Validate: h.Validate.copy(), @@ -37,12 +36,11 @@ func (h *Handlers) Copy() Handlers { UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), AfterRetry: h.AfterRetry.copy(), - CompleteAttempt: h.CompleteAttempt.copy(), Complete: h.Complete.copy(), } } -// Clear removes callback functions for all handlers. +// Clear removes callback functions for all handlers func (h *Handlers) Clear() { h.Validate.Clear() h.Build.Clear() @@ -55,55 +53,9 @@ func (h *Handlers) Clear() { h.ValidateResponse.Clear() h.Retry.Clear() h.AfterRetry.Clear() - h.CompleteAttempt.Clear() h.Complete.Clear() } -// IsEmpty returns if there are no handlers in any of the handlerlists. -func (h *Handlers) IsEmpty() bool { - if h.Validate.Len() != 0 { - return false - } - if h.Build.Len() != 0 { - return false - } - if h.Send.Len() != 0 { - return false - } - if h.Sign.Len() != 0 { - return false - } - if h.Unmarshal.Len() != 0 { - return false - } - if h.UnmarshalStream.Len() != 0 { - return false - } - if h.UnmarshalMeta.Len() != 0 { - return false - } - if h.UnmarshalError.Len() != 0 { - return false - } - if h.ValidateResponse.Len() != 0 { - return false - } - if h.Retry.Len() != 0 { - return false - } - if h.AfterRetry.Len() != 0 { - return false - } - if h.CompleteAttempt.Len() != 0 { - return false - } - if h.Complete.Len() != 0 { - return false - } - - return true -} - // A HandlerListRunItem represents an entry in the HandlerList which // is being run. 
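
The connection_reset_error.go hunks swap a string-matching check for the build-tagged syscall inspection shown above, keeping the strings.Contains fallback only for appengine/plan9, which lack the needed syscall support. A standalone sketch of the syscall path on a typical platform:

```go
package main

import (
	"fmt"
	"net"
	"os"
	"syscall"
)

// isConnReset mirrors the build-tagged check above: unwrap the
// *net.OpError down to the raw errno and compare against ECONNRESET.
func isConnReset(err error) bool {
	if opErr, ok := err.(*net.OpError); ok {
		if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
			return sysErr.Err == syscall.ECONNRESET
		}
	}
	return false
}

func main() {
	err := &net.OpError{
		Op:  "read",
		Err: os.NewSyscallError("read", syscall.ECONNRESET),
	}
	fmt.Println(isConnReset(err)) // true
}
```
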
type HandlerListRunItem struct { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index 9370fa50..b0c2ef4f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -15,15 +15,12 @@ type offsetReader struct { closed bool } -func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { reader := &offsetReader{} - _, err := buf.Seek(offset, sdkio.SeekStart) - if err != nil { - return nil, err - } + buf.Seek(offset, sdkio.SeekStart) reader.buf = buf - return reader, nil + return reader } // Close will close the instance of the offset reader's access to @@ -57,9 +54,7 @@ func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { // CloseAndCopy will return a new offsetReader with a copy of the old buffer // and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { - if err := o.Close(); err != nil { - return nil, err - } +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() return newOffsetReader(o.buf, offset) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 8e332cce..63e7f71c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io" + "net" "net/http" "net/url" "reflect" @@ -64,15 +65,6 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool - // Additional API error codes that should be retried. IsErrorRetryable - // will consider these codes in addition to its built in cases. - RetryErrorCodes []string - - // Additional API error codes that should be retried with throttle backoff - // delay. IsErrorThrottle will consider these codes in addition to its - // built in cases. - ThrottleErrorCodes []string - // A value greater than 0 instructs the request to be signed as Presigned URL // You should not set this field directly. Instead use Request's // Presign or PresignRequest methods. @@ -130,6 +122,7 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, Handlers: handlers.Copy(), Retryer: retryer, + AttemptTime: time.Now(), Time: time.Now(), ExpireTime: 0, Operation: operation, @@ -240,10 +233,6 @@ func (r *Request) WillRetry() bool { return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } -func fmtAttemptCount(retryCount, maxRetries int) string { - return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) -} - // ParamsFilled returns if the request's parameters have been populated // and the parameters are valid. False is returned if no parameters are // provided or invalid. @@ -272,25 +261,14 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader - - if aws.IsReaderSeekable(reader) { - var err error - // Get the Bodies current offset so retries will start from the same - // initial position. 
- r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to determine start of request body", err) - return - } - } + r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. r.ResetBody() } // Presign returns the request's signed URL. Error will be returned -// if the signing fails. The expire parameter is only used for presigned Amazon -// S3 API requests. All other AWS services will use a fixed expiration -// time of 15 minutes. +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expriation +// time of 15 minutes. // // It is invalid to create a presigned URL with a expire duration 0 or less. An // error is returned if expire duration is 0 or less. @@ -309,7 +287,7 @@ func (r *Request) Presign(expire time.Duration) (string, error) { // PresignRequest behaves just like presign, with the addition of returning a // set of headers that were signed. The expire parameter is only used for // presigned Amazon S3 API requests. All other AWS services will use a fixed -// expiration time of 15 minutes. +// expriation time of 15 minutes. // // It is invalid to create a presigned URL with a expire duration 0 or less. An // error is returned if expire duration is 0 or less. @@ -354,15 +332,16 @@ func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, err return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } -const ( - notRetrying = "not retrying" -) - -func debugLogReqError(r *Request, stage, retryStr string, err error) { +func debugLogReqError(r *Request, stage string, retrying bool, err error) { if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { return } + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) } @@ -381,12 +360,12 @@ func (r *Request) Build() error { if !r.built { r.Handlers.Validate.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Request", notRetrying, r.Error) + debugLogReqError(r, "Validate Request", false, r.Error) return r.Error } r.Handlers.Build.Run(r) if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) + debugLogReqError(r, "Build Request", false, r.Error) return r.Error } r.built = true @@ -402,7 +381,7 @@ func (r *Request) Build() error { func (r *Request) Sign() error { r.Build() if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) + debugLogReqError(r, "Build Request", false, r.Error) return r.Error } @@ -410,16 +389,12 @@ func (r *Request) Sign() error { return r.Error } -func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { +func (r *Request) getNextRequestBody() (io.ReadCloser, error) { if r.safeBody != nil { r.safeBody.Close() } - r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to get next request body reader", err) - } + r.safeBody = newOffsetReader(r.Body, r.BodyStart) // Go 1.8 tightened and clarified the rules code needs to use when building // requests with the http package. 
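
Presign, documented above, signs the request and returns a URL instead of sending it; only S3 honors the expiry argument. A sketch with a hypothetical bucket and key:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(
		aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// Build the request without sending it, then presign a URL. The
	// bucket and key here are placeholders.
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	urlStr, err := req.Presign(15 * time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(urlStr)
}
```
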
Go 1.8 removed the automatic detection @@ -436,10 +411,10 @@ func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { // Related golang/go#18257 l, err := aws.SeekerLen(r.Body) if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to compute request body size", err) + return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) } + var body io.ReadCloser if l == 0 { body = NoBody } else if l > 0 { @@ -491,90 +466,80 @@ func (r *Request) Send() error { r.Handlers.Complete.Run(r) }() - if err := r.Error; err != nil { - return err - } - for { - r.Error = nil r.AttemptTime = time.Now() - - if err := r.Sign(); err != nil { - debugLogReqError(r, "Sign Request", notRetrying, err) - return err + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + + // Closing response body to ensure that no response body is leaked + // between retry attempts. + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } } - if err := r.sendRequest(); err == nil { - return nil - } - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - - if r.Error != nil || !aws.BoolValue(r.Retryable) { + r.Sign() + if r.Error != nil { return r.Error } - if err := r.prepareRetry(); err != nil { - r.Error = err - return err - } - } -} - -func (r *Request) prepareRetry() error { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - if err := r.Error; err != nil { - return awserr.New(ErrCodeSerialization, - "failed to prepare body for retry", err) - - } - - // Closing response body to ensure that no response body is leaked - // between retry attempts. 
- if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - - return nil -} - -func (r *Request) sendRequest() (sendErr error) { - defer r.Handlers.CompleteAttempt.Run(r) + r.Retryable = nil - r.Retryable = nil - r.Handlers.Send.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } + r.Handlers.Send.Run(r) + if r.Error != nil { + if !shouldRetryCancel(r) { + return r.Error + } + + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, err) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + err := r.Error + + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, err) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - debugLogReqError(r, "Validate Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, err) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error + break } return nil @@ -600,6 +565,32 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } +func shouldRetryCancel(r *Request) bool { + awsErr, ok := r.Error.(awserr.Error) + timeoutErr := false + errStr := r.Error.Error() + if ok { + if awsErr.Code() == CanceledErrorCode { + return false + } + err := awsErr.OrigErr() + netErr, netOK := err.(net.Error) + timeoutErr = netOK && netErr.Temporary() + if urlErr, ok := err.(*url.Error); !timeoutErr && ok { + errStr = urlErr.Err.Error() + } + } + + // There can be two types of canceled errors here. + // The first being a net.Error and the other being an error. + // If the request was timed out, we want to continue the retry + // process. Otherwise, return the canceled error. 
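
The rewritten Send above inlines the retry loop: sign, send, unmarshal, and on a retryable error re-enter the loop after the Retry/AfterRetry handlers run. Because the Complete list runs once per call (deferred at the top of Send), it is a convenient place to observe the final attempt count, as in this sketch with a hypothetical SQS client:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	sess := session.Must(session.NewSession(
		aws.NewConfig().WithRegion("us-east-1").WithMaxRetries(3)))
	svc := sqs.New(sess)

	// Complete runs once per API call, after the retry loop in Send has
	// finished, so RetryCount reflects the attempts actually made.
	svc.Handlers.Complete.PushBack(func(r *request.Request) {
		log.Printf("%s/%s done after %d retries, err=%v",
			r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount, r.Error)
	})

	_, _ = svc.ListQueues(&sqs.ListQueuesInput{})
}
```
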
+ return timeoutErr || + (errStr != "net/http: request canceled" && + errStr != "net/http: request canceled while waiting for connection") + +} + // SanitizeHostForHeader removes default port from host and updates request.Host func SanitizeHostForHeader(r *http.Request) { host := getHost(r) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index de1292f4..7c6a8000 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -4,8 +4,6 @@ package request import ( "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" ) // NoBody is a http.NoBody reader instructing Go HTTP client to not include @@ -26,8 +24,7 @@ var NoBody = http.NoBody func (r *Request) ResetBody() { body, err := r.getNextRequestBody() if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to reset request body", err) + r.Error = err return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index f093fc54..a633ed5a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -146,7 +146,7 @@ func (r *Request) nextPageTokens() []interface{} { return nil } case bool: - if !v { + if v == false { return nil } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index e84084da..7d527029 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -1,41 +1,23 @@ package request import ( - "net" - "net/url" - "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" ) -// Retryer provides the interface drive the SDK's request retry behavior. The -// Retryer implementation is responsible for implementing exponential backoff, -// and determine if a request API error should be retried. -// -// client.DefaultRetryer is the SDK's default implementation of the Retryer. It -// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle -// methods to determine if the request is retried. +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the client.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. type Retryer interface { - // RetryRules return the retry delay that should be used by the SDK before - // making another request attempt for the failed request. RetryRules(*Request) time.Duration - - // ShouldRetry returns if the failed request is retryable. - // - // Implementations may consider request attempt count when determining if a - // request is retryable, but the SDK will use MaxRetries to limit the - // number of attempts a request are made. ShouldRetry(*Request) bool - - // MaxRetries is the number of times a request may be retried before - // failing. MaxRetries() int } -// WithRetryer sets a Retryer value to the given Config returning the Config -// value for chaining. +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. 
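
The retryer.go hunk above trims the Retryer interface docs, but the contract is unchanged: RetryRules supplies the backoff delay, ShouldRetry the decision, MaxRetries the cap, and WithRetryer installs an implementation on a Config. A sketch of a custom retryer that embeds the SDK's default one and merely caps its delay; the two-second cap is an arbitrary example value:

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

// capRetryer keeps the default retry decisions (ShouldRetry and
// MaxRetries are promoted from the embedded type) but caps the delay.
type capRetryer struct {
	client.DefaultRetryer
}

func (r capRetryer) RetryRules(req *request.Request) time.Duration {
	d := r.DefaultRetryer.RetryRules(req)
	if d > 2*time.Second {
		d = 2 * time.Second
	}
	return d
}

func main() {
	cfg := request.WithRetryer(aws.NewConfig(), capRetryer{
		DefaultRetryer: client.DefaultRetryer{NumMaxRetries: 5},
	})
	_ = session.Must(session.NewSession(cfg))
}
```
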
func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { cfg.Retryer = retryer return cfg @@ -56,10 +38,8 @@ var throttleCodes = map[string]struct{}{ "ThrottlingException": {}, "RequestLimitExceeded": {}, "RequestThrottled": {}, - "RequestThrottledException": {}, "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 - "TransactionInProgressException": {}, } // credsExpiredCodes is a collection of error codes which signify the credentials @@ -94,6 +74,10 @@ var validParentCodes = map[string]struct{}{ ErrCodeRead: {}, } +type temporaryError interface { + Temporary() bool +} + func isNestedErrorRetryable(parentErr awserr.Error) bool { if parentErr == nil { return false @@ -112,7 +96,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { return isCodeRetryable(aerr.Code()) } - if t, ok := err.(temporary); ok { + if t, ok := err.(temporaryError); ok { return t.Temporary() || isErrConnectionReset(err) } @@ -122,90 +106,32 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil. func IsErrorRetryable(err error) bool { - if err == nil { - return false - } - return shouldRetryError(err) -} - -type temporary interface { - Temporary() bool -} - -func shouldRetryError(origErr error) bool { - switch err := origErr.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - if isNestedErrorRetryable(err) { - return true - } - - origErr := err.OrigErr() - var shouldRetry bool - if origErr != nil { - shouldRetry := shouldRetryError(origErr) - if err.Code() == "RequestError" && !shouldRetry { - return false - } - } - if isCodeRetryable(err.Code()) { - return true - } - return shouldRetry - - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. - return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryError(err.Err) - - case temporary: - if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { - return true + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) } - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() || isErrConnectionReset(origErr) - - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retryable. See - // TestRequest4xxUnretryable for an example. - return true - - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true } + return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. // Returns false if error is nil. 
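To see both halves of the restored IsErrorRetryable in action, a hedged sketch; it assumes request.ErrCodeSerialization is among the validParentCodes, as in the upstream source:

	// a stand-in transport error satisfying the temporaryError interface
	type flakyErr struct{}

	func (flakyErr) Error() string   { return "read tcp: connection timed out" }
	func (flakyErr) Temporary() bool { return true }

	func classify() {
		aerr := awserr.New(request.ErrCodeSerialization, "failed to decode response", flakyErr{})
		fmt.Println(request.IsErrorRetryable(aerr)) // true, via isNestedErrorRetryable
		fmt.Println(request.IsErrorRetryable(nil))  // false: nil short-circuits
	}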
func IsErrorThrottle(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeThrottle(aerr.Code()) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) + } } return false } -// IsErrorExpiredCreds returns whether the error code is a credential expiry -// error. Returns false if error is nil. +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. func IsErrorExpiredCreds(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeExpiredCreds(aerr.Code()) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) + } } return false } @@ -215,58 +141,17 @@ func IsErrorExpiredCreds(err error) bool { // // Alias for the utility function IsErrorRetryable func (r *Request) IsErrorRetryable() bool { - if isErrCode(r.Error, r.RetryErrorCodes) { - return true - } - - // HTTP response status code 501 should not be retried. - // 501 represents Not Implemented which means the request method is not - // supported by the server and cannot be handled. - if r.HTTPResponse != nil { - // HTTP response status code 500 represents internal server error and - // should be retried without any throttle. - if r.HTTPResponse.StatusCode == 500 { - return true - } - } return IsErrorRetryable(r.Error) } -// IsErrorThrottle returns whether the error is to be throttled based on its -// code. Returns false if the request has no Error set. +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set // // Alias for the utility function IsErrorThrottle func (r *Request) IsErrorThrottle() bool { - if isErrCode(r.Error, r.ThrottleErrorCodes) { - return true - } - - if r.HTTPResponse != nil { - switch r.HTTPResponse.StatusCode { - case - 429, // error caused due to too many requests - 502, // Bad Gateway error should be throttled - 503, // caused when service is unavailable - 504: // error occurred due to gateway timeout - return true - } - } - return IsErrorThrottle(r.Error) } -func isErrCode(err error, codes []string) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - for _, code := range codes { - if code == aerr.Code() { - return true - } - } - } - - return false -} - // IsErrorExpired returns whether the error code is a credential expiry error. // Returns false if the request has no Error set. // diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go index 8630683f..bcfd947a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -17,8 +17,6 @@ const ( ParamMinValueErrCode = "ParamMinValueError" // ParamMinLenErrCode is the error code for fields without enough elements. ParamMinLenErrCode = "ParamMinLenError" - // ParamMaxLenErrCode is the error code for value being too long. - ParamMaxLenErrCode = "ParamMaxLenError" // ParamFormatErrCode is the error code for a field with invalid // format or characters. @@ -239,29 +237,6 @@ func (e *ErrParamMinLen) MinLen() int { return e.min } -// An ErrParamMaxLen represents a maximum length parameter error. -type ErrParamMaxLen struct { - errInvalidParam - max int -} - -// NewErrParamMaxLen creates a new maximum length parameter error. 
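Usage of the two predicates is mechanical; a small sketch using a code from the throttleCodes table above (the "ExpiredToken" code is assumed to be in credsExpiredCodes, whose entries are not shown in this hunk):

	throttled := awserr.New("TooManyRequestsException", "slow down", nil)
	fmt.Println(request.IsErrorThrottle(throttled)) // true: listed in throttleCodes

	expired := awserr.New("ExpiredToken", "credentials expired", nil)
	fmt.Println(request.IsErrorExpiredCreds(expired)) // true, under that assumption

	fmt.Println(request.IsErrorThrottle(nil)) // false: nil is never throttled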
-func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { - return &ErrParamMaxLen{ - errInvalidParam: errInvalidParam{ - code: ParamMaxLenErrCode, - field: field, - msg: fmt.Sprintf("maximum size of %v, %v", max, value), - }, - max: max, - } -} - -// MaxLen returns the field's required minimum length. -func (e *ErrParamMaxLen) MaxLen() int { - return e.max -} - // An ErrParamFormat represents a invalid format parameter error. type ErrParamFormat struct { errInvalidParam diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go deleted file mode 100644 index ea9ebb6f..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build go1.7 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go deleted file mode 100644 index fec39dfc..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !go1.6,go1.5 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go deleted file mode 100644 index 1c5a5391..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !go1.7,go1.6 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. 
-func getCABundleTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go deleted file mode 100644 index 7713ccfc..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ /dev/null @@ -1,259 +0,0 @@ -package session - -import ( - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/processcreds" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -func resolveCredentials(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (*credentials.Credentials, error) { - - switch { - case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration - // so load that profile from shared config first. - // Github(aws/aws-sdk-go#2727) - return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - - case envCfg.Creds.HasKeys(): - // Environment credentials - return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil - - case len(envCfg.WebIdentityTokenFilePath) != 0: - // Web identity token from environment, RoleARN required to also be - // set. - return assumeWebIdentity(cfg, handlers, - envCfg.WebIdentityTokenFilePath, - envCfg.RoleARN, - envCfg.RoleSessionName, - ) - - default: - // Fallback to the "default" credential resolution chain. - return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - } -} - -// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but -// 'AWS_IAM_ROLE_ARN' was not set. -var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) - -// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_IAM_ROLE_ARN' was set but -// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. -var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) - -func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, - filepath string, - roleARN, sessionName string, -) (*credentials.Credentials, error) { - - if len(filepath) == 0 { - return nil, WebIdentityEmptyTokenFilePathErr - } - - if len(roleARN) == 0 { - return nil, WebIdentityEmptyRoleARNErr - } - - creds := stscreds.NewWebIdentityCredentials( - &Session{ - Config: cfg, - Handlers: handlers.Copy(), - }, - roleARN, - sessionName, - filepath, - ) - - return creds, nil -} - -func resolveCredsFromProfile(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch { - case sharedCfg.SourceProfile != nil: - // Assume IAM role with credentials source from a different profile. - creds, err = resolveCredsFromProfile(cfg, envCfg, - *sharedCfg.SourceProfile, handlers, sessOpts, - ) - - case sharedCfg.Creds.HasKeys(): - // Static Credentials from Shared Config/Credentials file. 
- creds = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - - case len(sharedCfg.CredentialProcess) != 0: - // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) - - case len(sharedCfg.CredentialSource) != 0: - creds, err = resolveCredsFromSource(cfg, envCfg, - sharedCfg, handlers, sessOpts, - ) - - case len(sharedCfg.WebIdentityTokenFile) != 0: - // Credentials from Assume Web Identity token require an IAM Role, and - // that roll will be assumed. May be wrapped with another assume role - // via SourceProfile. - return assumeWebIdentity(cfg, handlers, - sharedCfg.WebIdentityTokenFile, - sharedCfg.RoleARN, - sharedCfg.RoleSessionName, - ) - - default: - // Fallback to default credentials provider, include mock errors for - // the credential chain so user can identify why credentials failed to - // be retrieved. - creds = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{ - Err: awserr.New("EnvAccessKeyNotFound", - "failed to find credentials in the environment.", nil), - }, - &credProviderError{ - Err: awserr.New("SharedCredsLoad", - fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), - }, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) - } - if err != nil { - return nil, err - } - - if len(sharedCfg.RoleARN) > 0 { - cfgCp := *cfg - cfgCp.Credentials = creds - return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) - } - - return creds, nil -} - -// valid credential source values -const ( - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" -) - -func resolveCredsFromSource(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch sharedCfg.CredentialSource { - case credSourceEc2Metadata: - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - case credSourceEnvironment: - creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) - - case credSourceECSContainer: - if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - return nil, ErrSharedConfigECSContainerEnvVarEmpty - } - - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - default: - return nil, ErrSharedConfigInvalidCredSource - } - - return creds, nil -} - -func credsFromAssumeRole(cfg aws.Config, - handlers request.Handlers, - sharedCfg sharedConfig, - sessOpts Options, -) (*credentials.Credentials, error) { - - if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. 
- return nil, AssumeRoleTokenProviderNotSetError{} - } - - return stscreds.NewCredentials( - &Session{ - Config: &cfg, - Handlers: handlers.Copy(), - }, - sharedCfg.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.RoleSessionName - opt.Duration = sessOpts.AssumeRoleDuration - - // Assume role with external ID - if len(sharedCfg.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ), nil -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a -// session when the MFAToken option is not set when shared config is configured -// load assume a role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error -} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 7ec66e7e..98d420fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -1,93 +1,97 @@ /* -Package session provides configuration for the SDK's service clients. Sessions -can be shared across service clients that share the same base configuration. +Package session provides configuration for the SDK's service clients. -Sessions are safe to use concurrently as long as the Session is not being -modified. Sessions should be cached when possible, because creating a new -Session will load all configuration values from the environment, and config -files each time the Session is created. Sharing the Session value across all of -your service clients will ensure the configuration is loaded the fewest number -of times possible. +Sessions can be shared across all service clients that share the same base +configuration. The Session is built from the SDK's default configuration and +request handlers. -Sessions options from Shared Config +Sessions should be cached when possible, because creating a new Session will +load all configuration values from the environment, and config files each time +the Session is created. Sharing the Session value across all of your service +clients will ensure the configuration is loaded the fewest number of times possible. -By default NewSession will only load credentials from the shared credentials -file (~/.aws/credentials). 
If the AWS_SDK_LOAD_CONFIG environment variable is -set to a truthy value the Session will be created from the configuration -values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. Using the NewSessionWithOptions with -SharedConfigState set to SharedConfigEnable will create the session as if the -AWS_SDK_LOAD_CONFIG environment variable was set. +Concurrency -Credential and config loading order +Sessions are safe to use concurrently as long as the Session is not being +modified. The SDK will not modify the Session once the Session has been created. +Creating service clients concurrently from a shared Session is safe. -The Session will attempt to load configuration and credentials from the -environment, configuration files, and other credential sources. The order -configuration is loaded in is: +Sessions from Shared Config - * Environment Variables - * Shared Credentials file - * Shared Configuration file (if SharedConfig is enabled) - * EC2 Instance Metadata (credentials only) +Sessions can be created using the method above that will only load the +additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. +Alternatively you can explicitly create a Session with shared config enabled. +To do this you can use NewSessionWithOptions to configure how the Session will +be created. Using the NewSessionWithOptions with SharedConfigState set to +SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG +environment variable was set. -The Environment variables for credentials will have precedence over shared -config even if SharedConfig is enabled. To override this behavior, and use -shared config credentials instead specify the session.Options.Profile, (e.g. -when using credential_source=Environment to assume a role). +Creating Sessions - sess, err := session.NewSessionWithOptions(session.Options{ - Profile: "myProfile", - }) +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. -Creating Sessions +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. See the section Sessions from Shared Config for +more information. -Creating a Session without additional options will load credentials region, and -profile loaded from the environment and shared config automatically. See, -"Environment Variables" section for information on environment variables used -by Session. +Create a Session with the default config and request handlers. With credentials +region, and profile loaded from the environment and shared config automatically. +Requires the AWS_PROFILE to be set, or "default" is used. // Create Session - sess, err := session.NewSession() + sess := session.Must(session.NewSession()) + // Create a Session with a custom region + sess := session.Must(session.NewSession(&aws.Config{ + Region: aws.String("us-east-1"), + })) -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded, config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. 
+	// Create a S3 client instance from a session
+	sess := session.Must(session.NewSession())
-	// Create a Session with a custom region
-	sess, err := session.NewSession(&aws.Config{
-		Region: aws.String("us-west-2"),
-	})
+	svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
 
-Use NewSessionWithOptions to provide additional configuration driving how the
-Session's configuration will be loaded. Such as, specifying shared config
-profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG).
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
 
 	// Equivalent to session.NewSession()
-	sess, err := session.NewSessionWithOptions(session.Options{
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
 		// Options
-	})
+	}))
 
-	sess, err := session.NewSessionWithOptions(session.Options{
-		// Specify profile to load for the session's config
-		Profile: "profile_name",
+	// Specify profile to load for the session's config
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		Profile: "profile_name",
+	}))
 
-		// Provide SDK Config options, such as Region.
-		Config: aws.Config{
-			Region: aws.String("us-west-2"),
-		},
+	// Specify profile for config and region for requests
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		Config: aws.Config{Region: aws.String("us-east-1")},
+		Profile: "profile_name",
+	}))
 
-		// Force enable Shared Config support
+	// Force enable Shared Config support
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
 		SharedConfigState: session.SharedConfigEnable,
-	})
+	}))
 
 Adding Handlers
 
-You can add handlers to a session to decorate API operation, (e.g. adding HTTP
-headers). All clients that use the Session receive a copy of the Session's
-handlers. For example, the following request handler added to the Session logs
-every requests made.
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
 
 	// Create a session, and add additional handlers for all service
 	// clients created with the Session to inherit. Adds logging handler.
@@ -95,15 +99,22 @@ every requests made.
 
 	sess.Handlers.Send.PushFront(func(r *request.Request) {
 		// Log every request made and its payload
-		logger.Printf("Request: %s/%s, Params: %s",
+		logger.Printf("Request: %s/%s, Payload: %s",
 			r.ClientInfo.ServiceName, r.Operation, r.Params)
 	})
 
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
+
 Shared Config Fields
 
-By default the SDK will only load the shared credentials file's
-(~/.aws/credentials) credentials values, and all other config is provided by
-the environment variables, SDK defaults, and user provided aws.Config values.
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
 
 If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
 option is used to create the Session the full shared config values will be
@@ -114,31 +125,24 @@ files have the same format.
 
 If both config files are present the configuration from both files will be
 read. The Session will be created from configuration values from the shared
-credentials file (~/.aws/credentials) over those in the shared config file
-(~/.aws/config).
+credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
 
-Credentials are the values the SDK uses to authenticating requests with AWS
-Services. When specified in a file, both aws_access_key_id and
-aws_secret_access_key must be provided together in the same file to be
-considered valid. They will be ignored if both are not present.
-aws_session_token is an optional field that can be provided in addition to the
-other two fields.
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. When loaded from a configuration file, both aws_access_key_id
+and aws_secret_access_key must be provided together in the same file to be
+considered valid. The values will be ignored if not a complete group.
+aws_session_token is an optional field that can be provided if both of the
+other two fields are also provided.
 
 	aws_access_key_id = AKID
 	aws_secret_access_key = SECRET
 	aws_session_token = TOKEN
 
-	; region only supported if SharedConfigEnabled.
-	region = us-east-1
-
-Assume Role configuration
-
-The role_arn field allows you to configure the SDK to assume an IAM role using
-a set of credentials from another source. Such as when paired with static
-credentials, "profile_source", "credential_process", or "credential_source"
-fields. If "role_arn" is provided, a source of credentials must also be
-specified, such as "source_profile", "credential_source", or
-"credential_process".
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with an MFA token if the session option AssumeRoleTokenProvider
+is set.
 
 	role_arn = arn:aws:iam:::role/
 	source_profile = profile_with_creds
@@ -146,16 +150,40 @@ specified, such as "source_profile", "credential_source", or
 	mfa_serial = 
 	role_session_name = session_name
 
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+	region = us-east-1
+
+Assume Role with MFA token
-The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you
-must also set the Session Option.AssumeRoleTokenProvider. The Session will fail
-to load if the AssumeRoleTokenProvider is not specified.
+To create a session with support for assuming an IAM role with MFA set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA, the SharedConfigState
+session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
+environment variable set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the
+session.
 
 	sess := session.Must(session.NewSessionWithOptions(session.Options{
 		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
 	}))
 
-To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess)
+
+To setup assume role outside of a session see the stscreds.AssumeRoleProvider
 documentation.
 
 Environment Variables
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index 60a6f9ce..c94d0fb9 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -80,7 +80,7 @@ type envConfig struct {
 	// AWS_CONFIG_FILE=$HOME/my_shared_config
 	SharedConfigFile string
 
-	// Sets the path to a custom Credentials Authority (CA) Bundle PEM file
+	// Sets the path to a custom Credentials Authority (CA) Bundle PEM file
 	// that the SDK will use instead of the system's root CA bundle.
 	// Only use this if you want to configure the SDK to use a custom set
 	// of CAs.
@@ -99,41 +99,21 @@ type envConfig struct {
 	CustomCABundle string
 
 	csmEnabled  string
-	CSMEnabled  *bool
+	CSMEnabled  bool
 	CSMPort     string
-	CSMHost     string
 	CSMClientID string
 
+	enableEndpointDiscovery string
 	// Enables endpoint discovery via environment variables.
 	//
 	// AWS_ENABLE_ENDPOINT_DISCOVERY=true
 	EnableEndpointDiscovery *bool
-	enableEndpointDiscovery string
-
-	// Specifies the WebIdentity token the SDK should use to assume a role
-	// with.
-	//
-	// AWS_WEB_IDENTITY_TOKEN_FILE=file_path
-	WebIdentityTokenFilePath string
-
-	// Specifies the IAM role arn to use when assuming an role.
-	//
-	// AWS_ROLE_ARN=role_arn
-	RoleARN string
-
-	// Specifies the IAM role session name to use when assuming a role.
-	//
-	// AWS_ROLE_SESSION_NAME=session_name
-	RoleSessionName string
 }
 
 var (
 	csmEnabledEnvKey = []string{
 		"AWS_CSM_ENABLED",
 	}
-	csmHostEnvKey = []string{
-		"AWS_CSM_HOST",
-	}
 	csmPortEnvKey = []string{
 		"AWS_CSM_PORT",
 	}
@@ -170,15 +150,6 @@ var (
 	sharedConfigFileEnvKey = []string{
 		"AWS_CONFIG_FILE",
 	}
-	webIdentityTokenFilePathEnvKey = []string{
-		"AWS_WEB_IDENTITY_TOKEN_FILE",
-	}
-	roleARNEnvKey = []string{
-		"AWS_ROLE_ARN",
-	}
-	roleSessionNameEnvKey = []string{
-		"AWS_ROLE_SESSION_NAME",
-	}
 )
 
 // loadEnvConfig retrieves the SDK's environment configuration.
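The grouping rule that envConfigLoad enforces in the next hunk is easy to trip over: both halves of the key pair must be present or the pair is discarded. A hedged sketch of the observable effect:

	os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
	// AWS_SECRET_ACCESS_KEY deliberately unset: the half-specified pair is
	// cleared, and the credential chain falls through to the shared files
	// or instance metadata instead of the environment provider.
	sess := session.Must(session.NewSession())
	creds, err := sess.Config.Credentials.Get()
	fmt.Println(creds.ProviderName, err) // not EnvProviderName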
@@ -207,33 +178,21 @@ func envConfigLoad(enableSharedConfig bool) envConfig { cfg.EnableSharedConfig = enableSharedConfig - // Static environment credentials - var creds credentials.Value - setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&creds.SessionToken, credSessionEnvKey) - if creds.HasKeys() { - // Require logical grouping of credentials - creds.ProviderName = EnvProviderName - cfg.Creds = creds - } - - // Role Metadata - setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) - setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) - - // Web identity environment variables - setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) + setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) // CSM environment variables setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) - setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + cfg.CSMEnabled = len(cfg.csmEnabled) > 0 - if len(cfg.csmEnabled) != 0 { - v, _ := strconv.ParseBool(cfg.csmEnabled) - cfg.CSMEnabled = &v + // Require logical grouping of credentials + if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { + cfg.Creds = credentials.Value{} + } else { + cfg.Creds.ProviderName = EnvProviderName } regionKeys := regionEnvKeys diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 7b0a942e..9b1ad609 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -8,17 +8,18 @@ import ( "io/ioutil" "net/http" "os" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" ) const ( @@ -104,20 +105,8 @@ func New(cfgs ...*aws.Config) *Session { } s := deprecatedNewSession(cfgs...) - - if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - err = fmt.Errorf("failed to enable CSM, %v", err) - s.Config.Logger.Log("ERROR:", err.Error()) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) - } + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) } return s @@ -136,7 +125,7 @@ func New(cfgs ...*aws.Config) *Session { // to be built with retrieving credentials with AssumeRole set in the config. // // See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created, such as specifying the +// control through code how the Session will be created. Such as specifying the // config profile, and controlling if shared config is enabled or not. 
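One subtlety in the envConfigLoad change above: CSMEnabled is now a plain bool derived from the variable being non-empty, so any value, even "false", switches metrics on. An illustration (the client ID is a placeholder):

	os.Setenv("AWS_CSM_ENABLED", "false") // non-empty, so CSM still turns on
	os.Setenv("AWS_CSM_PORT", "31000")    // enableCSM below falls back to csm.DefaultPort when empty
	os.Setenv("AWS_CSM_CLIENT_ID", "example-client")
	sess := session.Must(session.NewSession())
	// the metric reporter now targets 127.0.0.1:31000
	_ = sess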
func NewSession(cfgs ...*aws.Config) (*Session, error) { opts := Options{} @@ -220,12 +209,6 @@ type Options struct { // the config enables assume role wit MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) - // When the SDK's shared config is configured to assume a role this option - // may be provided to set the expiry duration of the STS credentials. - // Defaults to 15 minutes if not set as documented in the - // stscreds.AssumeRoleProvider. - AssumeRoleDuration time.Duration - // Reader for a custom Credentials Authority (CA) bundle in PEM format that // the SDK will use instead of the default system's root CA bundle. Use this // only if you want to replace the CA bundle the SDK uses for TLS requests. @@ -240,12 +223,6 @@ type Options struct { // to also enable this feature. CustomCABundle session option field has priority // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. CustomCABundle io.Reader - - // The handlers that the session and all API clients will be created with. - // This must be a complete set of handlers. Use the defaults.Handlers() - // function to initialize this value before changing the handlers to be - // used by the SDK. - Handlers request.Handlers } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -285,7 +262,7 @@ func NewSessionWithOptions(opts Options) (*Session, error) { envCfg = loadEnvConfig() } - if len(opts.Profile) != 0 { + if len(opts.Profile) > 0 { envCfg.Profile = opts.Profile } @@ -351,33 +328,27 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { return s } -func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { - if logger != nil { - logger.Log("Enabling CSM") +func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { + logger.Log("Enabling CSM") + if len(port) == 0 { + port = csm.DefaultPort } - r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) + r, err := csm.Start(clientID, "127.0.0.1:"+port) if err != nil { - return err + return } r.InjectHandlers(handlers) - - return nil } func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() - - handlers := opts.Handlers - if handlers.IsEmpty() { - handlers = defaults.Handlers() - } + handlers := defaults.Handlers() // Get a merged version of the user provided config to determine if // credentials were. userCfg := &aws.Config{} userCfg.MergeIn(cfgs...) - cfg.MergeIn(userCfg) // Ordered config files will be loaded in with later files overwriting // previous config file values. @@ -394,17 +365,9 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) if err != nil { - if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { - // Special case where the user has not explicitly specified an AWS_PROFILE, - // or session.Options.profile, shared config is not enabled, and the - // environment has credentials, allow the shared config file to fail to - // load since the user has already provided credentials, and nothing else - // is required to be read file. 
Github(aws/aws-sdk-go#2455) - } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { - return nil, err - } + return nil, err } if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { @@ -417,16 +380,8 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) - - if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - return nil, err - } + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) } // Setup HTTP client with custom cert bundle if enabled @@ -439,46 +394,6 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, return s, nil } -type csmConfig struct { - Enabled bool - Host string - Port string - ClientID string -} - -var csmProfileName = "aws_csm" - -func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { - if envCfg.CSMEnabled != nil { - if *envCfg.CSMEnabled { - return csmConfig{ - Enabled: true, - ClientID: envCfg.CSMClientID, - Host: envCfg.CSMHost, - Port: envCfg.CSMPort, - }, nil - } - return csmConfig{}, nil - } - - sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) - if err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); !ok { - return csmConfig{}, err - } - } - if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { - return csmConfig{ - Enabled: true, - ClientID: sharedCfg.CSMClientID, - Host: sharedCfg.CSMHost, - Port: sharedCfg.CSMPort, - }, nil - } - - return csmConfig{}, nil -} - func loadCustomCABundle(s *Session, bundle io.Reader) error { var t *http.Transport switch v := s.Config.HTTPClient.Transport.(type) { @@ -491,10 +406,7 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error { } } if t == nil { - // Nil transport implies `http.DefaultTransport` should be used. Since - // the SDK cannot modify, nor copy the `DefaultTransport` specifying - // the values the next closest behavior. - t = getCABundleTransport() + t = &http.Transport{} } p, err := loadCertPool(bundle) @@ -527,11 +439,9 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) { return p, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) error { +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { + // Merge in user provided configuration + cfg.MergeIn(userCfg) // Region if not already set by user if len(aws.StringValue(cfg.Region)) == 0 { @@ -542,7 +452,7 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, } } - if cfg.EnableEndpointDiscovery == nil { + if aws.BoolValue(envCfg.EnableEndpointDiscovery) { if envCfg.EnableEndpointDiscovery != nil { cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { @@ -550,19 +460,160 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, } } - // Configure credentials if not already set by the user when creating the - // Session. 
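With the Go-version-specific getCABundleTransport files deleted above, a nil client transport now falls back to a zero-value http.Transport; supplying the bundle itself still goes through the session option. A sketch with a hypothetical bundle path:

	f, err := os.Open("/etc/ssl/certs/private-ca.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	sess, err := session.NewSessionWithOptions(session.Options{
		CustomCABundle: f, // read by loadCertPool into the transport's cert pool
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sess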
+ // Configure credentials if not already set if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) - if err != nil { - return err + + // inspect the profile to see if a credential source has been specified. + if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { + + // if both credential_source and source_profile have been set, return an error + // as this is undefined behavior. + if len(sharedCfg.AssumeRole.SourceProfile) > 0 { + return ErrSharedConfigSourceCollision + } + + // valid credential source values + const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" + ) + + switch sharedCfg.AssumeRole.CredentialSource { + case credSourceEc2Metadata: + cfgCp := *cfg + p := defaults.RemoteCredProvider(cfgCp, handlers) + cfgCp.Credentials = credentials.NewCredentials(p) + + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return AssumeRoleTokenProviderNotSetError{} + } + + cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) + case credSourceEnvironment: + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return ErrSharedConfigECSContainerEnvVarEmpty + } + + cfgCp := *cfg + p := defaults.RemoteCredProvider(cfgCp, handlers) + creds := credentials.NewCredentials(p) + + cfg.Credentials = creds + default: + return ErrSharedConfigInvalidCredSource + } + + return nil + } + + if len(envCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { + cfgCp := *cfg + cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.AssumeRoleSource.Creds, + ) + + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return AssumeRoleTokenProviderNotSetError{} + } + + cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) + } else if len(sharedCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + } else { + // Fallback to default credentials provider, include mock errors + // for the credential chain so user can identify why credentials + // failed to be retrieved. 
+			cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
+				VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+				Providers: []credentials.Provider{
+					&credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
+					&credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
+					defaults.RemoteCredProvider(*cfg, handlers),
+				},
+			})
 		}
-		cfg.Credentials = creds
 	}
 
 	return nil
 }
 
+func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials {
+	return stscreds.NewCredentials(
+		&Session{
+			Config:   &cfg,
+			Handlers: handlers.Copy(),
+		},
+		sharedCfg.AssumeRole.RoleARN,
+		func(opt *stscreds.AssumeRoleProvider) {
+			opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
+
+			// Assume role with external ID
+			if len(sharedCfg.AssumeRole.ExternalID) > 0 {
+				opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
+			}
+
+			// Assume role with MFA
+			if len(sharedCfg.AssumeRole.MFASerial) > 0 {
+				opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
+				opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+			}
+		},
+	)
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the
+// MFAToken option is not set when shared config is configured to load and assume a
+// role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+	return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+	return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+	Err error
+}
+
+var emptyCreds = credentials.Value{}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+	return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+	return true
+}
+
 func initHandlers(s *Session) {
 	// Add the Validate parameter handler if it is not disabled.
 	s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
@@ -571,7 +622,7 @@ func initHandlers(s *Session) {
 	}
 }
 
-// Copy creates and returns a copy of the current Session, copying the config
+// Copy creates and returns a copy of the current Session, copying the config
 // and handlers. If any additional configs are provided they will be merged
 // on top of the Session's copied config.
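assumeRoleCredentials only wires opt.TokenProvider up when mfa_serial is present, so the provider has to come in through the session options. A sketch of a custom prompt, with stscreds.StdinTokenProvider as the stock alternative; any func() (string, error) satisfies the field:

	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
		AssumeRoleTokenProvider: func() (string, error) {
			// hypothetical prompt; read one MFA code from stdin
			fmt.Print("MFA code: ")
			var code string
			_, err := fmt.Scanln(&code)
			return code, err
		},
	}))
	_ = sess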
// diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index d91ac93a..427b8a4e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -5,6 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/ini" ) @@ -22,57 +23,45 @@ const ( mfaSerialKey = `mfa_serial` // optional roleSessionNameKey = `role_session_name` // optional - // CSM options - csmEnabledKey = `csm_enabled` - csmHostKey = `csm_host` - csmPortKey = `csm_port` - csmClientIDKey = `csm_client_id` - // Additional Config fields regionKey = `region` // endpoint discovery group enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional - // External Credential Process - credentialProcessKey = `credential_process` // optional - - // Web Identity Token File - webIdentityTokenFileKey = `web_identity_token_file` // optional - // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name // is not provided. DefaultSharedConfigProfile = `default` ) +type assumeRoleConfig struct { + RoleARN string + SourceProfile string + CredentialSource string + ExternalID string + MFASerial string + RoleSessionName string +} + // sharedConfig represents the configuration fields of the SDK config files. type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id and - // aws_secret_access_key must be provided together in the same file to be - // considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of - // the other two fields are also provided. + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. // // aws_access_key_id // aws_secret_access_key // aws_session_token Creds credentials.Value - CredentialSource string - CredentialProcess string - WebIdentityTokenFile string + AssumeRole assumeRoleConfig + AssumeRoleSource *sharedConfig - RoleARN string - RoleSessionName string - ExternalID string - MFASerial string - - SourceProfileName string - SourceProfile *sharedConfig - - // Region is the region the SDK should use for looking up AWS service - // endpoints and signing requests. + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. // // region Region string @@ -82,12 +71,6 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool - - // CSM Options - CSMEnabled *bool - CSMHost string - CSMPort string - CSMClientID string } type sharedConfigFile struct { @@ -95,18 +78,17 @@ type sharedConfigFile struct { IniData ini.Sections } -// loadSharedConfig retrieves the configuration from the list of files using -// the profile provided. The order the files are listed will determine +// loadSharedConfig retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine // precedence. 
Values in subsequent files will overwrite values defined in
// earlier files.
//
// For example, given two files A and B. Both define credentials. If the order
-// of the files are A then B, B's credential values will be used instead of
-// A's.
+// of the files are A then B, B's credential values will be used instead of A's.
//
// See sharedConfig.setFromFile for information how the config files
// will be loaded.
-func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
 	if len(profile) == 0 {
 		profile = DefaultSharedConfigProfile
 	}
@@ -117,11 +99,16 @@ func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedCo
 	}
 
 	cfg := sharedConfig{}
-	profiles := map[string]struct{}{}
-	if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
+	if err = cfg.setFromIniFiles(profile, files); err != nil {
 		return sharedConfig{}, err
 	}
 
+	if len(cfg.AssumeRole.SourceProfile) > 0 {
+		if err := cfg.setAssumeRoleSource(profile, files); err != nil {
+			return sharedConfig{}, err
+		}
+	}
+
 	return cfg, nil
 }
 
@@ -145,88 +132,60 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
 	return files, nil
 }
 
-func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
-	// Trim files from the list that don't exist.
-	var skippedFiles int
-	var profileNotFoundErr error
-	for _, f := range files {
-		if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
-			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
-				// Ignore profiles not defined in individual files.
-				profileNotFoundErr = err
-				skippedFiles++
-				continue
-			}
-			return err
-		}
-	}
-	if skippedFiles == len(files) {
-		// If all files were skipped because the profile is not found, return
-		// the original profile not found error.
-		return profileNotFoundErr
+func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
+	var assumeRoleSrc sharedConfig
+
+	if len(cfg.AssumeRole.CredentialSource) > 0 {
+		// setAssumeRoleSource is only called when source_profile is found.
+		// If both source_profile and credential_source are set, then
+		// ErrSharedConfigSourceCollision will be returned
+		return ErrSharedConfigSourceCollision
 	}
 
-	if _, ok := profiles[profile]; ok {
-		// if this is the second instance of the profile the Assume Role
-		// options must be cleared because they are only valid for the
-		// first reference of a profile. The self linked instance of the
-		// profile only have credential provider options.
-		cfg.clearAssumeRoleOptions()
+	// Multiple level assume role chains are not supported
+	if cfg.AssumeRole.SourceProfile == origProfile {
+		assumeRoleSrc = *cfg
+		assumeRoleSrc.AssumeRole = assumeRoleConfig{}
 	} else {
-		// First time a profile has been seen, It must either be a assume role
-		// or credentials. Assert if the credential type requires a role ARN,
-		// the ARN is also set.
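setAssumeRoleSource above rejects profiles that declare both credential sources. An illustrative shared config profile that would fail with ErrSharedConfigSourceCollision; the ARN and profile names are placeholders:

	[assume_role_collision]
	role_arn = arn:aws:iam::123456789012:role/example
	source_profile = base_profile
	credential_source = Environment

Dropping either source_profile or credential_source makes the profile loadable again.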
-		if err := cfg.validateCredentialsRequireARN(profile); err != nil {
+		err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
+		if err != nil {
 			return err
 		}
 	}
 
-	profiles[profile] = struct{}{}
-
-	if err := cfg.validateCredentialType(); err != nil {
-		return err
+	if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
+		return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
 	}
 
-	// Link source profiles for assume roles
-	if len(cfg.SourceProfileName) != 0 {
-		// Linked profile via source_profile ignore credential provider
-		// options, the source profile must provide the credentials.
-		cfg.clearCredentialOptions()
+	cfg.AssumeRoleSource = &assumeRoleSrc
 
-		srcCfg := &sharedConfig{}
-		err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
-		if err != nil {
-			// SourceProfile that doesn't exist is an error in configuration.
+	return nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
+	// Trim files from the list that don't exist.
+	for _, f := range files {
+		if err := cfg.setFromIniFile(profile, f); err != nil {
 			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
-				err = SharedConfigAssumeRoleError{
-					RoleARN:       cfg.RoleARN,
-					SourceProfile: cfg.SourceProfileName,
-				}
+				// Ignore profiles missing from individual files
+				continue
 			}
 			return err
 		}
-
-		if !srcCfg.hasCredentials() {
-			return SharedConfigAssumeRoleError{
-				RoleARN:       cfg.RoleARN,
-				SourceProfile: cfg.SourceProfileName,
-			}
-		}
-
-		cfg.SourceProfile = srcCfg
 	}
 
 	return nil
 }
 
-// setFromFile loads the configuration from the file using the profile
-// provided. A sharedConfig pointer type value is used so that multiple config
-// file loadings can be chained.
+// setFromFile loads the configuration from the file using
+// the profile provided. A sharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
 //
 // Only loads complete logically grouped values, and will not set fields in cfg
-// for incomplete grouped values in the config. Such as credentials. For
-// example if a config file only includes aws_access_key_id but no
-// aws_secret_access_key the aws_access_key_id will be ignored.
-func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
+// for incomplete grouped values in the config. Such as credentials. For example
+// if a config file only includes aws_access_key_id but no aws_secret_access_key
+// the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { section, ok := file.IniData.GetSection(profile) if !ok { // Fallback to to alternate profile name: profile @@ -236,141 +195,46 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e } } - if exOpts { - // Assume Role Parameters - updateString(&cfg.RoleARN, section, roleArnKey) - updateString(&cfg.ExternalID, section, externalIDKey) - updateString(&cfg.MFASerial, section, mfaSerialKey) - updateString(&cfg.RoleSessionName, section, roleSessionNameKey) - updateString(&cfg.SourceProfileName, section, sourceProfileKey) - updateString(&cfg.CredentialSource, section, credentialSourceKey) - - updateString(&cfg.Region, section, regionKey) - } - - updateString(&cfg.CredentialProcess, section, credentialProcessKey) - updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) - // Shared Credentials - creds := credentials.Value{ - AccessKeyID: section.String(accessKeyIDKey), - SecretAccessKey: section.String(secretAccessKey), - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), - } - if creds.HasKeys() { - cfg.Creds = creds - } - - // Endpoint discovery - updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) - - // CSM options - updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) - updateString(&cfg.CSMHost, section, csmHostKey) - updateString(&cfg.CSMPort, section, csmPortKey) - updateString(&cfg.CSMClientID, section, csmClientIDKey) - - return nil -} - -func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { - var credSource string - - switch { - case len(cfg.SourceProfileName) != 0: - credSource = sourceProfileKey - case len(cfg.CredentialSource) != 0: - credSource = credentialSourceKey - case len(cfg.WebIdentityTokenFile) != 0: - credSource = webIdentityTokenFileKey - } - - if len(credSource) != 0 && len(cfg.RoleARN) == 0 { - return CredentialRequiresARNError{ - Type: credSource, - Profile: profile, + akid := section.String(accessKeyIDKey) + secret := section.String(secretAccessKey) + if len(akid) > 0 && len(secret) > 0 { + cfg.Creds = credentials.Value{ + AccessKeyID: akid, + SecretAccessKey: secret, + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), } } - return nil -} - -func (cfg *sharedConfig) validateCredentialType() error { - // Only one or no credential type can be defined. 
- if !oneOrNone( - len(cfg.SourceProfileName) != 0, - len(cfg.CredentialSource) != 0, - len(cfg.CredentialProcess) != 0, - len(cfg.WebIdentityTokenFile) != 0, - ) { - return ErrSharedConfigSourceCollision - } - - return nil -} - -func (cfg *sharedConfig) hasCredentials() bool { - switch { - case len(cfg.SourceProfileName) != 0: - case len(cfg.CredentialSource) != 0: - case len(cfg.CredentialProcess) != 0: - case len(cfg.WebIdentityTokenFile) != 0: - case cfg.Creds.HasKeys(): - default: - return false - } - - return true -} - -func (cfg *sharedConfig) clearCredentialOptions() { - cfg.CredentialSource = "" - cfg.CredentialProcess = "" - cfg.WebIdentityTokenFile = "" - cfg.Creds = credentials.Value{} -} - -func (cfg *sharedConfig) clearAssumeRoleOptions() { - cfg.RoleARN = "" - cfg.ExternalID = "" - cfg.MFASerial = "" - cfg.RoleSessionName = "" - cfg.SourceProfileName = "" -} - -func oneOrNone(bs ...bool) bool { - var count int - - for _, b := range bs { - if b { - count++ - if count > 1 { - return false - } + // Assume Role + roleArn := section.String(roleArnKey) + srcProfile := section.String(sourceProfileKey) + credentialSource := section.String(credentialSourceKey) + hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 + if len(roleArn) > 0 && hasSource { + cfg.AssumeRole = assumeRoleConfig{ + RoleARN: roleArn, + SourceProfile: srcProfile, + CredentialSource: credentialSource, + ExternalID: section.String(externalIDKey), + MFASerial: section.String(mfaSerialKey), + RoleSessionName: section.String(roleSessionNameKey), } } - return true -} - -// updateString will only update the dst with the value in the section key, key -// is present in the section. -func updateString(dst *string, section ini.Section, key string) { - if !section.Has(key) { - return + // Region + if v := section.String(regionKey); len(v) > 0 { + cfg.Region = v } - *dst = section.String(key) -} -// updateBoolPtr will only update the dst with the value in the section key, -// key is present in the section. -func updateBoolPtr(dst **bool, section ini.Section, key string) { - if !section.Has(key) { - return + // Endpoint discovery + if section.Has(enableEndpointDiscoveryKey) { + v := section.Bool(enableEndpointDiscoveryKey) + cfg.EnableEndpointDiscovery = &v } - *dst = new(bool) - **dst = section.Bool(key) + + return nil } // SharedConfigLoadError is an error for the shared config file failed to load. @@ -430,8 +294,7 @@ func (e SharedConfigProfileNotExistsError) Error() string { // profile contains assume role information, but that information is invalid // or not complete. type SharedConfigAssumeRoleError struct { - RoleARN string - SourceProfile string + RoleARN string } // Code is the short id of the error. @@ -441,10 +304,8 @@ func (e SharedConfigAssumeRoleError) Code() string { // Message is the description of the error func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf( - "failed to load assume role for %s, source profile %s has no shared credentials", - e.RoleARN, e.SourceProfile, - ) + return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", + e.RoleARN) } // OrigErr is the underlying error that caused the failure. 
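As a minimal sketch of where SharedConfigAssumeRoleError surfaces for an SDK user, assuming the hypothetical "deploy" profile from the earlier example points at a source profile that carries no credentials:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// SharedConfigEnable forces the shared config loading described above.
	_, err := session.NewSessionWithOptions(session.Options{
		Profile:           "deploy",
		SharedConfigState: session.SharedConfigEnable,
	})
	if aerr, ok := err.(session.SharedConfigAssumeRoleError); ok {
		// Code and Message come from the methods defined above.
		fmt.Println(aerr.Code(), aerr.Message())
	}
}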
@@ -456,36 +317,3 @@ func (e SharedConfigAssumeRoleError) OrigErr() error { func (e SharedConfigAssumeRoleError) Error() string { return awserr.SprintError(e.Code(), e.Message(), "", nil) } - -// CredentialRequiresARNError provides the error for shared config credentials -// that are incorrectly configured in the shared config or credentials file. -type CredentialRequiresARNError struct { - // type of credentials that were configured. - Type string - - // Profile name the credentials were in. - Profile string -} - -// Code is the short id of the error. -func (e CredentialRequiresARNError) Code() string { - return "CredentialRequiresARNError" -} - -// Message is the description of the error -func (e CredentialRequiresARNError) Message() string { - return fmt.Sprintf( - "credential type %s requires role_arn, profile %s", - e.Type, e.Profile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e CredentialRequiresARNError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e CredentialRequiresARNError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 8104793a..155645d6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -98,25 +98,25 @@ var ignoredHeaders = rules{ var requiredSignedHeaders = rules{ whitelist{ mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, @@ -134,7 +134,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Tagging": struct{}{}, + "X-Amz-Tagging": struct{}{}, "X-Amz-Website-Redirect-Location": struct{}{}, "X-Amz-Content-Sha256": struct{}{}, }, @@ -182,7 +182,7 @@ type Signer struct { // 
http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html DisableURIPathEscaping bool - // Disables the automatical setting of the HTTP request's Body field with the + // Disables the automatic setting of the HTTP request's Body field with the // io.ReadSeeker passed in to the signer. This is useful if you're using a // custom wrapper around the body for the io.ReadSeeker and want to preserve // the Body value on the Request.Body. @@ -422,7 +422,7 @@ var SignRequestHandler = request.NamedHandler{ // If the credentials of the request's config are set to // credentials.AnonymousCredentials the request will not be signed. func SignSDKRequest(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now) + signSDKRequestWithCurrTime(req, time.Now) } // BuildNamedHandler will build a generic handler for signing. @@ -430,15 +430,12 @@ func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler return request.NamedHandler{ Name: name, Fn: func(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now, opts...) + signSDKRequestWithCurrTime(req, time.Now, opts...) }, } } -// SignSDKRequestWithCurrentTime will sign the SDK's request using the time -// function passed in. Behaves the same as SignSDKRequest with the exception -// the request is signed with the value returned by the current time function. -func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { // If the request does not need to be signed ignore the signing of the // request if the AnonymousCredentials object is used. if req.Config.Credentials == credentials.AnonymousCredentials { @@ -474,9 +471,13 @@ func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.T opt(v4) } - curTime := curTimeFn() + signingTime := req.Time + if !req.LastSignedAt.IsZero() { + signingTime = req.LastSignedAt + } + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + name, region, req.ExpireTime, req.ExpireTime > 0, signingTime, ) if err != nil { req.Error = err @@ -485,7 +486,7 @@ func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.T } req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTime + req.LastSignedAt = curTimeFn() } const logSignInfoMsg = `DEBUG: Request Signature: @@ -687,11 +688,7 @@ func (ctx *signingCtx) buildBodyDigest() error { if !aws.IsReaderSeekable(ctx.Body) { return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) } - hashBytes, err := makeSha256Reader(ctx.Body) - if err != nil { - return err - } - hash = hex.EncodeToString(hashBytes) + hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) } if includeSHA256Header { @@ -738,16 +735,10 @@ func makeSha256(data []byte) []byte { return hash.Sum(nil) } -func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { +func makeSha256Reader(reader io.ReadSeeker) []byte { hash := sha256.New() - start, err := reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - return nil, err - } - defer func() { - // ensure error is return if unable to seek back to start of payload.
- _, err = reader.Seek(start, sdkio.SeekStart) - }() + start, _ := reader.Seek(0, sdkio.SeekCurrent) + defer reader.Seek(start, sdkio.SeekStart) // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. @@ -758,13 +749,13 @@ func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { io.CopyN(hash, reader, size) } - return hash.Sum(nil), nil + return hash.Sum(nil) } const doubleSpace = " " // stripExcessSpaces will rewrite the passed in slice's string values to not -// contain multiple side-by-side spaces. +// contain multiple side-by-side spaces. func stripExcessSpaces(vals []string) { var j, k, l, m, spaces int for i, str := range vals { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 45509154..8b6f2342 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -7,18 +7,13 @@ import ( "github.com/aws/aws-sdk-go/internal/sdkio" ) -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the -// SDK to accept an io.Reader that is not also an io.Seeker for unsigned -// streaming payload API operations. +// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Should +// only be used with an io.Reader that is also an io.Seeker. Wrapping a +// non-seekable reader may cause request signature errors, or the request body +// not being sent for GET, HEAD and DELETE HTTP methods. // -// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API -// operation's input will prevent that operation being retried in the case of -// network errors, and cause operation requests to fail if the operation -// requires payload signing. -// -// Note: If using With S3 PutObject to stream an object upload The SDK's S3 -// Upload manager (s3manager.Uploader) provides support for streaming with the -// ability to retry network errors. +// Deprecated: Should only be used with io.ReadSeeker. If using for +// S3 PutObject to stream content use s3manager.Uploader instead. func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { return ReaderSeekerCloser{r} } @@ -48,8 +43,7 @@ func IsReaderSeekable(r io.Reader) bool { // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned. // -// If the reader is not an io.Reader zero bytes read, and nil error will be -// returned. +// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
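To make the deprecation note above concrete, here is a hedged sketch (the bucket, key, and readers are hypothetical): wrap only genuinely seekable bodies with ReadSeekCloser, and stream non-seekable bodies through the S3 upload manager instead:

package main

import (
	"bytes"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())

	// Safe: bytes.Reader implements io.Seeker, so the signer can re-read the body.
	body := aws.ReadSeekCloser(bytes.NewReader([]byte("payload")))
	_ = body

	// For non-seekable streams (os.Stdin here), prefer the upload manager,
	// which chunks and retries without requiring io.Seeker.
	uploader := s3manager.NewUploader(sess)
	if _, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
		Body:   os.Stdin,
	}); err != nil {
		panic(err)
	}
}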
// // Performs the same functionality as io.Reader Read func (r ReaderSeekerCloser) Read(p []byte) (int, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index d1548ebd..64e80aca 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.25.3" +const SDKVersion = "1.15.78" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go index e56dcee2..84b50c2e 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -304,9 +304,7 @@ loop: stmt := newCommentStatement(tok) stack.Push(stmt) default: - return nil, NewParseError( - fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", - k, tok.Type())) + return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok)) } if len(tokens) > 0 { @@ -316,10 +314,10 @@ loop: // this occurs when a statement has not been completed if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container)) } - // returns a sublist which excludes the start symbol + // returns a sublist which excludes the start symbol return stack.List(), nil } @@ -337,12 +335,13 @@ func trimSpaces(k AST) AST { } // trim right hand side of spaces - for i := len(k.Root.raw) - 1; i >= 0; i-- { + for i := len(k.Root.raw) - 1; i > 0; i-- { if !isWhitespace(k.Root.raw[i]) { break } k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + i-- } return k diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go index 18f3fe89..ba0af01b 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -15,7 +15,7 @@ func newExprStatement(ast AST) AST { return newAST(ASTKindExprStatement, ast) } -// CommentStatement represents a comment in the ini definition. +// CommentStatement represents a comment in the ini definition. // // grammar: // comment -> #comment' | ;comment' diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go deleted file mode 100644 index 6c443988..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package sdkio - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go deleted file mode 100644 index 44898eed..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.10 - -package sdkmath - -import "math" - -// Round returns the nearest integer, rounding half away from zero.
-// -// Special cases are: -// Round(±0) = ±0 -// Round(±Inf) = ±Inf -// Round(NaN) = NaN -func Round(x float64) float64 { - return math.Round(x) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go deleted file mode 100644 index 810ec7f0..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build !go1.10 - -package sdkmath - -import "math" - -// Copied from the Go standard library's (Go 1.12) math/floor.go for use in -// Go version prior to Go 1.10. -const ( - uvone = 0x3FF0000000000000 - mask = 0x7FF - shift = 64 - 11 - 1 - bias = 1023 - signMask = 1 << 63 - fracMask = 1<<shift - 1 -) - -// Round returns the nearest integer, rounding half away from zero. -// -// Special cases are: -// Round(±0) = ±0 -// Round(±Inf) = ±Inf -// Round(NaN) = NaN -func Round(x float64) float64 { - // Round is a faster implementation of: - // - // func Round(x float64) float64 { - // t := Trunc(x) - // if Abs(x-t) >= 0.5 { - // return t + Copysign(1, x) - // } - // return t - // } - bits := math.Float64bits(x) - e := uint(bits>>shift) & mask - if e < bias { - // Round abs(x) < 1 including denormals. - bits &= signMask // +-0 - if e == bias-1 { - bits |= uvone // +-1 - } - } else if e < bias+shift { - // Round any abs(x) >= 1 containing a fractional component [0,1). - // - // Numbers with larger exponents are returned unchanged since they - // must be either an integer, infinity, or NaN. - const half = 1 << (shift - 1) - e -= bias - bits += half >> e - bits &^= fracMask >> e - } - return math.Float64frombits(bits) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go deleted file mode 100644 index f4651da2..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.6 - -package sdkrand - -import "math/rand" - -// Read provides the stub for math.Rand.Read method support for go version's -// 1.6 and greater. -func Read(r *rand.Rand, p []byte) (int, error) { - return r.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go deleted file mode 100644 index b1d93a33..00000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !go1.6 - -package sdkrand - -import "math/rand" - -// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 -func Read(r *rand.Rand, p []byte) (n int, err error) { - // Copy of Go standard libraries math package's read function not added to - // standard library until Go 1.6. - var pos int8 - var val int64 - for n = 0; n < len(p); n++ { - if pos == 0 { - val = r.Int63() - pos = 7 - } - p[n] = byte(val) - val >>= 8 - pos-- - } - - return n, err -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go index 7da8a49c..b63e4c26 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -7,6 +7,6 @@ const ( ) // ECSContainerCredentialsURI is the endpoint to retrieve container -// credentials. This can be overridden to test to ensure the credential process +// credentials. This can be overridden to test to ensure the credential process // is behaving correctly.
var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go index d7d42db0..f06f44ee 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -1,54 +1,7 @@ package protocol -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// ValidateEndpointHostHandler is a request handler that will validate the -// request endpoint's hosts is a valid RFC 3986 host. -var ValidateEndpointHostHandler = request.NamedHandler{ - Name: "awssdk.protocol.ValidateEndpointHostHandler", - Fn: func(r *request.Request) { - err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) - if err != nil { - r.Error = err - } - }, -} - -// ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. -func ValidateEndpointHost(opName, host string) error { - paramErrs := request.ErrInvalidParams{Context: opName} - labels := strings.Split(host, ".") - - for i, label := range labels { - if i == len(labels)-1 && len(label) == 0 { - // Allow trailing dot for FQDN hosts. - continue - } - - if !ValidHostLabel(label) { - paramErrs.Add(request.NewErrParamFormat( - "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) - } - } - - if len(host) > 255 { - paramErrs.Add(request.NewErrParamMaxLen( - "endpoint host", 255, host, - )) - } - - if paramErrs.Len() > 0 { - return paramErrs - } - return nil -} - -// ValidHostLabel returns if the label is a valid RFC 3986 host label. +// ValidHostLabel returns if the label is a valid RFC 1123 Section 2.1 domain +// host label name. func ValidHostLabel(label string) bool { if l := len(label); l == 0 || l > 63 { return false diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go deleted file mode 100644 index 915b0fca..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go +++ /dev/null @@ -1,54 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// HostPrefixHandlerName is the handler name for the host prefix request -// handler. -const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" - -// NewHostPrefixHandler constructs a build handler -func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { - builder := HostPrefixBuilder{ - Prefix: prefix, - LabelsFn: labelsFn, - } - - return request.NamedHandler{ - Name: HostPrefixHandlerName, - Fn: builder.Build, - } -} - -// HostPrefixBuilder provides the request handler to expand and prepend -// the host prefix into the operation's request endpoint host. -type HostPrefixBuilder struct { - Prefix string - LabelsFn func() map[string]string -} - -// Build updates the passed in Request with the HostPrefix template expanded. 
-func (h HostPrefixBuilder) Build(r *request.Request) { - if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { - return - } - - var labels map[string]string - if h.LabelsFn != nil { - labels = h.LabelsFn() - } - - prefix := h.Prefix - for name, value := range labels { - prefix = strings.Replace(prefix, "{"+name+"}", value, -1) - } - - r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host - if len(r.HTTPRequest.Host) > 0 { - r.HTTPRequest.Host = prefix + r.HTTPRequest.Host - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go deleted file mode 100644 index 864fb670..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ /dev/null @@ -1,296 +0,0 @@ -// Package jsonutil provides JSON serialization of AWS requests and responses. -package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/private/protocol" -) - -var timeType = reflect.ValueOf(time.Time{}).Type() -var byteSliceType = reflect.ValueOf([]byte{}).Type() - -// BuildJSON builds a JSON string for a given object v. -func BuildJSON(v interface{}) ([]byte, error) { - var buf bytes.Buffer - - err := buildAny(reflect.ValueOf(v), &buf, "") - return buf.Bytes(), err -} - -func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - origVal := value - value = reflect.Indirect(value) - if !value.IsValid() { - return nil - } - - vtype := value.Type() - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if value.Type() != timeType { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return buildStruct(value, buf, tag) - case "list": - return buildList(value, buf, tag) - case "map": - return buildMap(value, buf, tag) - default: - return buildScalar(origVal, buf, tag) - } -} - -func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - buf.WriteByte('{') - - t := value.Type() - first := true - for i := 0; i < t.NumField(); i++ { - member := value.Field(i) - - // This allocates the most memory. - // Additionally, we cannot skip nil fields due to - // idempotency auto filling. 
- field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("json") == "-" { - continue - } - if field.Tag.Get("location") != "" { - continue // ignore non-body elements - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(member, field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(&token) - } - - if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { - continue // ignore unset fields - } - - if first { - first = false - } else { - buf.WriteByte(',') - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - writeString(name, buf) - buf.WriteString(`:`) - - err := buildAny(member, buf, field.Tag) - if err != nil { - return err - } - - } - - buf.WriteString("}") - - return nil -} - -func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("[") - - for i := 0; i < value.Len(); i++ { - buildAny(value.Index(i), buf, "") - - if i < value.Len()-1 { - buf.WriteString(",") - } - } - - buf.WriteString("]") - - return nil -} - -type sortedValues []reflect.Value - -func (sv sortedValues) Len() int { return len(sv) } -func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } - -func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("{") - - sv := sortedValues(value.MapKeys()) - sort.Sort(sv) - - for i, k := range sv { - if i > 0 { - buf.WriteByte(',') - } - - writeString(k.String(), buf) - buf.WriteString(`:`) - - buildAny(value.MapIndex(k), buf, "") - } - - buf.WriteString("}") - - return nil -} - -func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - // prevents allocation on the heap. - scratch := [64]byte{} - switch value := reflect.Indirect(v); value.Kind() { - case reflect.String: - writeString(value.String(), buf) - case reflect.Bool: - if value.Bool() { - buf.WriteString("true") - } else { - buf.WriteString("false") - } - case reflect.Int64: - buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) - case reflect.Float64: - f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} - } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) - default: - switch converted := value.Interface().(type) { - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.UnixTimeFormatName - } - - ts := protocol.FormatTime(format, converted) - if format != protocol.UnixTimeFormatName { - ts = `"` + ts + `"` - } - - buf.WriteString(ts) - case []byte: - if !value.IsNil() { - buf.WriteByte('"') - if len(converted) < 1024 { - // for small buffers, using Encode directly is much faster. - dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) - base64.StdEncoding.Encode(dst, converted) - buf.Write(dst) - } else { - // for large buffers, avoid unnecessary extra temporary - // buffer space. 
- enc := base64.NewEncoder(base64.StdEncoding, buf) - enc.Write(converted) - enc.Close() - } - buf.WriteByte('"') - } - case aws.JSONValue: - str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) - if err != nil { - return fmt.Errorf("unable to encode JSONValue, %v", err) - } - buf.WriteString(str) - default: - return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) - } - } - return nil -} - -var hex = "0123456789abcdef" - -func writeString(s string, buf *bytes.Buffer) { - buf.WriteByte('"') - for i := 0; i < len(s); i++ { - if s[i] == '"' { - buf.WriteString(`\"`) - } else if s[i] == '\\' { - buf.WriteString(`\\`) - } else if s[i] == '\b' { - buf.WriteString(`\b`) - } else if s[i] == '\f' { - buf.WriteString(`\f`) - } else if s[i] == '\r' { - buf.WriteString(`\r`) - } else if s[i] == '\t' { - buf.WriteString(`\t`) - } else if s[i] == '\n' { - buf.WriteString(`\n`) - } else if s[i] < 32 { - buf.WriteString("\\u00") - buf.WriteByte(hex[s[i]>>4]) - buf.WriteByte(hex[s[i]&0xF]) - } else { - buf.WriteByte(s[i]) - } - } - buf.WriteByte('"') -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go deleted file mode 100644 index ea0da79a..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ /dev/null @@ -1,250 +0,0 @@ -package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "reflect" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in -// type. The value to unmarshal the json document into must be a pointer to the -// type. -func UnmarshalJSONError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := json.NewDecoder(body).Decode(v) - if err != nil { - msg := "failed decoding error message" - if err == io.EOF { - msg = "error message missing" - err = nil - } - return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) - } - - return nil -} - -// UnmarshalJSON reads a stream and unmarshals the results in object v. 
-func UnmarshalJSON(v interface{}, stream io.Reader) error { - var out interface{} - - err := json.NewDecoder(stream).Decode(&out) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - return unmarshalAny(reflect.ValueOf(v), out, "") -} - -func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { - vtype := value.Type() - if vtype.Kind() == reflect.Ptr { - vtype = vtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := value.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return unmarshalStruct(value, data, tag) - case "list": - return unmarshalList(value, data, tag) - case "map": - return unmarshalMap(value, data, tag) - default: - return unmarshalScalar(value, data, tag) - } -} - -func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a structure (%#v)", data) - } - - t := value.Type() - if value.Kind() == reflect.Ptr { - if value.IsNil() { // create the structure if it's nil - s := reflect.New(value.Type().Elem()) - value.Set(s) - value = s - } - - value = value.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return unmarshalAny(value.FieldByName(payload), data, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if field.PkgPath != "" { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - member := value.FieldByIndex(field.Index) - err := unmarshalAny(member, mapData[name], field.Tag) - if err != nil { - return err - } - } - return nil -} - -func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - listData, ok := data.([]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a list (%#v)", data) - } - - if value.IsNil() { - l := len(listData) - value.Set(reflect.MakeSlice(value.Type(), l, l)) - } - - for i, c := range listData { - err := unmarshalAny(value.Index(i), c, "") - if err != nil { - return err - } - } - - return nil -} - -func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a map (%#v)", data) - } - - if value.IsNil() { - value.Set(reflect.MakeMap(value.Type())) - } - - for k, v := range mapData { - kvalue := reflect.ValueOf(k) - vvalue := reflect.New(value.Type().Elem()).Elem() - - unmarshalAny(vvalue, v, "") - value.SetMapIndex(kvalue, vvalue) - } - - return nil -} - -func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { - - switch d := data.(type) { - case nil: - return nil // nothing to do here - case string: - switch 
value.Interface().(type) { - case *string: - value.Set(reflect.ValueOf(&d)) - case []byte: - b, err := base64.StdEncoding.DecodeString(d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(b)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - // No need to use escaping as the value is a non-quoted string. - v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) - if err != nil { - return err - } - value.Set(reflect.ValueOf(v)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case float64: - switch value.Interface().(type) { - case *int64: - di := int64(d) - value.Set(reflect.ValueOf(&di)) - case *float64: - value.Set(reflect.ValueOf(&d)) - case *time.Time: - // Time unmarshaled from a float64 can only be epoch seconds - t := time.Unix(int64(d), 0).UTC() - value.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case bool: - switch value.Interface().(type) { - case *bool: - value.Set(reflect.ValueOf(&d)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - default: - return fmt.Errorf("unsupported JSON value (%v)", data) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 0cb99eb5..60e5b09d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -21,7 +21,7 @@ func Build(r *request.Request) { "Version": {r.ClientInfo.APIVersion}, } if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index f69c1efc..3495c730 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -24,7 +24,7 @@ func Unmarshal(r *request.Request) { err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), + awserr.New("SerializationError", "failed decoding Query response", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index 831b0110..46d354e8 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -2,68 +2,73 @@ package query import ( "encoding/xml" - "fmt" + "io/ioutil" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) -// UnmarshalErrorHandler is a name request handler to unmarshal request 
errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} - type xmlErrorResponse struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` } -type xmlResponseError struct { - xmlErrorResponse +type xmlServiceUnavailableResponse struct { + XMLName xml.Name `xml:"ServiceUnavailableException"` } -func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - const svcUnavailableTagName = "ServiceUnavailableException" - const errorResponseTagName = "ErrorResponse" - - switch start.Name.Local { - case svcUnavailableTagName: - e.Code = svcUnavailableTagName - e.Message = "service is unavailable" - return d.Skip() - - case errorResponseTagName: - return d.DecodeElement(&e.xmlErrorResponse, &start) - - default: - return fmt.Errorf("unknown error response tag, %v", start) - } -} +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} // UnmarshalError unmarshals an error response for an AWS Query service. func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - var respErr xmlResponseError - err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal error message", err), + awserr.New("SerializationError", "failed to read from query HTTP response body", err), r.HTTPResponse.StatusCode, r.RequestID, ) return } - reqID := respErr.RequestID - if len(reqID) == 0 { - reqID = r.RequestID + // First check for specific error + resp := xmlErrorResponse{} + decodeErr := xml.Unmarshal(bodyBytes, &resp) + if decodeErr == nil { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + return + } + + // Check for unhandled error + servUnavailResp := xmlServiceUnavailableResponse{} + unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) + if unavailErr == nil { + r.Error = awserr.NewRequestFailure( + awserr.New("ServiceUnavailableException", "service is unavailable", nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return } + // Failed to retrieve any error message from the response body r.Error = awserr.NewRequestFailure( - awserr.New(respErr.Code, respErr.Message, nil), + awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr), r.HTTPResponse.StatusCode, - reqID, + r.RequestID, ) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 1301b149..b34f5258 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -25,8 +25,6 @@ var noEscape [256]bool var errValueNotSet = fmt.Errorf("value not set") -var byteSliceType = reflect.TypeOf([]byte{}) - func init() { for i := 0; i < len(noEscape); i++ { // AWS expects every character except these to be escaped @@ -96,14 +94,6 @@ func buildLocationElements(r *request.Request, v 
reflect.Value, buildGETQuery bo continue } - // Support the ability to customize values to be marshaled as a - // blob even though they were modeled as a string. Required for S3 - // API operations like SSECustomerKey is modeled as stirng but - // required to be base64 encoded in request. - if field.Tag.Get("marshal-as") == "blob" { - m = m.Convert(byteSliceType) - } - var err error switch field.Tag.Get("location") { case "headers": // header maps @@ -147,7 +137,7 @@ func buildBody(r *request.Request, v reflect.Value) { case string: r.SetStringBody(reader) default: - r.Error = awserr.New(request.ErrCodeSerialization, + r.Error = awserr.New("SerializationError", "failed to encode REST request", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -162,12 +152,9 @@ func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect. if err == errValueNotSet { return nil } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + return awserr.New("SerializationError", "failed to encode REST request", err) } - name = strings.TrimSpace(name) - str = strings.TrimSpace(str) - header.Add(name, str) return nil @@ -180,13 +167,11 @@ func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) if err == errValueNotSet { continue } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + return awserr.New("SerializationError", "failed to encode REST request", err) } - keyStr := strings.TrimSpace(key.String()) - str = strings.TrimSpace(str) - header.Add(prefix+keyStr, str) + header.Add(prefix+key.String(), str) } return nil } @@ -196,7 +181,7 @@ func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) e if err == errValueNotSet { return nil } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + return awserr.New("SerializationError", "failed to encode REST request", err) } u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) @@ -229,7 +214,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string, tag reflec if err == errValueNotSet { return nil } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + return awserr.New("SerializationError", "failed to encode REST request", err) } query.Set(name, str) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 74e361e0..33fd53b1 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -57,7 +57,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) } else { payload.Set(reflect.ValueOf(b)) } @@ -65,7 +65,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) } else { str := string(b) 
payload.Set(reflect.ValueOf(&str)) @@ -77,7 +77,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { case "io.ReadSeeker": b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, + r.Error = awserr.New("SerializationError", "failed to read response body", err) return } @@ -85,7 +85,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { default: io.Copy(ioutil.Discard, r.HTTPResponse.Body) defer r.HTTPResponse.Body.Close() - r.Error = awserr.New(request.ErrCodeSerialization, + r.Error = awserr.New("SerializationError", "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -115,14 +115,14 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { case "header": err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) break } case "headers": prefix := field.Tag.Get("locationName") err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) break } } @@ -146,9 +146,6 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) { } func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { - if len(headers) == 0 { - return nil - } switch r.Interface().(type) { case map[string]*string: // we only support string map value types out := map[string]*string{} @@ -158,28 +155,19 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err out[k[len(prefix):]] = &v[0] } } - if len(out) != 0 { - r.Set(reflect.ValueOf(out)) - } - + r.Set(reflect.ValueOf(out)) } return nil } func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - switch tag.Get("type") { - case "jsonvalue": - if len(header) == 0 { - return nil - } - case "blob": + isJSONValue := tag.Get("type") == "jsonvalue" + if isJSONValue { if len(header) == 0 { return nil } - default: - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } + } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil } switch v.Interface().(type) { @@ -190,7 +178,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro if err != nil { return err } - v.Set(reflect.ValueOf(b)) + v.Set(reflect.ValueOf(&b)) case *bool: b, err := strconv.ParseBool(header) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index 07a6187e..b0f4e245 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -37,9 +37,8 @@ func Build(r *request.Request) { err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to encode rest XML request", err), - 0, + awserr.New("SerializationError", "failed to encode rest XML request", err), + r.HTTPResponse.StatusCode, r.RequestID, ) return @@ -56,8 +55,7 @@ func Unmarshal(r *request.Request) { err := 
xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to decode REST XML response", err), + awserr.New("SerializationError", "failed to decode REST XML response", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index 05d4ff51..b7ed6c6f 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -1,11 +1,8 @@ package protocol import ( - "math" "strconv" "time" - - "github.com/aws/aws-sdk-go/internal/sdkmath" ) // Names of time formats supported by the SDK @@ -16,19 +13,12 @@ const ( ) // Time formats supported by the SDK -// Output time is intended to not contain decimals const ( // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" - // This format is used for output time without seconds precision - RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" - - // This format is used for output time without seconds precision - ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" + ISO8601TimeFormat = "2006-01-02T15:04:05Z" ) // IsKnownTimestampFormat returns if the timestamp format name @@ -52,9 +42,9 @@ func FormatTime(name string, t time.Time) string { switch name { case RFC822TimeFormatName: - return t.Format(RFC822OutputTimeFormat) + return t.Format(RFC822TimeFormat) case ISO8601TimeFormatName: - return t.Format(ISO8601OutputTimeFormat) + return t.Format(ISO8601TimeFormat) case UnixTimeFormatName: return strconv.FormatInt(t.Unix(), 10) default: @@ -72,12 +62,10 @@ func ParseTime(formatName, value string) (time.Time, error) { return time.Parse(ISO8601TimeFormat, value) case UnixTimeFormatName: v, err := strconv.ParseFloat(value, 64) - _, dec := math.Modf(v) - dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 if err != nil { return time.Time{}, err } - return time.Unix(int64(v), int64(dec*(1e9))), nil + return time.Unix(int64(v), 0), nil default: panic("unknown timestamp format name, " + formatName) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index cf981fe9..1bfe45f6 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -87,7 +87,7 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle } } -// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested // types are converted to XMLNodes also.
func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { if !value.IsValid() { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go deleted file mode 100644 index c1a51185..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go +++ /dev/null @@ -1,32 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "strings" -) - -type xmlAttrSlice []xml.Attr - -func (x xmlAttrSlice) Len() int { - return len(x) -} - -func (x xmlAttrSlice) Less(i, j int) bool { - spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space - localI, localJ := x[i].Name.Local, x[j].Name.Local - valueI, valueJ := x[i].Value, x[j].Value - - spaceCmp := strings.Compare(spaceI, spaceJ) - localCmp := strings.Compare(localI, localJ) - valueCmp := strings.Compare(valueI, valueJ) - - if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { - return true - } - - return false -} - -func (x xmlAttrSlice) Swap(i, j int) { - x[i], x[j] = x[j], x[i] -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 7108d380..ff1ef683 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -1,7 +1,6 @@ package xmlutil import ( - "bytes" "encoding/base64" "encoding/xml" "fmt" @@ -11,27 +10,9 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/private/protocol" ) -// UnmarshalXMLError unmarshals the XML error from the stream into the value -// type specified. The value must be a pointer. If the message fails to -// unmarshal, the message content will be included in the returned error as a -// awserr.UnmarshalError. -func UnmarshalXMLError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := xml.NewDecoder(body).Decode(v) - if err != nil && err != io.EOF { - return awserr.NewUnmarshalError(err, - "failed to unmarshal error message", errBuf.Bytes()) - } - - return nil -} - // UnmarshalXML deserializes an xml.Decoder into the container v. V // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail. diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 42f71648..515ce152 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -119,18 +119,7 @@ func (n *XMLNode) findElem(name string) (string, bool) { // StructToXML writes an XMLNode to a xml.Encoder as tokens. 
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
-	// Sort Attributes
-	attrs := node.Attr
-	if sorted {
-		sortedAttrs := make([]xml.Attr, len(attrs))
-		for _, k := range node.Attr {
-			sortedAttrs = append(sortedAttrs, k)
-		}
-		sort.Sort(xmlAttrSlice(sortedAttrs))
-		attrs = sortedAttrs
-	}
-
-	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs})
+	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
 
 	if node.Text != "" {
 		e.EncodeToken(xml.CharData([]byte(node.Text)))
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index b4a4e8c4..d67cfde7 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -460,7 +460,8 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request
 	output = &DeleteBucketOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -536,7 +537,8 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
 	output = &DeleteBucketAnalyticsConfigurationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -545,10 +547,6 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
 // Deletes an analytics configuration for the bucket (specified by the analytics
 // configuration ID).
 //
-// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
-// action. The bucket owner has this permission by default. The bucket owner
-// can grant this permission to others.
-//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
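The req.Handlers.Unmarshal change above repeats throughout the rest of the s3/api.go diff, and the two idioms are not quite equivalent: Swap replaces a named handler in place, keeping its position in the handler list, while the restored Remove/PushBackNamed pair deletes it and appends the replacement at the end. A standalone sketch against the vendored aws/request package, not part of the patch; the handler names here are made up for illustration:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/request"
	)

	// named builds a trivial handler that prints its own name when run.
	func named(name string) request.NamedHandler {
		return request.NamedHandler{Name: name, Fn: func(*request.Request) { fmt.Println(name) }}
	}

	// build returns a list with an unmarshal handler followed by a later handler.
	func build() request.HandlerList {
		var l request.HandlerList
		l.PushBackNamed(named("restxml.Unmarshal"))
		l.PushBackNamed(named("validateResponse"))
		return l
	}

	func main() {
		// Swap keeps the replacement in the original slot.
		a := build()
		a.Swap("restxml.Unmarshal", named("discardBody"))
		a.Run(nil) // prints: discardBody, then validateResponse

		// Remove + PushBackNamed moves the replacement to the end of the list.
		b := build()
		b.Remove(named("restxml.Unmarshal"))
		b.PushBackNamed(named("discardBody"))
		b.Run(nil) // prints: validateResponse, then discardBody
	}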
@@ -616,7 +614,8 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
 	output = &DeleteBucketCorsOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -691,7 +690,8 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (
 	output = &DeleteBucketEncryptionOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -766,7 +766,8 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent
 	output = &DeleteBucketInventoryConfigurationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -842,7 +843,8 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re
 	output = &DeleteBucketLifecycleOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -917,7 +919,8 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC
 	output = &DeleteBucketMetricsConfigurationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -993,7 +996,8 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
 	output = &DeleteBucketPolicyOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -1068,14 +1072,15 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
 	output = &DeleteBucketReplicationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
 // DeleteBucketReplication API operation for Amazon Simple Storage Service.
 //
 // Deletes the replication configuration from the bucket. For information about
-// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
 // in the Amazon S3 Developer Guide.
 //
 // Returns awserr.Error for service API and SDK errors.
Use runtime type assertions @@ -1145,7 +1150,8 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r output = &DeleteBucketTaggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -1220,7 +1226,8 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r output = &DeleteBucketWebsiteOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -1520,13 +1527,14 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) output = &DeletePublicAccessBlockOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } // DeletePublicAccessBlock API operation for Amazon Simple Storage Service. // -// Removes the PublicAccessBlock configuration from an Amazon S3 bucket. +// Removes the Public Access Block configuration for an Amazon S3 bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2051,7 +2059,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // GetBucketLifecycle API operation for Amazon Simple Storage Service. // -// No longer used, see the GetBucketLifecycleConfiguration operation. +// Deprecated, see the GetBucketLifecycleConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2432,7 +2440,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // GetBucketNotification API operation for Amazon Simple Storage Service. // -// No longer used, see the GetBucketNotificationConfiguration operation. +// Deprecated, see the GetBucketNotificationConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3221,230 +3229,6 @@ func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, return out, req.Send() } -const opGetObjectLegalHold = "GetObjectLegalHold" - -// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectLegalHold operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectLegalHold for more information on using the GetObjectLegalHold -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectLegalHoldRequest method. -// req, resp := client.GetObjectLegalHoldRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold -func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { - op := &request.Operation{ - Name: opGetObjectLegalHold, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?legal-hold", - } - - if input == nil { - input = &GetObjectLegalHoldInput{} - } - - output = &GetObjectLegalHoldOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectLegalHold API operation for Amazon Simple Storage Service. -// -// Gets an object's current Legal Hold status. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectLegalHold for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold -func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { - req, out := c.GetObjectLegalHoldRequest(input) - return out, req.Send() -} - -// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectLegalHold for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { - req, out := c.GetObjectLegalHoldRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectLockConfiguration = "GetObjectLockConfiguration" - -// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectLockConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectLockConfigurationRequest method. 
-// req, resp := client.GetObjectLockConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration -func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { - op := &request.Operation{ - Name: opGetObjectLockConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?object-lock", - } - - if input == nil { - input = &GetObjectLockConfigurationInput{} - } - - output = &GetObjectLockConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. -// -// Gets the object lock configuration for a bucket. The rule specified in the -// object lock configuration will be applied by default to every new object -// placed in the specified bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectLockConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration -func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { - req, out := c.GetObjectLockConfigurationRequest(input) - return out, req.Send() -} - -// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectLockConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { - req, out := c.GetObjectLockConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectRetention = "GetObjectRetention" - -// GetObjectRetentionRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectRetention operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectRetention for more information on using the GetObjectRetention -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetObjectRetentionRequest method. 
-// req, resp := client.GetObjectRetentionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention -func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { - op := &request.Operation{ - Name: opGetObjectRetention, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?retention", - } - - if input == nil { - input = &GetObjectRetentionInput{} - } - - output = &GetObjectRetentionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectRetention API operation for Amazon Simple Storage Service. -// -// Retrieves an object's retention settings. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectRetention for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention -func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { - req, out := c.GetObjectRetentionRequest(input) - return out, req.Send() -} - -// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectRetention for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { - req, out := c.GetObjectRetentionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - const opGetObjectTagging = "GetObjectTagging" // GetObjectTaggingRequest generates a "aws/request.Request" representing the @@ -3637,7 +3421,7 @@ func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req // GetPublicAccessBlock API operation for Amazon Simple Storage Service. // -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. +// Retrieves the Public Access Block configuration for an Amazon S3 bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3706,7 +3490,8 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou output = &HeadBucketOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -4214,7 +3999,7 @@ func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipa // // Example iterating over at most 3 pages of a ListMultipartUploads operation. 
// pageNum := 0 // err := client.ListMultipartUploadsPages(params, -// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { +// func(page *ListMultipartUploadsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4344,7 +4129,7 @@ func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVer // // Example iterating over at most 3 pages of a ListObjectVersions operation. // pageNum := 0 // err := client.ListObjectVersionsPages(params, -// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { +// func(page *ListObjectVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4481,7 +4266,7 @@ func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, op // // Example iterating over at most 3 pages of a ListObjects operation. // pageNum := 0 // err := client.ListObjectsPages(params, -// func(page *s3.ListObjectsOutput, lastPage bool) bool { +// func(page *ListObjectsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4619,7 +4404,7 @@ func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input // // Example iterating over at most 3 pages of a ListObjectsV2 operation. // pageNum := 0 // err := client.ListObjectsV2Pages(params, -// func(page *s3.ListObjectsV2Output, lastPage bool) bool { +// func(page *ListObjectsV2Output, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4749,7 +4534,7 @@ func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts . // // Example iterating over at most 3 pages of a ListParts operation. // pageNum := 0 // err := client.ListPartsPages(params, -// func(page *s3.ListPartsOutput, lastPage bool) bool { +// func(page *ListPartsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4827,7 +4612,8 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC output = &PutBucketAccelerateConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -4902,7 +4688,8 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request output = &PutBucketAclOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -4977,7 +4764,8 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon output = &PutBucketAnalyticsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -5053,7 +4841,8 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque output = &PutBucketCorsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + 
req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -5128,7 +4917,8 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r output = &PutBucketEncryptionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -5204,7 +4994,8 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon output = &PutBucketInventoryConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -5285,13 +5076,14 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req output = &PutBucketLifecycleOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } // PutBucketLifecycle API operation for Amazon Simple Storage Service. // -// No longer used, see the PutBucketLifecycleConfiguration operation. +// Deprecated, see the PutBucketLifecycleConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5364,7 +5156,8 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon output = &PutBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -5440,7 +5233,8 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request output = &PutBucketLoggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -5517,7 +5311,8 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu output = &PutBucketMetricsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -5598,13 +5393,14 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re output = &PutBucketNotificationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } // PutBucketNotification API operation for Amazon Simple Storage Service. 
 //
-// No longer used, see the PutBucketNotificationConfiguration operation.
+// Deprecated, see the PutBucketNotificationConfiguration operation.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -5677,7 +5473,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat
 	output = &PutBucketNotificationConfigurationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
@@ -5752,13 +5549,15 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
 	output = &PutBucketPolicyOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
 // PutBucketPolicy API operation for Amazon Simple Storage Service.
 //
-// Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
+// Replaces a policy on a bucket. If the bucket already has a policy, the one
+// in this request completely replaces it.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -5827,14 +5626,15 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
 	output = &PutBucketReplicationOutput{}
 	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
 	return
 }
 
 // PutBucketReplication API operation for Amazon Simple Storage Service.
 //
 // Creates a replication configuration or replaces an existing one. For more
-// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
 // in the Amazon S3 Developer Guide.
 //
 // Returns awserr.Error for service API and SDK errors.
Use runtime type assertions @@ -5904,7 +5704,8 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) output = &PutBucketRequestPaymentOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -5983,7 +5784,8 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request output = &PutBucketTaggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -6058,7 +5860,8 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r output = &PutBucketVersioningOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -6134,7 +5937,8 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request output = &PutBucketWebsiteOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } @@ -6324,497 +6128,274 @@ func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, return out, req.Send() } -const opPutObjectLegalHold = "PutObjectLegalHold" +const opPutObjectTagging = "PutObjectTagging" -// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectLegalHold operation. The "output" return +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutObjectLegalHold for more information on using the PutObjectLegalHold +// See PutObjectTagging for more information on using the PutObjectTagging // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutObjectLegalHoldRequest method. -// req, resp := client.PutObjectLegalHoldRequest(params) +// // Example sending a request using the PutObjectTaggingRequest method. 
+// req, resp := client.PutObjectTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold -func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { op := &request.Operation{ - Name: opPutObjectLegalHold, + Name: opPutObjectTagging, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?legal-hold", + HTTPPath: "/{Bucket}/{Key+}?tagging", } if input == nil { - input = &PutObjectLegalHoldInput{} + input = &PutObjectTaggingInput{} } - output = &PutObjectLegalHoldOutput{} + output = &PutObjectTaggingOutput{} req = c.newRequest(op, input, output) return } -// PutObjectLegalHold API operation for Amazon Simple Storage Service. +// PutObjectTagging API operation for Amazon Simple Storage Service. // -// Applies a Legal Hold configuration to the specified object. +// Sets the supplied tag-set to an object that already exists in a bucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectLegalHold for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold -func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { - req, out := c.PutObjectLegalHoldRequest(input) +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) return out, req.Send() } -// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of // the ability to pass a context and additional request options. // -// See PutObjectLegalHold for details on how to use this API operation. +// See PutObjectTagging for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { - req, out := c.PutObjectLegalHoldRequest(input) +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutObjectLockConfiguration = "PutObjectLockConfiguration" +const opPutPublicAccessBlock = "PutPublicAccessBlock" -// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectLockConfiguration operation. 
The "output" return +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutObjectLockConfigurationRequest method. -// req, resp := client.PutObjectLockConfigurationRequest(params) +// // Example sending a request using the PutPublicAccessBlockRequest method. +// req, resp := client.PutPublicAccessBlockRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration -func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { op := &request.Operation{ - Name: opPutObjectLockConfiguration, + Name: opPutPublicAccessBlock, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?object-lock", + HTTPPath: "/{Bucket}?publicAccessBlock", } if input == nil { - input = &PutObjectLockConfigurationInput{} + input = &PutPublicAccessBlockInput{} } - output = &PutObjectLockConfigurationOutput{} + output = &PutPublicAccessBlockOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. // -// Places an object lock configuration on the specified bucket. The rule specified -// in the object lock configuration will be applied by default to every new -// object placed in the specified bucket. +// Creates or modifies the Public Access Block configuration for an Amazon S3 +// bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectLockConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration -func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { - req, out := c.PutObjectLockConfigurationRequest(input) +// API operation PutPublicAccessBlock for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) return out, req.Send() } -// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of // the ability to pass a context and additional request options. // -// See PutObjectLockConfiguration for details on how to use this API operation. +// See PutPublicAccessBlock for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { - req, out := c.PutObjectLockConfigurationRequest(input) +func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutObjectRetention = "PutObjectRetention" +const opRestoreObject = "RestoreObject" -// PutObjectRetentionRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectRetention operation. The "output" return +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutObjectRetention for more information on using the PutObjectRetention +// See RestoreObject for more information on using the RestoreObject // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutObjectRetentionRequest method. -// req, resp := client.PutObjectRetentionRequest(params) +// // Example sending a request using the RestoreObjectRequest method. 
+// req, resp := client.RestoreObjectRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention -func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { op := &request.Operation{ - Name: opPutObjectRetention, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?retention", + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", } if input == nil { - input = &PutObjectRetentionInput{} + input = &RestoreObjectInput{} } - output = &PutObjectRetentionOutput{} + output = &RestoreObjectOutput{} req = c.newRequest(op, input, output) return } -// PutObjectRetention API operation for Amazon Simple Storage Service. +// RestoreObject API operation for Amazon Simple Storage Service. // -// Places an Object Retention configuration on an object. +// Restores an archived copy of an object back into Amazon S3 // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectRetention for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention -func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { - req, out := c.PutObjectRetentionRequest(input) +// API operation RestoreObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This operation is not allowed against this storage tier +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) return out, req.Send() } -// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of +// RestoreObjectWithContext is the same as RestoreObject with the addition of // the ability to pass a context and additional request options. // -// See PutObjectRetention for details on how to use this API operation. +// See RestoreObject for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { - req, out := c.PutObjectRetentionRequest(input) +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opPutObjectTagging = "PutObjectTagging" +const opSelectObjectContent = "SelectObjectContent" -// PutObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectTagging operation. The "output" return +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutObjectTagging for more information on using the PutObjectTagging +// See SelectObjectContent for more information on using the SelectObjectContent // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutObjectTaggingRequest method. -// req, resp := client.PutObjectTaggingRequest(params) +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { op := &request.Operation{ - Name: opPutObjectTagging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?tagging", + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", } if input == nil { - input = &PutObjectTaggingInput{} + input = &SelectObjectContentInput{} } - output = &PutObjectTaggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutObjectTagging API operation for Amazon Simple Storage Service. -// -// Sets the supplied tag-set to an object that already exists in a bucket -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) - return out, req.Send() -} - -// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
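The doc comments being shuffled around here all describe the SDK's two-step request pattern: XxxRequest builds a request.Request without sending it (so callers can attach custom handlers or headers first), req.Send() transmits it, and SetContext/the WithContext wrappers add cancellation. A hedged usage sketch for the PutObjectTagging operation as restored by this patch; the session setup, bucket name, key, and tag values are placeholders, not values taken from the patch:

	package main

	import (
		"context"
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
		svc := s3.New(sess)

		// Two-step form from the doc comments: build the request first,
		// then Send it; handlers could be adjusted in between.
		req, out := svc.PutObjectTaggingRequest(&s3.PutObjectTaggingInput{
			Bucket: aws.String("example-bucket"), // hypothetical bucket
			Key:    aws.String("example-key"),    // hypothetical key
			Tagging: &s3.Tagging{TagSet: []*s3.Tag{
				{Key: aws.String("env"), Value: aws.String("dev")},
			}},
		})
		req.SetContext(context.Background()) // must be non-nil, per the comments above
		if err := req.Send(); err != nil {
			fmt.Println("error:", err)
			return
		}
		fmt.Println(out)
	}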
-func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutPublicAccessBlock = "PutPublicAccessBlock" - -// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the PutPublicAccessBlock operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutPublicAccessBlockRequest method. -// req, resp := client.PutPublicAccessBlockRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock -func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { - op := &request.Operation{ - Name: opPutPublicAccessBlock, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?publicAccessBlock", - } - - if input == nil { - input = &PutPublicAccessBlockInput{} - } - - output = &PutPublicAccessBlockOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutPublicAccessBlock API operation for Amazon Simple Storage Service. -// -// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 -// bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutPublicAccessBlock for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock -func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { - req, out := c.PutPublicAccessBlockRequest(input) - return out, req.Send() -} - -// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of -// the ability to pass a context and additional request options. -// -// See PutPublicAccessBlock for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { - req, out := c.PutPublicAccessBlockRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opRestoreObject = "RestoreObject" - -// RestoreObjectRequest generates a "aws/request.Request" representing the -// client's request for the RestoreObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RestoreObject for more information on using the RestoreObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RestoreObjectRequest method. -// req, resp := client.RestoreObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { - op := &request.Operation{ - Name: opRestoreObject, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?restore", - } - - if input == nil { - input = &RestoreObjectInput{} - } - - output = &RestoreObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// RestoreObject API operation for Amazon Simple Storage Service. -// -// Restores an archived copy of an object back into Amazon S3 -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation RestoreObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) - return out, req.Send() -} - -// RestoreObjectWithContext is the same as RestoreObject with the addition of -// the ability to pass a context and additional request options. -// -// See RestoreObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSelectObjectContent = "SelectObjectContent" - -// SelectObjectContentRequest generates a "aws/request.Request" representing the -// client's request for the SelectObjectContent operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See SelectObjectContent for more information on using the SelectObjectContent -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SelectObjectContentRequest method. -// req, resp := client.SelectObjectContentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent -func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { - op := &request.Operation{ - Name: opSelectObjectContent, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", - } - - if input == nil { - input = &SelectObjectContentInput{} - } - - output = &SelectObjectContentOutput{} + output = &SelectObjectContentOutput{} req = c.newRequest(op, input, output) req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) @@ -7013,16 +6594,13 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp return out, req.Send() } -// Specifies the days since the initiation of an incomplete multipart upload -// that Amazon S3 will wait before permanently removing all parts of the upload. -// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// in the Amazon Simple Storage Service Developer Guide. +// Specifies the days since the initiation of an Incomplete Multipart Upload +// that Lifecycle will wait before permanently removing all parts of the upload. type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` - // Specifies the number of days after which Amazon S3 aborts an incomplete multipart - // upload. + // Indicates the number of days that must pass since initiation for Lifecycle + // to abort an Incomplete Multipart Upload. DaysAfterInitiation *int64 `type:"integer"` } @@ -7043,15 +6621,11 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI } type AbortMultipartUploadInput struct { - _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + _ struct{} `type:"structure"` - // Name of the bucket to which the multipart upload was initiated. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Key of the object for which the multipart upload was initiated. - // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -7061,8 +6635,6 @@ type AbortMultipartUploadInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Upload ID that identifies the multipart upload. 
- // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -7083,9 +6655,6 @@ func (s *AbortMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -7157,13 +6726,10 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart return s } -// Configures the transfer acceleration state for an Amazon S3 bucket. For more -// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon Simple Storage Service Developer Guide. type AccelerateConfiguration struct { _ struct{} `type:"structure"` - // Specifies the transfer acceleration status of the bucket. + // The accelerate configuration of the bucket. Status *string `type:"string" enum:"BucketAccelerateStatus"` } @@ -7183,14 +6749,12 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { return s } -// Contains the elements that set the ACL permissions for an object per grantee. type AccessControlPolicy struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` } @@ -7240,9 +6804,7 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { type AccessControlTranslation struct { _ struct{} `type:"structure"` - // Specifies the replica ownership. For default and valid values, see PUT bucket - // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // in the Amazon Simple Storage Service API Reference. + // The override value for the owner of the replica object. // // Owner is a required field Owner *string `type:"string" required:"true" enum:"OwnerOverride"` @@ -7277,14 +6839,10 @@ func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation return s } -// A conjunction (logical AND) of predicates, which is used in evaluating a -// metrics filter. The operator must have at least two predicates in any combination, -// and an object must match all of the predicates for the filter to apply. type AnalyticsAndOperator struct { _ struct{} `type:"structure"` - // The prefix to use when evaluating an AND predicate: The prefix that an object - // must have to be included in the metrics results. + // The prefix to use when evaluating an AND predicate. Prefix *string `type:"string"` // The list of tags to use when evaluating an AND predicate. @@ -7333,11 +6891,6 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { return s } -// Specifies the configuration and any analyses for the analytics filter of -// an Amazon S3 bucket. -// -// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html) -// in the Amazon Simple Storage Service API Reference. type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -7346,13 +6899,13 @@ type AnalyticsConfiguration struct { // If no filter is provided, all objects will be considered in any analysis. Filter *AnalyticsFilter `type:"structure"` - // The ID that identifies the analytics configuration. 
+ // The identifier used to represent an analytics configuration. // // Id is a required field Id *string `type:"string" required:"true"` - // Contains data related to access patterns to be collected and made available - // to analyze the tradeoffs between different storage classes. + // If present, it indicates that data related to access patterns will be collected + // and made available to analyze the tradeoffs between different storage classes. // // StorageClassAnalysis is a required field StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` @@ -7412,7 +6965,6 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis return s } -// Where to publish the analytics results. type AnalyticsExportDestination struct { _ struct{} `type:"structure"` @@ -7521,7 +7073,7 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the bucket to which data is exported. + // The Amazon resource name (ARN) of the bucket to which data is exported. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -7530,12 +7082,13 @@ type AnalyticsS3BucketDestination struct { // the owner will not be validated prior to exporting data. BucketAccountId *string `type:"string"` - // Specifies the file format used when exporting data to Amazon S3. + // The file format used when exporting data to Amazon S3. // // Format is a required field Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` - // The prefix to use when exporting data. The prefix is prepended to all results. + // The prefix to use when exporting data. The exported data begins with this + // prefix. Prefix *string `type:"string"` } @@ -7628,14 +7181,9 @@ func (s *Bucket) SetName(v string) *Bucket { return s } -// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. -// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon Simple Storage Service Developer Guide. type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` - // A lifecycle rule for individual objects in an Amazon S3 bucket. - // // Rules is a required field Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } @@ -7682,10 +7230,9 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec type BucketLoggingStatus struct { _ struct{} `type:"structure"` - // Describes where logs are stored and the prefix that Amazon S3 assigns to - // all log object keys for a bucket. For more information, see PUT Bucket logging - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon Simple Storage Service API Reference. + // Container for logging information. Presence of this element indicates that + // logging is enabled. Parameters TargetBucket and TargetPrefix are required + // in this case. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -7720,15 +7267,9 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin return s } -// Describes the cross-origin access configuration for objects in an Amazon -// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// Simple Storage Service Developer Guide. 
type CORSConfiguration struct { _ struct{} `type:"structure"` - // A set of allowed origins and methods. - // // CORSRules is a required field CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` } @@ -7772,18 +7313,14 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { return s } -// Specifies a cross-origin access rule for an Amazon S3 bucket. type CORSRule struct { _ struct{} `type:"structure"` - // Headers that are specified in the Access-Control-Request-Headers header. - // These headers are allowed in a preflight OPTIONS request. In response to - // any preflight OPTIONS request, Amazon S3 returns any requested headers that - // are allowed. + // Specifies which headers are allowed in a pre-flight OPTIONS request. AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - // An HTTP method that you allow the origin to execute. Valid values are GET, - // PUT, HEAD, POST, and DELETE. + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. // // AllowedMethods is a required field AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` @@ -8084,7 +7621,7 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { } type CompleteMultipartUploadInput struct { - _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + _ struct{} `type:"structure" payload:"MultipartUpload"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -8120,9 +7657,6 @@ func (s *CompleteMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -8198,7 +7732,7 @@ type CompleteMultipartUploadOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -8334,7 +7868,6 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { return s } -// Specifies a condition that must be met for a redirect to apply. type Condition struct { _ struct{} `type:"structure"` @@ -8404,7 +7937,7 @@ func (s *ContinuationEvent) UnmarshalEvent( } type CopyObjectInput struct { - _ struct{} `locationName:"CopyObjectRequest" type:"structure"` + _ struct{} `type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` @@ -8454,7 +7987,7 @@ type CopyObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. 
- CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -8486,15 +8019,6 @@ type CopyObjectInput struct { // with metadata provided in the request. MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` - // Specifies whether you want to apply a Legal Hold to the copied object. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The object lock mode that you want to apply to the copied object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when you want the copied object's object lock to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. // Documentation on downloading objects from requester pays buckets can be found @@ -8509,23 +8033,18 @@ type CopyObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // Specifies the AWS KMS Encryption Context to use for object encryption. The - // value of this header is a base64-encoded UTF-8 string holding JSON with the - // encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). 
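Reviewer note: the CopyObjectInput hunks above roll the struct back to the older field set, dropping the object-lock headers, the SSEKMSEncryptionContext field, and the sensitive:"true" tags, but the calling pattern is unchanged. A minimal sketch of how the surviving SSE fields are driven through the generated setters; the bucket, key, and KMS alias below are placeholders, not values from this patch, and the usual imports (aws, session, s3, fmt, log) are assumed:

// Server-side copy with SSE-KMS, using the fluent setters the SDK generates.
// All names are hypothetical.
svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

input := &s3.CopyObjectInput{}
input.SetBucket("dest-bucket").
	SetKey("dest-key").
	SetCopySource("source-bucket/source-key"). // URL-encoded "bucket/key" of the source object
	SetServerSideEncryption(s3.ServerSideEncryptionAwsKms).
	SetSSEKMSKeyId("alias/example-key")

out, err := svc.CopyObject(input)
if err != nil {
	log.Fatal(err)
}
fmt.Println(aws.StringValue(out.CopyObjectResult.ETag))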
@@ -8565,9 +8084,6 @@ func (s *CopyObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.CopySource == nil { invalidParams.Add(request.NewErrParamRequired("CopySource")) } @@ -8736,24 +8252,6 @@ func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { return s } -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { - s.ObjectLockRetainUntilDate = &v - return s -} - // SetRequestPayer sets the RequestPayer field's value. func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { s.RequestPayer = &v @@ -8785,12 +8283,6 @@ func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { return s } -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { - s.SSEKMSEncryptionContext = &v - return s -} - // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { s.SSEKMSKeyId = &v @@ -8851,14 +8343,9 @@ type CopyObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the AWS KMS Encryption Context to use for object encryption. - // The value of this header is a base64-encoded UTF-8 string holding JSON with - // the encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -8914,12 +8401,6 @@ func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { return s } -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { - s.SSEKMSEncryptionContext = &v - return s -} - // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { s.SSEKMSKeyId = &v @@ -9004,7 +8485,7 @@ type CreateBucketConfiguration struct { _ struct{} `type:"structure"` // Specifies the region where the bucket will be created. If you don't specify - // a region, the bucket is created in US East (N. Virginia) Region (us-east-1). + // a region, the bucket will be created in US Standard. 
LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -9025,7 +8506,7 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke } type CreateBucketInput struct { - _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` @@ -9050,10 +8531,6 @@ type CreateBucketInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Specifies whether you want Amazon S3 object lock to be enabled for the new - // bucket. - ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` } // String returns the string representation @@ -9072,9 +8549,6 @@ func (s *CreateBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -9137,12 +8611,6 @@ func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { return s } -// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. -func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { - s.ObjectLockEnabledForBucket = &v - return s -} - type CreateBucketOutput struct { _ struct{} `type:"structure"` @@ -9166,7 +8634,7 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { } type CreateMultipartUploadInput struct { - _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` + _ struct{} `type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` @@ -9212,15 +8680,6 @@ type CreateMultipartUploadInput struct { // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // Specifies whether you want to apply a Legal Hold to the uploaded object. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // Specifies the object lock mode that you want to apply to the uploaded object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // Specifies the date and time when you want the object lock to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. // Documentation on downloading objects from requester pays buckets can be found @@ -9235,23 +8694,18 @@ type CreateMultipartUploadInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. 
- SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // Specifies the AWS KMS Encryption Context to use for object encryption. The - // value of this header is a base64-encoded UTF-8 string holding JSON with the - // encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -9285,9 +8739,6 @@ func (s *CreateMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -9392,24 +8843,6 @@ func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMu return s } -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { - s.ObjectLockRetainUntilDate = &v - return s -} - // SetRequestPayer sets the RequestPayer field's value. func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { s.RequestPayer = &v @@ -9441,12 +8874,6 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMulti return s } -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { - s.SSEKMSEncryptionContext = &v - return s -} - // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { s.SSEKMSKeyId = &v @@ -9507,14 +8934,9 @@ type CreateMultipartUploadOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the AWS KMS Encryption Context to use for object encryption. - // The value of this header is a base64-encoded UTF-8 string holding JSON with - // the encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -9574,80 +8996,30 @@ func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipa // SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { - s.ServerSideEncryption = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { - s.UploadId = &v - return s -} - -// The container element for specifying the default object lock retention settings -// for new objects placed in the specified bucket. -type DefaultRetention struct { - _ struct{} `type:"structure"` - - // The number of days that you want to specify for the default retention period. - Days *int64 `type:"integer"` - - // The default object lock retention mode you want to apply to new objects placed - // in the specified bucket. - Mode *string `type:"string" enum:"ObjectLockRetentionMode"` - - // The number of years that you want to specify for the default retention period. - Years *int64 `type:"integer"` -} - -// String returns the string representation -func (s DefaultRetention) String() string { - return awsutil.Prettify(s) + return s } -// GoString returns the string representation -func (s DefaultRetention) GoString() string { - return s.String() +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
+func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s } -// SetDays sets the Days field's value. -func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { - s.Days = &v +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v return s } -// SetMode sets the Mode field's value. -func (s *DefaultRetention) SetMode(v string) *DefaultRetention { - s.Mode = &v +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v return s } -// SetYears sets the Years field's value. -func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { - s.Years = &v +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v return s } @@ -9708,14 +9080,14 @@ func (s *Delete) SetQuiet(v bool) *Delete { } type DeleteBucketAnalyticsConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` + _ struct{} `type:"structure"` // The name of the bucket from which an analytics configuration is deleted. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The ID that identifies the analytics configuration. + // The identifier used to represent an analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -9737,9 +9109,6 @@ func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -9784,7 +9153,7 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { } type DeleteBucketCorsInput struct { - _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9806,9 +9175,6 @@ func (s *DeleteBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -9844,7 +9210,7 @@ func (s DeleteBucketCorsOutput) GoString() string { } type DeleteBucketEncryptionInput struct { - _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` + _ struct{} `type:"structure"` // The name of the bucket containing the server-side encryption configuration // to delete. 
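Reviewer note: like the other Delete* inputs in this range, DeleteBucketEncryptionInput reverts to a bare `type:"structure"` tag with Bucket as its only required field. For reference, a sketch of the call it feeds, reusing the client and imports assumed in the sketch above and a placeholder bucket name:

// Remove the default server-side encryption configuration from a bucket.
_, err := svc.DeleteBucketEncryption(&s3.DeleteBucketEncryptionInput{
	Bucket: aws.String("example-bucket"), // placeholder
})
if err != nil {
	log.Fatal(err)
}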
@@ -9869,9 +9235,6 @@ func (s *DeleteBucketEncryptionInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -9907,7 +9270,7 @@ func (s DeleteBucketEncryptionOutput) GoString() string { } type DeleteBucketInput struct { - _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9929,9 +9292,6 @@ func (s *DeleteBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -9953,7 +9313,7 @@ func (s *DeleteBucketInput) getBucket() (v string) { } type DeleteBucketInventoryConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` + _ struct{} `type:"structure"` // The name of the bucket containing the inventory configuration to delete. // @@ -9982,9 +9342,6 @@ func (s *DeleteBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -10029,7 +9386,7 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { } type DeleteBucketLifecycleInput struct { - _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10051,9 +9408,6 @@ func (s *DeleteBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -10089,7 +9443,7 @@ func (s DeleteBucketLifecycleOutput) GoString() string { } type DeleteBucketMetricsConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` + _ struct{} `type:"structure"` // The name of the bucket containing the metrics configuration to delete. 
// @@ -10118,9 +9472,6 @@ func (s *DeleteBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -10179,7 +9530,7 @@ func (s DeleteBucketOutput) GoString() string { } type DeleteBucketPolicyInput struct { - _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10201,9 +9552,6 @@ func (s *DeleteBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -10239,7 +9587,7 @@ func (s DeleteBucketPolicyOutput) GoString() string { } type DeleteBucketReplicationInput struct { - _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` + _ struct{} `type:"structure"` // The bucket name. // @@ -10266,9 +9614,6 @@ func (s *DeleteBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -10304,7 +9649,7 @@ func (s DeleteBucketReplicationOutput) GoString() string { } type DeleteBucketTaggingInput struct { - _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10326,9 +9671,6 @@ func (s *DeleteBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -10364,7 +9706,7 @@ func (s DeleteBucketTaggingOutput) GoString() string { } type DeleteBucketWebsiteInput struct { - _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10386,9 +9728,6 @@ func (s *DeleteBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -10510,15 +9849,11 @@ func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { } type DeleteObjectInput struct { - _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions - // to process this operation. 
- BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -10552,9 +9887,6 @@ func (s *DeleteObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -10581,12 +9913,6 @@ func (s *DeleteObjectInput) getBucket() (v string) { return *s.Bucket } -// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. -func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { - s.BypassGovernanceRetention = &v - return s -} - // SetKey sets the Key field's value. func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { s.Key = &v @@ -10656,7 +9982,7 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { } type DeleteObjectTaggingInput struct { - _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10684,9 +10010,6 @@ func (s *DeleteObjectTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -10749,16 +10072,11 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO } type DeleteObjectsInput struct { - _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + _ struct{} `type:"structure" payload:"Delete"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies whether you want to delete this object even if it has a Governance-type - // object lock in place. You must have sufficient permissions to perform this - // operation. - BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -10789,9 +10107,6 @@ func (s *DeleteObjectsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Delete == nil { invalidParams.Add(request.NewErrParamRequired("Delete")) } @@ -10820,12 +10135,6 @@ func (s *DeleteObjectsInput) getBucket() (v string) { return *s.Bucket } -// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. -func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { - s.BypassGovernanceRetention = &v - return s -} - // SetDelete sets the Delete field's value. 
func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { s.Delete = v @@ -10885,9 +10194,10 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { } type DeletePublicAccessBlockInput struct { - _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` + _ struct{} `type:"structure"` - // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // The Amazon S3 bucket whose Public Access Block configuration you want to + // delete. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10909,9 +10219,6 @@ func (s *DeletePublicAccessBlockInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -10992,33 +10299,33 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { return s } -// Specifies information about where to publish analysis or configuration results -// for an Amazon S3 bucket. +// A container for information about the replication destination. type Destination struct { _ struct{} `type:"structure"` - // Specify this only in a cross-account scenario (where source and destination - // bucket owners are not the same), and you want to change replica ownership - // to the AWS account that owns the destination bucket. If this is not specified - // in the replication configuration, the replicas are owned by same AWS account - // that owns the source object. + // A container for information about access control for replicas. + // + // Use this element only in a cross-account scenario where source and destination + // bucket owners are not the same to change replica ownership to the AWS account + // that owns the destination bucket. If you don't add this element to the replication + // configuration, the replicas are owned by same AWS account that owns the source + // object. AccessControlTranslation *AccessControlTranslation `type:"structure"` - // Destination bucket owner account ID. In a cross-account scenario, if you - // direct Amazon S3 to change replica ownership to the AWS account that owns - // the destination bucket by specifying the AccessControlTranslation property, - // this is the account ID of the destination bucket owner. For more information, - // see Cross-Region Replication Additional Configuration: Change Replica Owner - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in - // the Amazon Simple Storage Service Developer Guide. + // The account ID of the destination bucket. Currently, Amazon S3 verifies this + // value only if Access Control Translation is enabled. + // + // In a cross-account scenario, if you change replica ownership to the AWS account + // that owns the destination bucket by adding the AccessControlTranslation element, + // this is the account ID of the owner of the destination bucket. Account *string `type:"string"` // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to // store replicas of the object identified by the rule. // - // A replication configuration can replicate objects to only one destination - // bucket. If there are multiple rules in your replication configuration, all - // rules must specify the same destination bucket. 
+ // If there are multiple rules in your replication configuration, all rules + // must specify the same bucket as the destination. A replication configuration + // can replicate objects to only one destination bucket. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -11027,13 +10334,8 @@ type Destination struct { // is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // The storage class to use when replicating objects, such as standard or reduced - // redundancy. By default, Amazon S3 uses the storage class of the source object - // to create the object replica. - // - // For valid values, see the StorageClass element of the PUT Bucket replication - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // action in the Amazon Simple Storage Service API Reference. + // The class of storage used to store the object. By default Amazon S3 uses + // storage class of the source object when creating a replica. StorageClass *string `type:"string" enum:"StorageClass"` } @@ -11119,7 +10421,7 @@ type Encryption struct { // If the encryption type is aws:kms, this optional value specifies the AWS // KMS key ID to use for encryption of job results. - KMSKeyId *string `type:"string" sensitive:"true"` + KMSKeyId *string `type:"string"` } // String returns the string representation @@ -11163,13 +10465,13 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption { return s } -// Specifies encryption-related information for an Amazon S3 bucket that is -// a destination for replicated objects. +// A container for information about the encryption-based configuration for +// replicas. type EncryptionConfiguration struct { _ struct{} `type:"structure"` - // Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket. - // Amazon S3 uses this key to encrypt replica objects. + // The ID of the AWS KMS key for the AWS Region where the destination bucket + // resides. Amazon S3 uses this key to encrypt the replica object. ReplicaKmsKeyID *string `type:"string"` } @@ -11302,19 +10604,18 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { return s } -// Specifies the Amazon S3 object key name to filter on and whether to filter -// on the suffix or prefix of the key name. +// A container for a key value pair that defines the criteria for the filter +// rule. type FilterRule struct { _ struct{} `type:"structure"` // The object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. The maximum length is 1,024 characters. Overlapping - // prefixes and suffixes are not supported. For more information, see Configuring - // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // the filtering rule applies. The maximum prefix length is 1,024 characters. + // Overlapping prefixes and suffixes are not supported. For more information, + // see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string" enum:"FilterRuleName"` - // The value that the filter searches for in object key names. 
Value *string `type:"string"` } @@ -11341,7 +10642,7 @@ func (s *FilterRule) SetValue(v string) *FilterRule { } type GetBucketAccelerateConfigurationInput struct { - _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` + _ struct{} `type:"structure"` // Name of the bucket for which the accelerate configuration is retrieved. // @@ -11365,9 +10666,6 @@ func (s *GetBucketAccelerateConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -11412,7 +10710,7 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA } type GetBucketAclInput struct { - _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11434,9 +10732,6 @@ func (s *GetBucketAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -11489,14 +10784,14 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { } type GetBucketAnalyticsConfigurationInput struct { - _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` + _ struct{} `type:"structure"` // The name of the bucket from which an analytics configuration is retrieved. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The ID that identifies the analytics configuration. + // The identifier used to represent an analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -11518,9 +10813,6 @@ func (s *GetBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -11574,7 +10866,7 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana } type GetBucketCorsInput struct { - _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11596,9 +10888,6 @@ func (s *GetBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -11642,7 +10931,7 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { } type GetBucketEncryptionInput struct { - _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` + _ struct{} `type:"structure"` // The name of the bucket from which the server-side encryption configuration // is retrieved. 
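Reviewer note: one behavioral change repeats across every Validate() hunk in this range: the rollback removes the len(*s.Bucket) < 1 check, so an empty but non-nil bucket name now passes client-side validation and is only rejected by the service. A small sketch of the observable difference, using one of the affected inputs and a placeholder value:

in := &s3.GetBucketCorsInput{Bucket: aws.String("")}

// Against the pre-patch SDK this returns an ErrParamMinLen error for "Bucket";
// against the vendored copy in this patch it returns nil, and the empty name
// only fails once the request reaches S3.
if err := in.Validate(); err != nil {
	fmt.Println("client-side:", err)
} else {
	fmt.Println("client-side validation passed")
}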
@@ -11667,9 +10956,6 @@ func (s *GetBucketEncryptionInput) Validate() error {
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -11693,7 +10979,8 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) {
 
 type GetBucketEncryptionOutput struct {
 	_ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
 
-	// Specifies the default server-side-encryption configuration.
+	// Container for server-side encryption configuration rules. Currently S3 supports
+	// one rule only.
 	ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"`
 }
@@ -11714,7 +11001,7 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv
 }
 
 type GetBucketInventoryConfigurationInput struct {
-	_ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"`
+	_ struct{} `type:"structure"`
 
 	// The name of the bucket containing the inventory configuration to retrieve.
 	//
@@ -11743,9 +11030,6 @@ func (s *GetBucketInventoryConfigurationInput) Validate() error {
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
 	if s.Id == nil {
 		invalidParams.Add(request.NewErrParamRequired("Id"))
 	}
@@ -11799,7 +11083,7 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv
 }
 
 type GetBucketLifecycleConfigurationInput struct {
-	_ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"`
+	_ struct{} `type:"structure"`
 
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -11821,9 +11105,6 @@ func (s *GetBucketLifecycleConfigurationInput) Validate() error {
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -11867,7 +11148,7 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge
 }
 
 type GetBucketLifecycleInput struct {
-	_ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"`
+	_ struct{} `type:"structure"`
 
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -11889,9 +11170,6 @@ func (s *GetBucketLifecycleInput) Validate() error {
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -11935,7 +11213,7 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput
 }
 
 type GetBucketLocationInput struct {
-	_ struct{} `locationName:"GetBucketLocationRequest" type:"structure"`
+	_ struct{} `type:"structure"`
 
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -11957,9 +11235,6 @@ func (s *GetBucketLocationInput) Validate() error {
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -12003,7 +11278,7 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca
 }
 
 type GetBucketLoggingInput struct {
-	_ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"`
+	_ struct{} `type:"structure"`
 
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12025,9 +11300,6 @@ func (s *GetBucketLoggingInput) Validate() error {
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -12051,10 +11323,9 @@ func (s *GetBucketLoggingInput) getBucket() (v string) {
 
 type GetBucketLoggingOutput struct {
 	_ struct{} `type:"structure"`
 
-	// Describes where logs are stored and the prefix that Amazon S3 assigns to
-	// all log object keys for a bucket. For more information, see PUT Bucket logging
-	// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
-	// in the Amazon Simple Storage Service API Reference.
+	// Container for logging information. Presence of this element indicates that
+	// logging is enabled. Parameters TargetBucket and TargetPrefix are required
+	// in this case.
 	LoggingEnabled *LoggingEnabled `type:"structure"`
 }
@@ -12075,7 +11346,7 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket
 }
 
 type GetBucketMetricsConfigurationInput struct {
-	_ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"`
+	_ struct{} `type:"structure"`
 
 	// The name of the bucket containing the metrics configuration to retrieve.
 	//
@@ -12104,9 +11375,6 @@ func (s *GetBucketMetricsConfigurationInput) Validate() error {
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
 	if s.Id == nil {
 		invalidParams.Add(request.NewErrParamRequired("Id"))
 	}
@@ -12160,7 +11428,7 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics
 }
 
 type GetBucketNotificationConfigurationRequest struct {
-	_ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"`
+	_ struct{} `type:"structure"`
 
 	// Name of the bucket to get the notification configuration for.
// @@ -12184,9 +11452,6 @@ func (s *GetBucketNotificationConfigurationRequest) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -12208,7 +11473,7 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { } type GetBucketPolicyInput struct { - _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12230,9 +11495,6 @@ func (s *GetBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -12277,9 +11539,9 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { } type GetBucketPolicyStatusInput struct { - _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` + _ struct{} `type:"structure"` - // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // The name of the Amazon S3 bucket whose public-policy status you want to retrieve. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12301,9 +11563,6 @@ func (s *GetBucketPolicyStatusInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -12327,7 +11586,7 @@ func (s *GetBucketPolicyStatusInput) getBucket() (v string) { type GetBucketPolicyStatusOutput struct { _ struct{} `type:"structure" payload:"PolicyStatus"` - // The policy status for the specified bucket. + // The public-policy status for this bucket. 
PolicyStatus *PolicyStatus `type:"structure"` } @@ -12348,7 +11607,7 @@ func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucke } type GetBucketReplicationInput struct { - _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12370,9 +11629,6 @@ func (s *GetBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -12418,7 +11674,7 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC } type GetBucketRequestPaymentInput struct { - _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12440,9 +11696,6 @@ func (s *GetBucketRequestPaymentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -12487,7 +11740,7 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym } type GetBucketTaggingInput struct { - _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12509,9 +11762,6 @@ func (s *GetBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -12556,7 +11806,7 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { } type GetBucketVersioningInput struct { - _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12578,9 +11828,6 @@ func (s *GetBucketVersioningInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -12636,7 +11883,7 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp } type GetBucketWebsiteInput struct { - _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12658,9 +11905,6 @@ func (s *GetBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -12688,8 +11932,6 @@ type GetBucketWebsiteOutput struct { IndexDocument *IndexDocument 
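The PolicyStatus payload above is how callers read the public-policy flag whose wording this downgrade changes. A short sketch of the round trip, assuming the usual generated GetBucketPolicyStatus operation for these input/output shapes and a hypothetical bucket name, with session setup left to the default credential chain:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	})
	if err != nil {
		fmt.Println("GetBucketPolicyStatus failed:", err)
		return
	}
	if out.PolicyStatus != nil {
		// TRUE means the bucket policy grants public access.
		fmt.Println("policy is public:", aws.BoolValue(out.PolicyStatus.IsPublic))
	}
}
```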
`type:"structure"` - // Specifies the redirect behavior of all requests to a website endpoint of - // an Amazon S3 bucket. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` @@ -12730,7 +11972,7 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb } type GetObjectAclInput struct { - _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12764,9 +12006,6 @@ func (s *GetObjectAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -12853,7 +12092,7 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { } type GetObjectInput struct { - _ struct{} `locationName:"GetObjectRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12918,7 +12157,7 @@ type GetObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -12945,9 +12184,6 @@ func (s *GetObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -13046,226 +12282,46 @@ func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { return s } -// SetResponseContentType sets the ResponseContentType field's value. -func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { - s.ResponseContentType = &v - return s -} - -// SetResponseExpires sets the ResponseExpires field's value. -func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { - s.ResponseExpires = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { - s.SSECustomerKey = &v - return s -} - -func (s *GetObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetVersionId sets the VersionId field's value. 
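Note the tag change on SSECustomerKey in this hunk: the downgraded copy drops both `marshal-as:"blob"` and `sensitive:"true"`. Whatever redaction the newer SDK applied to sensitive fields no longer applies here, so String() (which is backed by awsutil.Prettify) will echo the customer key. A cautionary sketch with placeholder values, worth keeping in mind anywhere the provider logs request inputs:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	in := &s3.GetObjectInput{
		Bucket:         aws.String("example-bucket"), // hypothetical
		Key:            aws.String("example-key"),    // hypothetical
		// Placeholder, not a real key material value.
		SSECustomerKey: aws.String("0123456789abcdef0123456789abcdef"),
	}
	// With no sensitive tag on the field in this vendored copy, the
	// customer key appears verbatim in the Prettify output below.
	fmt.Println(in.String())
}
```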
-func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { - s.VersionId = &v - return s -} - -type GetObjectLegalHoldInput struct { - _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` - - // The bucket containing the object whose Legal Hold status you want to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The key name for the object whose Legal Hold status you want to retrieve. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The version ID of the object whose Legal Hold status you want to retrieve. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s GetObjectLegalHoldInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectLegalHoldInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectLegalHoldInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { - s.Bucket = &v - return s -} - -func (s *GetObjectLegalHoldInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { - s.VersionId = &v - return s -} - -type GetObjectLegalHoldOutput struct { - _ struct{} `type:"structure" payload:"LegalHold"` - - // The current Legal Hold status for the specified object. - LegalHold *ObjectLockLegalHold `type:"structure"` -} - -// String returns the string representation -func (s GetObjectLegalHoldOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectLegalHoldOutput) GoString() string { - return s.String() -} - -// SetLegalHold sets the LegalHold field's value. 
-func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { - s.LegalHold = v - return s -} - -type GetObjectLockConfigurationInput struct { - _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` - - // The bucket whose object lock configuration you want to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetObjectLockConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectLockConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectLockConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { - s.Bucket = &v +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v return s } -func (s *GetObjectLockConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s } -type GetObjectLockConfigurationOutput struct { - _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} - // The specified bucket's object lock configuration. - ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s } -// String returns the string representation -func (s GetObjectLockConfigurationOutput) String() string { - return awsutil.Prettify(s) +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey } -// GoString returns the string representation -func (s GetObjectLockConfigurationOutput) GoString() string { - return s.String() +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s } -// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. -func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { - s.ObjectLockConfiguration = v +// SetVersionId sets the VersionId field's value. 
+func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v return s } @@ -13329,16 +12385,6 @@ type GetObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The object lock mode currently in place for this object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when this object's object lock will expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` @@ -13364,7 +12410,7 @@ type GetObjectOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -13490,24 +12536,6 @@ func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { return s } -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { - s.ObjectLockRetainUntilDate = &v - return s -} - // SetPartsCount sets the PartsCount field's value. func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { s.PartsCount = &v @@ -13580,117 +12608,8 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput return s } -type GetObjectRetentionInput struct { - _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` - - // The bucket containing the object whose retention settings you want to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The key name for the object whose retention settings you want to retrieve. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. 
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The version ID for the object whose retention settings you want to retrieve. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s GetObjectRetentionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectRetentionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectRetentionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { - s.Bucket = &v - return s -} - -func (s *GetObjectRetentionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { - s.VersionId = &v - return s -} - -type GetObjectRetentionOutput struct { - _ struct{} `type:"structure" payload:"Retention"` - - // The container element for an object's retention settings. - Retention *ObjectLockRetention `type:"structure"` -} - -// String returns the string representation -func (s GetObjectRetentionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectRetentionOutput) GoString() string { - return s.String() -} - -// SetRetention sets the Retention field's value. 
-func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { - s.Retention = v - return s -} - type GetObjectTaggingInput struct { - _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13717,9 +12636,6 @@ func (s *GetObjectTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -13790,7 +12706,7 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput } type GetObjectTorrentInput struct { - _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13821,9 +12737,6 @@ func (s *GetObjectTorrentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -13895,10 +12808,10 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu } type GetPublicAccessBlockInput struct { - _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` + _ struct{} `type:"structure"` - // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you - // want to retrieve. + // The name of the Amazon S3 bucket whose Public Access Block configuration + // you want to retrieve. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13920,9 +12833,6 @@ func (s *GetPublicAccessBlockInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -13946,8 +12856,8 @@ func (s *GetPublicAccessBlockInput) getBucket() (v string) { type GetPublicAccessBlockOutput struct { _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` - // The PublicAccessBlock configuration currently in effect for this Amazon S3 - // bucket. + // The Public Access Block configuration currently in effect for this Amazon + // S3 bucket. 
PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` } @@ -14126,7 +13036,7 @@ func (s *Grantee) SetURI(v string) *Grantee { } type HeadBucketInput struct { - _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14148,9 +13058,6 @@ func (s *HeadBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -14186,7 +13093,7 @@ func (s HeadBucketOutput) GoString() string { } type HeadObjectInput struct { - _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14234,7 +13141,7 @@ type HeadObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -14261,9 +13168,6 @@ func (s *HeadObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -14423,15 +13327,6 @@ type HeadObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - // The Legal Hold status for the specified object. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The object lock mode currently in place for this object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when this object's object lock expires. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` @@ -14457,7 +13352,7 @@ type HeadObjectOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). 
@@ -14568,24 +13463,6 @@ func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { return s } -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { - s.ObjectLockRetainUntilDate = &v - return s -} - // SetPartsCount sets the PartsCount field's value. func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { s.PartsCount = &v @@ -14778,9 +13655,6 @@ func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { return s } -// Specifies the inventory configuration for an Amazon S3 bucket. For more information, -// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) -// in the Amazon Simple Storage Service API Reference. type InventoryConfiguration struct { _ struct{} `type:"structure"` @@ -14798,16 +13672,12 @@ type InventoryConfiguration struct { // Id is a required field Id *string `type:"string" required:"true"` - // Object versions to include in the inventory list. If set to All, the list - // includes all the object versions, which adds the version-related fields VersionId, - // IsLatest, and DeleteMarker to the list. If set to Current, the list does - // not contain these version-related fields. + // Specifies which object version(s) to included in the inventory results. // // IncludedObjectVersions is a required field IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` - // Specifies whether the inventory is enabled or disabled. If set to True, an - // inventory list is generated. If set to False, no inventory list is generated. + // Specifies whether the inventory is enabled or disabled. // // IsEnabled is a required field IsEnabled *bool `type:"boolean" required:"true"` @@ -15250,15 +14120,11 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { type LambdaFunctionConfiguration struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For - // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // A container for object key name filtering rules. For information about key + // name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -15266,8 +14132,8 @@ type LambdaFunctionConfiguration struct { // If you don't provide one, Amazon S3 will assign an ID. 
Id *string `type:"string"` - // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 - // invokes when the specified event type occurs. + // The Amazon Resource Name (ARN) of the Lambda cloud function that Amazon S3 + // can invoke when it detects events of the specified type. // // LambdaFunctionArn is a required field LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` @@ -15418,11 +14284,8 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp type LifecycleRule struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an incomplete multipart upload - // that Amazon S3 will wait before permanently removing all parts of the upload. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon Simple Storage Service Developer Guide. + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` Expiration *LifecycleExpiration `type:"structure"` @@ -15444,7 +14307,7 @@ type LifecycleRule struct { NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` // Prefix identifying one or more objects to which the rule applies. This is - // No longer used; use Filter instead. + // deprecated; use Filter instead. // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -15661,7 +14524,7 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { } type ListBucketAnalyticsConfigurationsInput struct { - _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` + _ struct{} `type:"structure"` // The name of the bucket from which analytics configurations are retrieved. // @@ -15689,9 +14552,6 @@ func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -15773,7 +14633,7 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str } type ListBucketInventoryConfigurationsInput struct { - _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` + _ struct{} `type:"structure"` // The name of the bucket containing the inventory configurations to retrieve. // @@ -15803,9 +14663,6 @@ func (s *ListBucketInventoryConfigurationsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -15887,7 +14744,7 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str } type ListBucketMetricsConfigurationsInput struct { - _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` + _ struct{} `type:"structure"` // The name of the bucket containing the metrics configurations to retrieve. 
// @@ -15917,9 +14774,6 @@ func (s *ListBucketMetricsConfigurationsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -16047,7 +14901,7 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { } type ListMultipartUploadsInput struct { - _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16098,9 +14952,6 @@ func (s *ListMultipartUploadsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -16291,7 +15142,7 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti } type ListObjectVersionsInput struct { - _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16337,9 +15188,6 @@ func (s *ListObjectVersionsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -16524,7 +15372,7 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe } type ListObjectsInput struct { - _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16572,9 +15420,6 @@ func (s *ListObjectsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -16736,7 +15581,7 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { } type ListObjectsV2Input struct { - _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` + _ struct{} `type:"structure"` // Name of the bucket to list. 
// @@ -16792,9 +15637,6 @@ func (s *ListObjectsV2Input) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -16997,7 +15839,7 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { } type ListPartsInput struct { - _ struct{} `locationName:"ListPartsRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -17040,9 +15882,6 @@ func (s *ListPartsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -17379,10 +16218,9 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { return s } -// Describes where logs are stored and the prefix that Amazon S3 assigns to -// all log object keys for a bucket. For more information, see PUT Bucket logging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) -// in the Amazon Simple Storage Service API Reference. +// Container for logging information. Presence of this element indicates that +// logging is enabled. Parameters TargetBucket and TargetPrefix are required +// in this case. type LoggingEnabled struct { _ struct{} `type:"structure"` @@ -17398,9 +16236,8 @@ type LoggingEnabled struct { TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - // A prefix for all log object keys. If you store log files from multiple Amazon - // S3 buckets in a single bucket, you can use a prefix to distinguish which - // log files came from which bucket. + // This element lets you specify a prefix for the keys that the log files will + // be stored under. // // TargetPrefix is a required field TargetPrefix *string `type:"string" required:"true"` @@ -17543,13 +16380,6 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { return s } -// Specifies a metrics configuration for the CloudWatch request metrics (specified -// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating -// an existing metrics configuration, note that this is a full replacement of -// the existing metrics configuration. If you don't include the elements you -// want to keep, they are erased. For more information, see PUT Bucket metrics -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) -// in the Amazon Simple Storage Service API Reference. type MetricsConfiguration struct { _ struct{} `type:"structure"` @@ -17745,8 +16575,8 @@ type NoncurrentVersionExpiration struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon Simple Storage Service Developer Guide. + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. 
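All of the List* request shapes above lose the same Bucket length check; in practice callers drive them through the generated paginator helpers rather than validating by hand. A short paging sketch using ListObjectsV2 (bucket and prefix hypothetical, and assuming the stock ListObjectsV2Pages helper generated alongside these shapes):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String("example-bucket"), // hypothetical
		Prefix: aws.String("logs/"),          // hypothetical
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		return true // continue through all pages
	})
	if err != nil {
		fmt.Println("ListObjectsV2 failed:", err)
	}
}
```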
NoncurrentDays *int64 `type:"integer"` } @@ -17767,20 +16597,19 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers } // Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, -// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning -// is suspended), you can set this action to request that Amazon S3 transition -// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, -// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's -// lifetime. +// transition to the STANDARD_IA, ONEZONE_IA, or GLACIER storage class. If your +// bucket is versioning-enabled (or versioning is suspended), you can set this +// action to request that Amazon S3 transition noncurrent object versions to +// the STANDARD_IA, ONEZONE_IA, or GLACIER storage class at a specific period +// in the object's lifetime. type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) - // in the Amazon Simple Storage Service Developer Guide. + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` // The class of storage used to store the object. @@ -17814,16 +16643,10 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi type NotificationConfiguration struct { _ struct{} `type:"structure"` - // Describes the AWS Lambda functions to invoke and the events for which to - // invoke them. LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` - // The Amazon Simple Queue Service queues to publish messages to and the events - // for which to publish messages. QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` - // The topic to which notifications are sent and the events for which notifications - // are generated. TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` } @@ -17933,8 +16756,8 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf return s } -// Specifies object key name filtering rules. For information about key name -// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// A container for object key name filtering rules. For information about key +// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -18057,133 +16880,18 @@ func (s *ObjectIdentifier) Validate() error { if invalidParams.Len() > 0 { return invalidParams } - return nil -} - -// SetKey sets the Key field's value. -func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. 
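The notification shapes above (LambdaFunctionConfiguration, NotificationConfigurationFilter, KeyFilter, FilterRule) compose into the NotificationConfiguration accepted by PutBucketNotificationConfiguration, whose input shape appears further down in this file. A sketch wiring them together; the bucket name and Lambda ARN are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Invoke a (hypothetical) Lambda function for objects created under "logs/".
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		NotificationConfiguration: &s3.NotificationConfiguration{
			LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{{
				Events:            []*string{aws.String("s3:ObjectCreated:*")},
				LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:example"), // hypothetical
				Filter: &s3.NotificationConfigurationFilter{
					Key: &s3.KeyFilter{
						FilterRules: []*s3.FilterRule{{
							Name:  aws.String("prefix"),
							Value: aws.String("logs/"),
						}},
					},
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("PutBucketNotificationConfiguration failed:", err)
	}
}
```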
-func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { - s.VersionId = &v - return s -} - -// The container element for object lock configuration parameters. -type ObjectLockConfiguration struct { - _ struct{} `type:"structure"` - - // Indicates whether this bucket has an object lock configuration enabled. - ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` - - // The object lock rule in place for the specified object. - Rule *ObjectLockRule `type:"structure"` -} - -// String returns the string representation -func (s ObjectLockConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectLockConfiguration) GoString() string { - return s.String() -} - -// SetObjectLockEnabled sets the ObjectLockEnabled field's value. -func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { - s.ObjectLockEnabled = &v - return s -} - -// SetRule sets the Rule field's value. -func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { - s.Rule = v - return s -} - -// A Legal Hold configuration for an object. -type ObjectLockLegalHold struct { - _ struct{} `type:"structure"` - - // Indicates whether the specified object has a Legal Hold in place. - Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` -} - -// String returns the string representation -func (s ObjectLockLegalHold) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectLockLegalHold) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { - s.Status = &v - return s -} - -// A Retention configuration for an object. -type ObjectLockRetention struct { - _ struct{} `type:"structure"` - - // Indicates the Retention mode for the specified object. - Mode *string `type:"string" enum:"ObjectLockRetentionMode"` - - // The date on which this object lock retention expires. - RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` -} - -// String returns the string representation -func (s ObjectLockRetention) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectLockRetention) GoString() string { - return s.String() -} - -// SetMode sets the Mode field's value. -func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { - s.Mode = &v - return s -} - -// SetRetainUntilDate sets the RetainUntilDate field's value. -func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { - s.RetainUntilDate = &v - return s -} - -// The container element for an object lock rule. -type ObjectLockRule struct { - _ struct{} `type:"structure"` - - // The default retention period that you want to apply to new objects placed - // in the specified bucket. - DefaultRetention *DefaultRetention `type:"structure"` -} - -// String returns the string representation -func (s ObjectLockRule) String() string { - return awsutil.Prettify(s) + return nil } -// GoString returns the string representation -func (s ObjectLockRule) GoString() string { - return s.String() +// SetKey sets the Key field's value. +func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s } -// SetDefaultRetention sets the DefaultRetention field's value. 
-func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { - s.DefaultRetention = v +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v return s } @@ -18439,12 +17147,12 @@ func (s *Part) SetSize(v int64) *Part { return s } -// The container element for a bucket's policy status. +// The container element for this bucket's public-policy status. type PolicyStatus struct { _ struct{} `type:"structure"` - // The policy status for this bucket. TRUE indicates that this bucket is public. - // FALSE indicates that the bucket is not public. + // The public-policy status for this bucket. TRUE indicates that this bucket + // is public. FALSE indicates that the bucket is not public. IsPublic *bool `locationName:"IsPublic" type:"boolean"` } @@ -18545,45 +17253,75 @@ func (s *ProgressEvent) UnmarshalEvent( return nil } -// Specifies the Block Public Access configuration for an Amazon S3 bucket. +// The container element for all Public Access Block configuration options. +// You can enable the configuration options in any combination. +// +// Amazon S3 considers a bucket policy public unless at least one of the following +// conditions is true: +// +// The policy limits access to a set of CIDRs using aws:SourceIp. For more information +// on CIDR, see http://www.rfc-editor.org/rfc/rfc4632.txt (http://www.rfc-editor.org/rfc/rfc4632.txt) +// +// The policy grants permissions, not including any "bad actions," to one of +// the following: +// +// A fixed AWS principal, user, role, or service principal +// +// A fixed aws:SourceArn +// +// A fixed aws:SourceVpc +// +// A fixed aws:SourceVpce +// +// A fixed aws:SourceOwner +// +// A fixed aws:SourceAccount +// +// A fixed value of s3:x-amz-server-side-encryption-aws-kms-key-id +// +// A fixed value of aws:userid outside the pattern "AROLEID:*" +// +// "Bad actions" are those that could expose the data inside a bucket to reads +// or writes by the public. These actions are s3:Get*, s3:List*, s3:AbortMultipartUpload, +// s3:Delete*, s3:Put*, and s3:RestoreObject. +// +// The star notation for bad actions indicates that all matching operations +// are considered bad actions. For example, because s3:Get* is a bad action, +// s3:GetObject, s3:GetObjectVersion, and s3:GetObjectAcl are all bad actions. type PublicAccessBlockConfiguration struct { _ struct{} `type:"structure"` - // Specifies whether Amazon S3 should block public access control lists (ACLs) - // for this bucket and objects in this bucket. Setting this element to TRUE - // causes the following behavior: - // - // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is - // public. + // Specifies whether Amazon S3 should block public ACLs for this bucket. Setting + // this element to TRUEcauses the following behavior: // - // * PUT Object calls fail if the request includes a public ACL. + // PUT Bucket acl and PUT Object acl calls will fail if the specified ACL allows + // public access. // - // Enabling this setting doesn't affect existing policies or ACLs. + // * PUT Object calls will fail if the request includes an object ACL. BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` // Specifies whether Amazon S3 should block public bucket policies for this // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to // PUT Bucket policy if the specified bucket policy allows public access. 
// - // Enabling this setting doesn't affect existing bucket policies. + // Note that enabling this setting doesn't affect existing bucket policies. BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` - // Specifies whether Amazon S3 should ignore public ACLs for this bucket and - // objects in this bucket. Setting this element to TRUE causes Amazon S3 to - // ignore all public ACLs on this bucket and objects in this bucket. + // Specifies whether Amazon S3 should ignore public ACLs for this bucket. Setting + // this element to TRUE causes Amazon S3 to ignore all public ACLs on this bucket + // and any objects that it contains. // - // Enabling this setting doesn't affect the persistence of any existing ACLs - // and doesn't prevent new public ACLs from being set. + // Note that enabling this setting doesn't affect the persistence of any existing + // ACLs and doesn't prevent new public ACLs from being set. IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` // Specifies whether Amazon S3 should restrict public bucket policies for this - // bucket. Setting this element to TRUE restricts access to this bucket to only - // AWS services and authorized users within this account if the bucket has a - // public policy. + // bucket. If this element is set to TRUE, then only the bucket owner and AWS + // Services can access this bucket if it has a public policy. // - // Enabling this setting doesn't affect previously stored bucket policies, except - // that public and cross-account access within any public bucket policy, including - // non-public delegation to specific accounts, is blocked. + // Note that enabling this setting doesn't affect previously stored bucket policies, + // except that public and cross-account access within any public bucket policy, + // including non-public delegation to specific accounts, is blocked. RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` } @@ -18622,7 +17360,7 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi } type PutBucketAccelerateConfigurationInput struct { - _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` + _ struct{} `type:"structure" payload:"AccelerateConfiguration"` // Specifies the Accelerate Configuration you want to set for the bucket. // @@ -18654,9 +17392,6 @@ func (s *PutBucketAccelerateConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -18698,12 +17433,11 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string { } type PutBucketAclInput struct { - _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` + _ struct{} `type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - // Contains the elements that set the ACL permissions for an object per grantee. 
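The four flags documented above are independent and can be enabled in any combination. A sketch that blocks all public access for a hypothetical bucket, assuming the corresponding PutPublicAccessBlock operation defined elsewhere in this same vendored file:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Enable all four Public Access Block options at once.
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	if err != nil {
		fmt.Println("PutPublicAccessBlock failed:", err)
	}
}
```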
AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field @@ -18742,9 +17476,6 @@ func (s *PutBucketAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.AccessControlPolicy != nil { if err := s.AccessControlPolicy.Validate(); err != nil { invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) @@ -18827,7 +17558,7 @@ func (s PutBucketAclOutput) GoString() string { } type PutBucketAnalyticsConfigurationInput struct { - _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` // The configuration and any analyses for the analytics filter. // @@ -18839,7 +17570,7 @@ type PutBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The ID that identifies the analytics configuration. + // The identifier used to represent an analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -18864,9 +17595,6 @@ func (s *PutBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -18922,16 +17650,11 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { } type PutBucketCorsInput struct { - _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` + _ struct{} `type:"structure" payload:"CORSConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Describes the cross-origin access configuration for objects in an Amazon - // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon - // Simple Storage Service Developer Guide. - // // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -18952,9 +17675,6 @@ func (s *PutBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.CORSConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) } @@ -19004,18 +17724,16 @@ func (s PutBucketCorsOutput) GoString() string { } type PutBucketEncryptionInput struct { - _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` - // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). 
For information - // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) - // in the Amazon Simple Storage Service Developer Guide. + // The name of the bucket for which the server-side encryption configuration + // is set. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies the default server-side-encryption configuration. + // Container for server-side encryption configuration rules. Currently S3 supports + // one rule only. // // ServerSideEncryptionConfiguration is a required field ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -19037,9 +17755,6 @@ func (s *PutBucketEncryptionInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.ServerSideEncryptionConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) } @@ -19089,7 +17804,7 @@ func (s PutBucketEncryptionOutput) GoString() string { } type PutBucketInventoryConfigurationInput struct { - _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` + _ struct{} `type:"structure" payload:"InventoryConfiguration"` // The name of the bucket where the inventory configuration will be stored. // @@ -19123,9 +17838,6 @@ func (s *PutBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -19184,14 +17896,11 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string { } type PutBucketLifecycleConfigurationInput struct { - _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. - // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) - // in the Amazon Simple Storage Service Developer Guide. 
LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19211,9 +17920,6 @@ func (s *PutBucketLifecycleConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.LifecycleConfiguration != nil { if err := s.LifecycleConfiguration.Validate(); err != nil { invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) @@ -19260,7 +17966,7 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string { } type PutBucketLifecycleInput struct { - _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19284,9 +17990,6 @@ func (s *PutBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.LifecycleConfiguration != nil { if err := s.LifecycleConfiguration.Validate(); err != nil { invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) @@ -19333,7 +18036,7 @@ func (s PutBucketLifecycleOutput) GoString() string { } type PutBucketLoggingInput struct { - _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19358,9 +18061,6 @@ func (s *PutBucketLoggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.BucketLoggingStatus == nil { invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) } @@ -19410,7 +18110,7 @@ func (s PutBucketLoggingOutput) GoString() string { } type PutBucketMetricsConfigurationInput struct { - _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` + _ struct{} `type:"structure" payload:"MetricsConfiguration"` // The name of the bucket for which the metrics configuration is set. 
// @@ -19444,9 +18144,6 @@ func (s *PutBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -19505,7 +18202,7 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { } type PutBucketNotificationConfigurationInput struct { - _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + _ struct{} `type:"structure" payload:"NotificationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19533,9 +18230,6 @@ func (s *PutBucketNotificationConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.NotificationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) } @@ -19585,7 +18279,7 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { } type PutBucketNotificationInput struct { - _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` + _ struct{} `type:"structure" payload:"NotificationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19610,9 +18304,6 @@ func (s *PutBucketNotificationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.NotificationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) } @@ -19657,7 +18348,7 @@ func (s PutBucketNotificationOutput) GoString() string { } type PutBucketPolicyInput struct { - _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` + _ struct{} `type:"structure" payload:"Policy"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19688,9 +18379,6 @@ func (s *PutBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Policy == nil { invalidParams.Add(request.NewErrParamRequired("Policy")) } @@ -19741,7 +18429,7 @@ func (s PutBucketPolicyOutput) GoString() string { } type PutBucketReplicationInput struct { - _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19751,9 +18439,6 @@ type PutBucketReplicationInput struct { // // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // A token that allows Amazon S3 object lock to be enabled for an existing bucket. 
- Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } // String returns the string representation @@ -19772,9 +18457,6 @@ func (s *PutBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.ReplicationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) } @@ -19809,12 +18491,6 @@ func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationCo return s } -// SetToken sets the Token field's value. -func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { - s.Token = &v - return s -} - type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -19830,7 +18506,7 @@ func (s PutBucketReplicationOutput) GoString() string { } type PutBucketRequestPaymentInput struct { - _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19855,9 +18531,6 @@ func (s *PutBucketRequestPaymentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.RequestPaymentConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) } @@ -19907,7 +18580,7 @@ func (s PutBucketRequestPaymentOutput) GoString() string { } type PutBucketTaggingInput struct { - _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` + _ struct{} `type:"structure" payload:"Tagging"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19932,9 +18605,6 @@ func (s *PutBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Tagging == nil { invalidParams.Add(request.NewErrParamRequired("Tagging")) } @@ -19984,7 +18654,7 @@ func (s PutBucketTaggingOutput) GoString() string { } type PutBucketVersioningInput struct { - _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` + _ struct{} `type:"structure" payload:"VersioningConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19993,10 +18663,6 @@ type PutBucketVersioningInput struct { // and the value that is displayed on your authentication device. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Describes the versioning state of an Amazon S3 bucket. For more information, - // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) - // in the Amazon Simple Storage Service API Reference. 
- // // VersioningConfiguration is a required field VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -20017,9 +18683,6 @@ func (s *PutBucketVersioningInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.VersioningConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) } @@ -20070,13 +18733,11 @@ func (s PutBucketVersioningOutput) GoString() string { } type PutBucketWebsiteInput struct { - _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies website configuration parameters for an Amazon S3 bucket. - // // WebsiteConfiguration is a required field WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -20097,9 +18758,6 @@ func (s *PutBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.WebsiteConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) } @@ -20149,12 +18807,11 @@ func (s PutBucketWebsiteOutput) GoString() string { } type PutObjectAclInput struct { - _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` + _ struct{} `type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field @@ -20205,9 +18862,6 @@ func (s *PutObjectAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -20324,7 +18978,7 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { } type PutObjectInput struct { - _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` + _ struct{} `type:"structure" payload:"Body"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` @@ -20355,9 +19009,7 @@ type PutObjectInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameted is required - // if object lock parameters are specified. + // The base64-encoded 128-bit MD5 digest of the part data. 
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // A standard MIME type describing the format of the object data. @@ -20386,15 +19038,6 @@ type PutObjectInput struct { // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // The Legal Hold status that you want to apply to the specified object. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The object lock mode that you want to apply to this object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when you want this object's object lock to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. // Documentation on downloading objects from requester pays buckets can be found @@ -20409,23 +19052,18 @@ type PutObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // Specifies the AWS KMS Encryption Context to use for object encryption. The - // value of this header is a base64-encoded UTF-8 string holding JSON with the - // encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). 
@@ -20460,9 +19098,6 @@ func (s *PutObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -20555,340 +19190,94 @@ func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { return s } -// SetGrantRead sets the GrantRead field's value. -func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *PutObjectInput) SetKey(v string) *PutObjectInput { - s.Key = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { - s.Metadata = v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { - s.SSECustomerKey = &v - return s -} - -func (s *PutObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { - s.StorageClass = &v - return s -} - -// SetTagging sets the Tagging field's value. 
-func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { - s.Tagging = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { - s.WebsiteRedirectLocation = &v - return s -} - -type PutObjectLegalHoldInput struct { - _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` - - // The bucket containing the object that you want to place a Legal Hold on. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The key name for the object that you want to place a Legal Hold on. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Container element for the Legal Hold configuration you want to apply to the - // specified object. - LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The version ID of the object that you want to place a Legal Hold on. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s PutObjectLegalHoldInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectLegalHoldInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectLegalHoldInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { - s.Bucket = &v - return s -} - -func (s *PutObjectLegalHoldInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetKey sets the Key field's value. -func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { - s.Key = &v - return s -} - -// SetLegalHold sets the LegalHold field's value. -func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { - s.LegalHold = v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. 
-func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { - s.VersionId = &v - return s -} - -type PutObjectLegalHoldOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s PutObjectLegalHoldOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectLegalHoldOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { - s.RequestCharged = &v +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v return s } -type PutObjectLockConfigurationInput struct { - _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` - - // The bucket whose object lock configuration you want to create or replace. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The object lock configuration that you want to apply to the specified bucket. - ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} - // A token to allow Amazon S3 object lock to be enabled for an existing bucket. - Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s } -// String returns the string representation -func (s PutObjectLockConfigurationInput) String() string { - return awsutil.Prettify(s) +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s } -// GoString returns the string representation -func (s PutObjectLockConfigurationInput) GoString() string { - return s.String() +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutObjectLockConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { - s.Bucket = &v +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v return s } -func (s *PutObjectLockConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { return v } - return *s.Bucket + return *s.SSECustomerKey } -// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. -func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { - s.ObjectLockConfiguration = v +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v return s } -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { - s.RequestPayer = &v +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v return s } -// SetToken sets the Token field's value. -func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { - s.Token = &v +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v return s } -type PutObjectLockConfigurationOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s PutObjectLockConfigurationOutput) String() string { - return awsutil.Prettify(s) +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s } -// GoString returns the string representation -func (s PutObjectLockConfigurationOutput) GoString() string { - return s.String() +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s } -// SetRequestCharged sets the RequestCharged field's value. 
-func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { - s.RequestCharged = &v +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v return s } @@ -20916,14 +19305,9 @@ type PutObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the AWS KMS Encryption Context to use for object encryption. - // The value of this header is a base64-encoded UTF-8 string holding JSON with - // the encryption context key-value pairs. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -20973,12 +19357,6 @@ func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { return s } -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { - s.SSEKMSEncryptionContext = &v - return s -} - // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { s.SSEKMSKeyId = &v @@ -20997,139 +19375,8 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { return s } -type PutObjectRetentionInput struct { - _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` - - // The bucket that contains the object you want to apply this Object Retention - // configuration to. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates whether this operation should bypass Governance-mode restrictions.j - BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - - // The key name for the object that you want to apply this Object Retention - // configuration to. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The container element for the Object Retention configuration. - Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The version ID for the object that you want to apply this Object Retention - // configuration to. 
- VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s PutObjectRetentionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectRetentionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectRetentionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { - s.Bucket = &v - return s -} - -func (s *PutObjectRetentionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. -func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { - s.BypassGovernanceRetention = &v - return s -} - -// SetKey sets the Key field's value. -func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { - s.RequestPayer = &v - return s -} - -// SetRetention sets the Retention field's value. -func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { - s.Retention = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { - s.VersionId = &v - return s -} - -type PutObjectRetentionOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s PutObjectRetentionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectRetentionOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. 
-func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { - s.RequestCharged = &v - return s -} - type PutObjectTaggingInput struct { - _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` + _ struct{} `type:"structure" payload:"Tagging"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -21159,9 +19406,6 @@ func (s *PutObjectTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -21237,19 +19481,16 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput } type PutPublicAccessBlockInput struct { - _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` - // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you - // want to set. + // The name of the Amazon S3 bucket whose Public Access Block configuration + // you want to set. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The PublicAccessBlock configuration that you want to apply to this Amazon - // S3 bucket. You can enable the configuration options in any combination. For - // more information about when Amazon S3 considers a bucket or object public, - // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon Simple Storage Service Developer Guide. + // The Public Access Block configuration that you want to apply to this Amazon + // S3 bucket. // // PublicAccessBlockConfiguration is a required field PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -21271,9 +19512,6 @@ func (s *PutPublicAccessBlockInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.PublicAccessBlockConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) } @@ -21317,16 +19555,17 @@ func (s PutPublicAccessBlockOutput) GoString() string { return s.String() } -// Specifies the configuration for publishing messages to an Amazon Simple Queue -// Service (Amazon SQS) queue when Amazon S3 detects specified events. +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Queue Service (Amazon SQS) queue.when Amazon S3 detects +// specified events. type QueueConfiguration struct { _ struct{} `type:"structure"` // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // A container for object key name filtering rules. 
For information about key + // name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -21335,7 +19574,7 @@ type QueueConfiguration struct { Id *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 - // publishes a message when it detects events of the specified type. + // will publish a message when it detects events of the specified type. // // QueueArn is a required field QueueArn *string `locationName:"Queue" type:"string" required:"true"` @@ -21481,8 +19720,6 @@ func (s *RecordsEvent) UnmarshalEvent( return nil } -// Specifies how requests are redirected. In the event of an error, you can -// specify a different error code to return. type Redirect struct { _ struct{} `type:"structure"` @@ -21493,8 +19730,8 @@ type Redirect struct { // siblings is present. HttpRedirectCode *string `type:"string"` - // Protocol to use when redirecting requests. The default is the protocol that - // is used in the original request. + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. Protocol *string `type:"string" enum:"Protocol"` // The object key prefix to use in the redirect request. For example, to redirect @@ -21506,7 +19743,7 @@ type Redirect struct { ReplaceKeyPrefixWith *string `type:"string"` // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the siblings is present. Can + // request to error.html. Not required if one of the sibling is present. Can // be present only if ReplaceKeyPrefixWith is not provided. ReplaceKeyWith *string `type:"string"` } @@ -21551,18 +19788,16 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { return s } -// Specifies the redirect behavior of all requests to a website endpoint of -// an Amazon S3 bucket. type RedirectAllRequestsTo struct { _ struct{} `type:"structure"` - // Name of the host where requests are redirected. + // Name of the host where requests will be redirected. // // HostName is a required field HostName *string `type:"string" required:"true"` - // Protocol to use when redirecting requests. The default is the protocol that - // is used in the original request. + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. Protocol *string `type:"string" enum:"Protocol"` } @@ -21607,9 +19842,7 @@ type ReplicationConfiguration struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) role that Amazon S3 assumes when replicating objects. For more information, - // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html) - // in the Amazon Simple Storage Service Developer Guide. + // (IAM) role that Amazon S3 can assume when replicating the objects. // // Role is a required field Role *string `type:"string" required:"true"` @@ -21669,7 +19902,7 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo return s } -// Specifies which Amazon S3 objects to replicate and where to store the replicas. +// A container for information about a specific replication rule. 
type ReplicationRule struct { _ struct{} `type:"structure"` @@ -21689,8 +19922,7 @@ type ReplicationRule struct { ID *string `type:"string"` // An object keyname prefix that identifies the object or objects to which the - // rule applies. The maximum prefix length is 1,024 characters. To include all - // objects in a bucket, specify an empty string. + // rule applies. The maximum prefix length is 1,024 characters. // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -21706,7 +19938,7 @@ type ReplicationRule struct { // * Same object qualify tag based filter criteria specified in multiple // rules // - // For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) + // For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) // in the Amazon S3 Developer Guide. Priority *int64 `type:"integer"` @@ -21715,9 +19947,12 @@ type ReplicationRule struct { // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using // an AWS KMS-Managed Key (SSE-KMS). + // + // If you want Amazon S3 to replicate objects created with server-side encryption + // using AWS KMS-Managed Keys. SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` - // Specifies whether the rule is enabled. + // If status isn't enabled, the rule is ignored. // // Status is a required field Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` @@ -21999,7 +20234,7 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { } type RestoreObjectInput struct { - _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` + _ struct{} `type:"structure" payload:"RestoreRequest"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -22035,9 +20270,6 @@ func (s *RestoreObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -22232,7 +20464,6 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { return s } -// Specifies the redirect behavior and when a redirect is applied. type RoutingRule struct { _ struct{} `type:"structure"` @@ -22285,22 +20516,16 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { return s } -// Specifies lifecycle rules for an Amazon S3 bucket. For more information, -// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) -// in the Amazon Simple Storage Service API Reference. type Rule struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an incomplete multipart upload - // that Amazon S3 will wait before permanently removing all parts of the upload. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon Simple Storage Service Developer Guide. + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. 
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` Expiration *LifecycleExpiration `type:"structure"` - // Unique identifier for the rule. The value can't be longer than 255 characters. + // Unique identifier for the rule. The value cannot be longer than 255 characters. ID *string `type:"string"` // Specifies when noncurrent object versions expire. Upon expiration, Amazon @@ -22311,27 +20536,24 @@ type Rule struct { NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, - // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning - // is suspended), you can set this action to request that Amazon S3 transition - // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, - // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's - // lifetime. + // transition to the STANDARD_IA, ONEZONE_IA, or GLACIER storage class. If your + // bucket is versioning-enabled (or versioning is suspended), you can set this + // action to request that Amazon S3 transition noncurrent object versions to + // the STANDARD_IA, ONEZONE_IA, or GLACIER storage class at a specific period + // in the object's lifetime. NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - // Object key prefix that identifies one or more objects to which this rule - // applies. + // Prefix identifying one or more objects to which the rule applies. // // Prefix is a required field Prefix *string `type:"string" required:"true"` - // If Enabled, the rule is currently being applied. If Disabled, the rule is - // not currently being applied. + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. // // Status is a required field Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - // Specifies when an object transitions to a specified storage class. Transition *Transition `type:"structure"` } @@ -22417,7 +20639,7 @@ type SSEKMS struct { // key to use for encrypting Inventory reports. // // KeyId is a required field - KeyId *string `type:"string" required:"true" sensitive:"true"` + KeyId *string `type:"string" required:"true"` } // String returns the string representation @@ -22566,7 +20788,7 @@ type SelectObjectContentEventStreamReader interface { // HTTP this will also close the HTTP connection. Close() error - // Returns any error that has occurred while reading from the event stream. + // Returns any error that has occured while reading from the event stream. Err() error } @@ -22690,7 +20912,7 @@ func (r *readSelectObjectContentEventStream) unmarshalerForEventType( // Amazon S3 uses this to parse object data into records. It returns only records // that match the specified SQL expression. You must also specify the data serialization // format for the response. For more information, see S3Select API Documentation -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). 
type SelectObjectContentInput struct { _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -22727,16 +20949,16 @@ type SelectObjectContentInput struct { // Specifies if periodic request progress information should be enabled. RequestProgress *RequestProgress `type:"structure"` - // The SSE Algorithm used to encrypt the object. For more information, see Server-Side - // Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The SSE Algorithm used to encrypt the object. For more information, see + // Server-Side Encryption (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // The SSE Customer Key. For more information, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + // The SSE Customer Key. For more information, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - // The SSE Customer Key MD5. For more information, see Server-Side Encryption - // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The SSE Customer Key MD5. For more information, see Server-Side Encryption + // (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` } @@ -22756,9 +20978,6 @@ func (s *SelectObjectContentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Expression == nil { invalidParams.Add(request.NewErrParamRequired("Expression")) } @@ -22982,16 +21201,14 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec } // Describes the default server-side encryption to apply to new objects in the -// bucket. If a PUT Object request doesn't specify any server-side encryption, -// this default encryption will be applied. For more information, see PUT Bucket -// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) -// in the Amazon Simple Storage Service API Reference. +// bucket. If Put Object request does not specify any server-side encryption, +// this default encryption will be applied. type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` // KMS master key ID to use for the default encryption. This parameter is allowed - // if and only if SSEAlgorithm is set to aws:kms. - KMSMasterKeyID *string `type:"string" sensitive:"true"` + // if SSEAlgorithm is aws:kms. 
+ KMSMasterKeyID *string `type:"string"` // Server-side encryption algorithm to use for the default encryption. // @@ -23034,7 +21251,8 @@ func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEnc return s } -// Specifies the default server-side-encryption configuration. +// Container for server-side encryption configuration rules. Currently S3 supports +// one rule only. type ServerSideEncryptionConfiguration struct { _ struct{} `type:"structure"` @@ -23084,12 +21302,13 @@ func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRu return s } -// Specifies the default server-side encryption configuration. +// Container for information about a particular server-side encryption configuration +// rule. type ServerSideEncryptionRule struct { _ struct{} `type:"structure"` - // Specifies the default server-side encryption to apply to new objects in the - // bucket. If a PUT Object request doesn't specify any server-side encryption, + // Describes the default server-side encryption to apply to new objects in the + // bucket. If Put Object request does not specify any server-side encryption, // this default encryption will be applied. ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` } @@ -23125,17 +21344,13 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv return s } -// A container that describes additional filters for identifying the source -// objects that you want to replicate. You can choose to enable or disable the -// replication of these objects. Currently, Amazon S3 supports only the filter -// that you can specify for objects created with server-side encryption using -// an AWS KMS-Managed Key (SSE-KMS). +// A container for filters that define which source objects should be replicated. type SourceSelectionCriteria struct { _ struct{} `type:"structure"` - // A container for filter information for the selection of Amazon S3 objects - // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication - // configuration, this element is required. + // A container for filter information for the selection of S3 objects encrypted + // with AWS KMS. If you include SourceSelectionCriteria in the replication configuration, + // this element is required. SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` } @@ -23175,8 +21390,8 @@ func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedOb type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` - // Specifies whether Amazon S3 replicates objects created with server-side encryption - // using an AWS KMS-managed key. + // If the status is not Enabled, replication for S3 objects encrypted with AWS + // KMS is disabled. // // Status is a required field Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` @@ -23292,14 +21507,11 @@ func (s *StatsEvent) UnmarshalEvent( return nil } -// Specifies data related to access patterns to be collected and made available -// to analyze the tradeoffs between different storage classes for an Amazon -// S3 bucket. type StorageClassAnalysis struct { _ struct{} `type:"structure"` - // Specifies how data related to the storage class analysis for an Amazon S3 - // bucket should be exported. + // A container used to describe how data related to the storage class analysis + // should be exported. 
DataExport *StorageClassAnalysisDataExport `type:"structure"` } @@ -23539,20 +21751,16 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { } // A container for specifying the configuration for publication of messages -// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// to an Amazon Simple Notification Service (Amazon SNS) topic.when Amazon S3 // detects specified events. type TopicConfiguration struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket event about which to send notifications. For more information, - // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // A container for object key name filtering rules. For information about key + // name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -23561,7 +21769,7 @@ type TopicConfiguration struct { Id *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 - // publishes a message when it detects events of the specified type. + // will publish a message when it detects events of the specified type. // // TopicArn is a required field TopicArn *string `locationName:"Topic" type:"string" required:"true"` @@ -23670,19 +21878,18 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } -// Specifies when an object transitions to a specified storage class. type Transition struct { _ struct{} `type:"structure"` - // Indicates when objects are transitioned to the specified storage class. The - // date value must be in ISO 8601 format. The time is always midnight UTC. + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // Indicates the number of days after creation when objects are transitioned - // to the specified storage class. The value must be a positive integer. + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. Days *int64 `type:"integer"` - // The storage class to which you want the object to transition. + // The class of storage used to store the object. StorageClass *string `type:"string" enum:"TransitionStorageClass"` } @@ -23715,7 +21922,7 @@ func (s *Transition) SetStorageClass(v string) *Transition { } type UploadPartCopyInput struct { - _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` + _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -23743,7 +21950,7 @@ type UploadPartCopyInput struct { // the form bytes=first-last, where the first and last are the zero-based byte // offsets to copy. For example, bytes=0-9 indicates that you want to copy the // first ten bytes of the source. You can copy a range only if the source object - // is greater than 5 MB. + // is greater than 5 GB. 
CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` // Specifies the algorithm to use when decrypting the source object (e.g., AES256). @@ -23752,7 +21959,7 @@ type UploadPartCopyInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -23783,7 +21990,7 @@ type UploadPartCopyInput struct { // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -23812,9 +22019,6 @@ func (s *UploadPartCopyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.CopySource == nil { invalidParams.Add(request.NewErrParamRequired("CopySource")) } @@ -23985,7 +22189,7 @@ type UploadPartCopyOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -24045,7 +22249,7 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy } type UploadPartInput struct { - _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` + _ struct{} `type:"structure" payload:"Body"` // Object data. Body io.ReadSeeker `type:"blob"` @@ -24059,9 +22263,7 @@ type UploadPartInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameted is required - // if object lock parameters are specified. + // The base64-encoded 128-bit MD5 digest of the part data. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // Object key for which the multipart upload was initiated. @@ -24090,7 +22292,7 @@ type UploadPartInput struct { // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. 
This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -24119,9 +22321,6 @@ func (s *UploadPartInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -24243,7 +22442,7 @@ type UploadPartOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -24296,9 +22495,6 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { return s } -// Describes the versioning state of an Amazon S3 bucket. For more information, -// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) -// in the Amazon Simple Storage Service API Reference. type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -24333,22 +22529,15 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { return s } -// Specifies website configuration parameters for an Amazon S3 bucket. type WebsiteConfiguration struct { _ struct{} `type:"structure"` - // The name of the error document for the website. ErrorDocument *ErrorDocument `type:"structure"` - // The name of the index document for the website. IndexDocument *IndexDocument `type:"structure"` - // The redirect behavior for every request to this bucket's website endpoint. - // - // If you specify this property, you can't specify any other property. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - // Rules that define when a redirect is applied and the redirect behavior. 
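The WebsiteConfiguration struct whose field docs are stripped in this hunk is the same shape PutBucketWebsite accepts. A minimal sketch with placeholder names; per the removed comment, RedirectAllRequestsTo would be set instead of, not alongside, the other fields:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        // Serve index.html by default and error.html on errors.
        _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
            Bucket: aws.String("example-bucket"), // placeholder
            WebsiteConfiguration: &s3.WebsiteConfiguration{
                IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
                ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }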
RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -24560,12 +22749,6 @@ const ( // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" - - // EventS3ObjectRestorePost is a Event enum value - EventS3ObjectRestorePost = "s3:ObjectRestore:Post" - - // EventS3ObjectRestoreCompleted is a Event enum value - EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" ) const ( @@ -24606,9 +22789,6 @@ const ( // InventoryFormatOrc is a InventoryFormat enum value InventoryFormatOrc = "ORC" - - // InventoryFormatParquet is a InventoryFormat enum value - InventoryFormatParquet = "Parquet" ) const ( @@ -24648,15 +22828,6 @@ const ( // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" - - // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value - InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" - - // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value - InventoryOptionalFieldObjectLockMode = "ObjectLockMode" - - // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value - InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" ) const ( @@ -24714,35 +22885,6 @@ const ( ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" ) -const ( - // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value - ObjectLockEnabledEnabled = "Enabled" -) - -const ( - // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value - ObjectLockLegalHoldStatusOn = "ON" - - // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value - ObjectLockLegalHoldStatusOff = "OFF" -) - -const ( - // ObjectLockModeGovernance is a ObjectLockMode enum value - ObjectLockModeGovernance = "GOVERNANCE" - - // ObjectLockModeCompliance is a ObjectLockMode enum value - ObjectLockModeCompliance = "COMPLIANCE" -) - -const ( - // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value - ObjectLockRetentionModeGovernance = "GOVERNANCE" - - // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value - ObjectLockRetentionModeCompliance = "COMPLIANCE" -) - const ( // ObjectStorageClassStandard is a ObjectStorageClass enum value ObjectStorageClassStandard = "STANDARD" @@ -24758,12 +22900,6 @@ const ( // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value ObjectStorageClassOnezoneIa = "ONEZONE_IA" - - // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value - ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" - - // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value - ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( @@ -24888,15 +23024,6 @@ const ( // StorageClassOnezoneIa is a StorageClass enum value StorageClassOnezoneIa = "ONEZONE_IA" - - // StorageClassIntelligentTiering is a StorageClass enum value - StorageClassIntelligentTiering = "INTELLIGENT_TIERING" - - // StorageClassGlacier is a StorageClass enum value - StorageClassGlacier = "GLACIER" - - // StorageClassDeepArchive is a StorageClass enum value - StorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( @@ -24932,12 +23059,6 @@ const ( // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value TransitionStorageClassOnezoneIa = "ONEZONE_IA" - - // TransitionStorageClassIntelligentTiering is a 
TransitionStorageClass enum value - TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" - - // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value - TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go index 9ba8a788..bc68a46a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -80,8 +80,7 @@ func buildGetBucketLocation(r *request.Request) { out := r.Data.(*GetBucketLocationOutput) b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, - "failed reading response body", err) + r.Error = awserr.New("SerializationError", "failed reading response body", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 23d386b1..6f560a40 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -17,8 +17,7 @@ func defaultInitClientFn(c *client.Client) { // Require SSL when using SSE keys c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeyMD5) - c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) + c.Handlers.Build.PushBack(computeSSEKeys) // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.Clear() @@ -34,7 +33,6 @@ func defaultInitRequestFn(r *request.Request) { switch r.Operation.Name { case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, - opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration, opPutBucketReplication: // These S3 operations require Content-MD5 to be set r.Handlers.Build.PushBack(contentMD5) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go index 4b65f715..39b912c2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -63,20 +63,6 @@ // See the s3manager package's Downloader type documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader // -// Automatic URI cleaning -// -// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname) -// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct -// used by the service client. 
-// -// svc := s3.New(sess, &aws.Config{ -// DisableRestProtocolURICleaning: aws.Bool(true), -// }) -// out, err := svc.GetObject(&s3.GetObjectInput { -// Bucket: aws.String("bucketname"), -// Key: aws.String("//foo//bar//moo"), -// }) -// // Get Bucket Region // // GetBucketRegion will attempt to get the region for a bucket using a region diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go index b71c835d..8010c4fa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -3,7 +3,6 @@ package s3 import ( "crypto/md5" "encoding/base64" - "net/http" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -31,54 +30,25 @@ func validateSSERequiresSSL(r *request.Request) { } } -const ( - sseKeyHeader = "x-amz-server-side-encryption-customer-key" - sseKeyMD5Header = sseKeyHeader + "-md5" -) - -func computeSSEKeyMD5(r *request.Request) { - var key string - if g, ok := r.Params.(sseCustomerKeyGetter); ok { - key = g.getSSECustomerKey() - } - - computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) -} - -const ( - copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" - copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" -) - -func computeCopySourceSSEKeyMD5(r *request.Request) { - var key string - if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { - key = g.getCopySourceSSECustomerKey() +func computeSSEKeys(r *request.Request) { + headers := []string{ + "x-amz-server-side-encryption-customer-key", + "x-amz-copy-source-server-side-encryption-customer-key", } - computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) -} - -func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { - if len(key) == 0 { - // Backwards compatiablity where user just set the header value instead - // of using the API parameter, or setting the header value for an - // operation without the parameters modeled. - key = r.Header.Get(keyHeader) - if len(key) == 0 { - return + for _, h := range headers { + md5h := h + "-md5" + if key := r.HTTPRequest.Header.Get(h); key != "" { + // Base64-encode the value + b64v := base64.StdEncoding.EncodeToString([]byte(key)) + r.HTTPRequest.Header.Set(h, b64v) + + // Add MD5 if it wasn't computed + if r.HTTPRequest.Header.Get(md5h) == "" { + sum := md5.Sum([]byte(key)) + b64sum := base64.StdEncoding.EncodeToString(sum[:]) + r.HTTPRequest.Header.Set(md5h, b64sum) + } } - - // In backwards compatiable, the header's value is not base64 encoded, - // and needs to be encoded and updated by the SDK's customizations. - b64Key := base64.StdEncoding.EncodeToString([]byte(key)) - r.Header.Set(keyHeader, b64Key) - } - - // Only update Key's MD5 if not already set. 
- if len(r.Header.Get(keyMD5Header)) == 0 { - sum := md5.Sum([]byte(key)) - keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) - r.Header.Set(keyMD5Header, keyMD5) } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index f6a69aed..fde3050f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -14,7 +14,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "unable to read response body", err), + awserr.New("SerializationError", "unable to read response body", err), r.HTTPResponse.StatusCode, r.RequestID, ) @@ -31,7 +31,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { unmarshalError(r) if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == request.ErrCodeSerialization { + if err.Code() == "SerializationError" { r.Error = nil return } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index 5b63fac7..12c0612c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) type xmlErrorResponse struct { @@ -27,50 +26,40 @@ func unmarshalError(r *request.Request) { // Bucket exists in a different region, and request needs // to be made to the correct region. if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { - msg := fmt.Sprintf( - "incorrect region, the bucket is not in '%s' region at endpoint '%s'", - aws.StringValue(r.Config.Region), - aws.StringValue(r.Config.Endpoint), - ) - if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 { - msg += fmt.Sprintf(", bucket is in '%s' region", v) - } r.Error = awserr.NewRequestFailure( - awserr.New("BucketRegionError", msg, nil), + awserr.New("BucketRegionError", + fmt.Sprintf("incorrect region, the bucket is not in '%s' region", + aws.StringValue(r.Config.Region)), + nil), r.HTTPResponse.StatusCode, r.RequestID, ) return } + var errCode, errMsg string + // Attempt to parse error from body if it is known - var errResp xmlErrorResponse - err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) - if err == io.EOF { - // Only capture the error if an unmarshal error occurs that is not EOF, - // because S3 might send an error without a error message which causes - // the XML unmarshal to fail with EOF. 
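The computeSSEKeys handler restored above base64-encodes the raw SSE-C key headers and fills in the -md5 companion header when it is missing. A standalone sketch of that transformation, with an illustrative placeholder key:

    package main

    import (
        "crypto/md5"
        "encoding/base64"
        "fmt"
    )

    // sseHeaders mirrors the handler: the raw key is sent base64-encoded, and
    // the -md5 companion header carries the base64 of the raw key's MD5 digest.
    func sseHeaders(rawKey string) (b64Key, b64KeyMD5 string) {
        sum := md5.Sum([]byte(rawKey))
        return base64.StdEncoding.EncodeToString([]byte(rawKey)),
            base64.StdEncoding.EncodeToString(sum[:])
    }

    func main() {
        k, m := sseHeaders("0123456789abcdef0123456789abcdef") // placeholder 32-byte key
        fmt.Println("x-amz-server-side-encryption-customer-key:", k)
        fmt.Println("x-amz-server-side-encryption-customer-key-md5:", m)
    }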
+ resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + errCode = "SerializationError" + errMsg = "failed to decode S3 XML error response" + } else { + errCode = resp.Code + errMsg = resp.Message err = nil } - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } // Fallback to status code converted to message if still no error code - if len(errResp.Code) == 0 { + if len(errCode) == 0 { statusText := http.StatusText(r.HTTPResponse.StatusCode) - errResp.Code = strings.Replace(statusText, " ", "", -1) - errResp.Message = statusText + errCode = strings.Replace(statusText, " ", "", -1) + errMsg = statusText } r.Error = awserr.NewRequestFailure( - awserr.New(errResp.Code, errResp.Message, err), + awserr.New(errCode, errMsg, err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index eb0a6a41..ee908f91 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -3,12 +3,10 @@ package sts import ( - "fmt" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" ) @@ -56,26 +54,38 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // AssumeRole API operation for AWS Security Token Service. // -// Returns a set of temporary security credentials that you can use to access -// AWS resources that you might not normally have access to. These temporary -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use AssumeRole within your account or for cross-account -// access. For a comparison of AssumeRole with other API operations that produce -// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) that you can use to access +// AWS resources that you might not normally have access to. Typically, you +// use AssumeRole for cross-account access or federation. For a comparison of +// AssumeRole with the other APIs that produce temporary credentials, see Requesting +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// You cannot use AWS account root user credentials to call AssumeRole. You -// must use credentials for an IAM user or an IAM role to call AssumeRole. +// Important: You cannot call AssumeRole by using AWS root account credentials; +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. // // For cross-account access, imagine that you own multiple accounts and need // to access resources in each account. 
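The restored unmarshalError path decodes the response body into xmlErrorResponse and falls back to the HTTP status text when no error code is present. A self-contained sketch of the decode step; the error body is illustrative, and the struct fields are assumed to match the vendored xmlErrorResponse declaration:

    package main

    import (
        "encoding/xml"
        "fmt"
        "strings"
    )

    // Assumed shape of the vendored xmlErrorResponse type.
    type xmlErrorResponse struct {
        XMLName xml.Name `xml:"Error"`
        Code    string   `xml:"Code"`
        Message string   `xml:"Message"`
    }

    func main() {
        body := `<Error><Code>NoSuchBucket</Code><Message>The specified bucket does not exist</Message></Error>` // illustrative
        resp := &xmlErrorResponse{}
        if err := xml.NewDecoder(strings.NewReader(body)).Decode(resp); err != nil {
            fmt.Println("SerializationError:", err)
            return
        }
        fmt.Println(resp.Code+":", resp.Message)
    }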
You could create long-term credentials // in each account to access those resources. However, managing all those credentials // and remembering which one can access which account can be time consuming. -// Instead, you can create one set of long-term credentials in one account. -// Then use temporary security credentials to access all the other accounts +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts // by assuming roles in those accounts. For more information about roles, see -// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the IAM User Guide. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) // in the IAM User Guide. // // By default, the temporary security credentials created by AssumeRole last @@ -84,73 +94,69 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // seconds (15 minutes) up to the maximum session duration setting for the role. // This setting can have a value from 1 hour to 12 hours. To learn how to view // the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // // The temporary security credentials created by AssumeRole can be used to make -// API calls to any AWS service with the following exception: You cannot call -// the AWS STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. 
The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// API calls to any AWS service with the following exception: you cannot call +// the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) // in the IAM User Guide. // -// To assume a role from a different account, your AWS account must be trusted -// by the role. The trust relationship is defined in the role's trust policy -// when the role is created. That trust policy states which accounts are allowed -// to delegate that access to users in the account. -// -// A user who wants to access a role in a different account must also have permissions -// that are delegated from the user account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. If the user is in the same account as the -// role, then you can do either of the following: -// -// * Attach a policy to the user (identical to the previous user in a different -// account). -// -// * Add the user as a principal directly in the role's trust policy. -// -// In this case, the trust policy acts as an IAM resource-based policy. Users -// in the same account as the role do not need explicit permission to assume -// the role. For more information about trust policies and resource-based policies, -// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// To assume a role, your AWS account must be trusted by the role. The trust +// relationship is defined in the role's trust policy when the role is created. +// That trust policy states which accounts are allowed to delegate access to +// this account's role. +// +// The user who wants to access the role must also have permissions delegated +// from the role's administrator. 
If the user is in a different account than +// the role, then the user's administrator must attach a policy that allows +// the user to call AssumeRole on the ARN of the role in the other account. +// If the user is in the same account as the role, then you can either attach +// a policy to the user (identical to the previous different account user), +// or you can add the user as a principal directly in the role's trust policy. +// In this case, the trust policy acts as the only resource-based policy in +// IAM, and users in the same account as the role do not need explicit permission +// to assume the role. For more information about trust policies and resource-based +// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. // // Using MFA with AssumeRole // -// (Optional) You can include multi-factor authentication (MFA) information -// when you call AssumeRole. This is useful for cross-account scenarios to ensure -// that the user that assumes the role has been authenticated with an AWS MFA -// device. In that scenario, the trust policy of the role being assumed includes -// a condition that tests for MFA authentication. If the caller does not include -// valid MFA information, the request to assume the role is denied. The condition -// in a trust policy that tests for MFA authentication might look like the following -// example. +// You can optionally include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios in which +// you want to make sure that the user who is assuming the role has been authenticated +// using an AWS MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication; if the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication +// might look like the following example. // // "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} // -// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) // in the IAM User Guide guide. // // To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode // parameters. The SerialNumber value identifies the user's hardware or virtual // MFA device. The TokenCode is the time-based one-time password (TOTP) that -// the MFA device produces. +// the MFA devices produces. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -173,7 +179,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
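Tying the AssumeRole discussion above together, a minimal sketch of a cross-account call with the optional MFA parameters; the role ARN, MFA serial number, and token code are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.AssumeRole(&sts.AssumeRoleInput{
            RoleArn:         aws.String("arn:aws:iam::111122223333:role/example"), // placeholder
            RoleSessionName: aws.String("cross-account-demo"),
            DurationSeconds: aws.Int64(3600),
            // Only needed when the role's trust policy tests for MFA:
            SerialNumber: aws.String("arn:aws:iam::444455556666:mfa/user"), // placeholder
            TokenCode:    aws.String("123456"),                             // placeholder TOTP
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
    }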
// // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole @@ -237,7 +243,6 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re output = &AssumeRoleWithSAMLOutput{} req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials return } @@ -247,9 +252,9 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // via a SAML authentication response. This operation provides a mechanism for // tying an enterprise identity store or directory to role-based AWS access // without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML -// with the other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this operation consist of @@ -264,36 +269,37 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session // duration setting for the role. This setting can have a value from 1 hour // to 12 hours. To learn how to view the maximum value for your role, see View -// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any AWS service with the following exception: you cannot -// call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies -// to this operation returns new temporary credentials. 
The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by the intersection of both the access policy +// of the role that is being assumed, and the policy that you pass. This means +// that both policies must grant the permission for the action to be allowed. +// This gives you a way to further restrict the permissions for the resulting +// temporary security credentials. You cannot use the passed policy to grant +// permissions that are in excess of those allowed by the access policy of the +// role that is being assumed. For more information, see Permissions for AssumeRole, +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) // in the IAM User Guide. // // Before your application can call AssumeRoleWithSAML, you must configure your // SAML identity provider (IdP) to issue the claims required by AWS. Additionally, // you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. // // Calling AssumeRoleWithSAML does not require the use of AWS security credentials. // The identity of the caller is validated by using keys in the metadata document @@ -307,16 +313,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // For more information, see the following resources: // -// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) // in the IAM User Guide. // -// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) // in the IAM User Guide. // -// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) // in the IAM User Guide. 
// -// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -355,7 +361,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML @@ -419,42 +425,41 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI output = &AssumeRoleWithWebIdentityOutput{} req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials return } // AssumeRoleWithWebIdentity API operation for AWS Security Token Service. // // Returns a set of temporary security credentials for users who have been authenticated -// in a mobile or web application with a web identity provider. Example providers -// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID -// Connect-compatible identity provider. +// in a mobile or web application with a web identity provider, such as Amazon +// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible +// identity provider. // // For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// to uniquely identify a user. You can also supply the user with a consistent -// identity throughout the lifetime of an application. -// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely +// identify a user and supply the user with a consistent identity throughout +// the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview +// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) // in the AWS SDK for iOS Developer Guide. // // Calling AssumeRoleWithWebIdentity does not require the use of AWS security // credentials. 
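A minimal sketch of the AssumeRoleWithWebIdentity call described here, exchanging an identity-provider token for temporary credentials; the role ARN and the environment variable used as the token source are placeholders:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
            RoleArn:          aws.String("arn:aws:iam::111122223333:role/web-app"), // placeholder
            RoleSessionName:  aws.String("app-user"),
            WebIdentityToken: aws.String(os.Getenv("OIDC_TOKEN")), // placeholder token source
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
    }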
Therefore, you can distribute an application (for example, on // mobile devices) that requests temporary security credentials without including -// long-term AWS credentials in the application. You also don't need to deploy -// server-based proxy services that use long-term AWS credentials. Instead, -// the identity of the caller is validated by using a token from the web identity -// provider. For a comparison of AssumeRoleWithWebIdentity with the other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// long-term AWS credentials in the application, and without deploying server-based +// proxy services that use long-term AWS credentials. Instead, the identity +// of the caller is validated by using a token from the web identity provider. +// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce +// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this API consist of an access // key ID, a secret access key, and a security token. Applications can use these -// temporary security credentials to sign calls to AWS service API operations. +// temporary security credentials to sign calls to AWS service APIs. // // By default, the temporary security credentials created by AssumeRoleWithWebIdentity // last for one hour. However, you can use the optional DurationSeconds parameter @@ -462,29 +467,29 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // seconds (15 minutes) up to the maximum session duration setting for the role. // This setting can have a value from 1 hour to 12 hours. To learn how to view // the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: -// you cannot call the STS GetFederationToken or GetSessionToken API operations. 
-// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) // in the IAM User Guide. // // Before your application can call AssumeRoleWithWebIdentity, you must have @@ -503,19 +508,21 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // For more information about how to use web identity federation and the AssumeRoleWithWebIdentity // API, see the following resources: // -// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). -// Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then -// using those credentials to make a request to AWS. 
// -// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and -// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers, and then how to use the information from these providers to -// get and use temporary security credentials. +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security +// credentials, and then using those credentials to make a request to AWS. +// +// +// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample +// apps that show how to invoke the identity providers, and then how to use +// the information from these providers to get and use temporary security +// credentials. // // * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). // This article discusses web identity federation and shows an example of @@ -565,7 +572,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity @@ -637,17 +644,17 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // Decodes additional information about the authorization status of a request // from an encoded message returned in response to an AWS request. // -// For example, if a user is not authorized to perform an operation that he -// or she has requested, the request returns a Client.UnauthorizedOperation -// response (an HTTP 403 response). Some AWS operations additionally return -// an encoded message that can provide details about this authorization failure. +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. // -// Only certain AWS operations return an encoded authorization message. The -// documentation for an individual operation indicates whether that operation -// returns an encoded message in addition to returning an HTTP code. +// Only certain AWS actions return an encoded authorization message. The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. 
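For DecodeAuthorizationMessage, a minimal sketch; the encoded message would come from a Client.UnauthorizedOperation response and is left as a placeholder here:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))
        encoded := "..." // placeholder: from a Client.UnauthorizedOperation response

        out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
            EncodedMessage: aws.String(encoded),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.DecodedMessage))
    }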
 //
 // The message is encoded because the details of the authorization status can
-// constitute privileged information that the user who requested the operation
+// constitute privileged information that the user who requested the action
 // should not see. To decode an authorization status message, a user must be
 // granted permissions via an IAM policy to request the DecodeAuthorizationMessage
 // (sts:DecodeAuthorizationMessage) action.
@@ -656,7 +663,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
 //
 //    * Whether the request was denied due to an explicit deny or due to the
 //    absence of an explicit allow. For more information, see Determining Whether
-//    a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+//    a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
 //    in the IAM User Guide.
 //
 //    * The principal who made the request.
@@ -702,102 +709,6 @@ func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *Deco
 	return out, req.Send()
 }
 
-const opGetAccessKeyInfo = "GetAccessKeyInfo"
-
-// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
-// client's request for the GetAccessKeyInfo operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-//
-//    // Example sending a request using the GetAccessKeyInfoRequest method.
-//    req, resp := client.GetAccessKeyInfoRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
-func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
-	op := &request.Operation{
-		Name:       opGetAccessKeyInfo,
-		HTTPMethod: "POST",
-		HTTPPath:   "/",
-	}
-
-	if input == nil {
-		input = &GetAccessKeyInfoInput{}
-	}
-
-	output = &GetAccessKeyInfoOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// GetAccessKeyInfo API operation for AWS Security Token Service.
-//
-// Returns the account identifier for the specified access key ID.
-//
-// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE)
-// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
-// For more information about access keys, see Managing Access Keys for IAM
-// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
-// in the IAM User Guide.
-//
-// When you pass an access key ID to this operation, it returns the ID of the
-// AWS account to which the keys belong. Access key IDs beginning with AKIA
-// are long-term credentials for an IAM user or the AWS account root user. Access
-// key IDs beginning with ASIA are temporary credentials that are created using
-// STS operations. If the account in the response belongs to you, you can sign
-// in as the root user and review your root user access keys. Then, you can
-// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
-// to learn which IAM user owns the keys. To learn who requested the temporary
-// credentials for an ASIA access key, view the STS events in your CloudTrail
-// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html).
-//
-// This operation does not indicate the state of the access key. The key might
-// be active, inactive, or deleted. Active keys might not have permissions to
-// perform an operation. Providing a deleted access key might return an error
-// that the key doesn't exist.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation GetAccessKeyInfo for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
-func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) {
-	req, out := c.GetAccessKeyInfoRequest(input)
-	return out, req.Send()
-}
-
-// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetAccessKeyInfo for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
-	req, out := c.GetAccessKeyInfoRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
 const opGetCallerIdentity = "GetCallerIdentity"
 
 // GetCallerIdentityRequest generates a "aws/request.Request" representing the
@@ -842,15 +753,8 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ
 // GetCallerIdentity API operation for AWS Security Token Service.
 //
-// Returns details about the IAM user or role whose credentials are used to
-// call the operation.
-//
-// No permissions are required to perform this operation. If an administrator
-// adds a policy to your IAM user or role that explicitly denies access to the
-// sts:GetCallerIdentity action, you can still perform this operation. Permissions
-// are not required because the same information is returned when an IAM user
-// or role is denied access. To view an example response, see I Am Not Authorized
-// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa).
+// Returns details about the IAM identity whose credentials are used to call
+// the API.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -927,65 +831,81 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
 // Returns a set of temporary security credentials (consisting of an access
 // key ID, a secret access key, and a security token) for a federated user.
 // A typical use is in a proxy application that gets temporary security credentials
-// on behalf of distributed applications inside a corporate network. You must
-// call the GetFederationToken operation using the long-term security credentials
-// of an IAM user. As a result, this call is appropriate in contexts where those
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
 // credentials can be safely stored, usually in a server-based application.
-// For a comparison of GetFederationToken with the other API operations that
-// produce temporary credentials, see Requesting Temporary Security Credentials
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
 // in the IAM User Guide.
 //
-// You can create a mobile-based or browser-based app that can authenticate
+// If you are creating a mobile-based or browser-based app that can authenticate
 // users using a web identity provider like Login with Amazon, Facebook, Google,
-// or an OpenID Connect-compatible identity provider. In this case, we recommend
-// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
 // For more information, see Federation Through a Web-based Identity Provider
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
-//
-// You can also call GetFederationToken using the security credentials of an
-// AWS account root user, but we do not recommend it. Instead, we recommend
-// that you create an IAM user for the purpose of the proxy application. Then
-// attach a policy to the IAM user that limits federated users to only the actions
-// and resources that they need to access. For more information, see IAM Best
-// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
 // in the IAM User Guide.
 //
-// The temporary credentials are valid for the specified duration, from 900
-// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
-// is 43,200 seconds (12 hours). Temporary credentials that are obtained by
-// using AWS account root user credentials have a maximum duration of 3,600
-// seconds (1 hour).
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
 //
 // The temporary security credentials created by GetFederationToken can be used
 // to make API calls to any AWS service with the following exceptions:
 //
-//    * You cannot use these credentials to call any IAM API operations.
+//    * You cannot use these credentials to call any IAM APIs.
 //
-//    * You cannot call any STS API operations except GetCallerIdentity.
+//    * You cannot call any STS APIs except GetCallerIdentity.
 //
 // Permissions
 //
-// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plain text that you use for both inline
-// and managed session policies shouldn't exceed 2048 characters.
-//
-// Though the session policy parameters are optional, if you do not pass a policy,
-// then the resulting federated user session has no permissions. The only exception
-// is when the credentials are used to access a resource that has a resource-based
-// policy that specifically references the federated user session in the Principal
-// element of the policy. When you pass session policies, the session permissions
-// are the intersection of the IAM user policies and the session policies that
-// you pass. This gives you a way to further restrict the permissions for a
-// federated user. You cannot use session policies to grant more permissions
-// than those that are defined in the permissions policy of the IAM user. For
-// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide. For information about using GetFederationToken to
-// create temporary security credentials, see GetFederationToken—Federation
-// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+// The permissions for the temporary security credentials returned by GetFederationToken
+// are determined by a combination of the following:
+//
+//    * The policy or policies that are attached to the IAM user whose credentials
+//    are used to call GetFederationToken.
+//
+//    * The policy that is passed as a parameter in the call.
+//
+// The passed policy is attached to the temporary security credentials that
+// result from the GetFederationToken API call--that is, to the federated user.
+// When the federated user makes an AWS request, AWS evaluates the policy attached
+// to the federated user in combination with the policy or policies attached
+// to the IAM user whose credentials were used to call GetFederationToken. AWS
+// allows the federated user's request only when both the federated user and
+// the IAM user are explicitly allowed to perform the requested action. The
+// passed policy cannot grant more permissions than those that are defined in
+// the IAM user policy.
+//
+// A typical use case is that the permissions of the IAM user whose credentials
+// are used to call GetFederationToken are designed to allow access to all the
+// actions and resources that any federated user will need. Then, for individual
+// users, you pass a policy to the operation that scopes down the permissions
+// to a level that's appropriate to that individual user, using a policy that
+// allows only a subset of permissions that are granted to the IAM user.
+//
+// If you do not pass a policy, the resulting temporary security credentials
+// have no effective permissions. The only exception is when the temporary security
+// credentials are used to access a resource that has a resource-based policy
+// that specifically allows the federated user to access the resource.
+//
+// For more information about how permissions work, see Permissions for GetFederationToken
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+// For information about using GetFederationToken to create temporary security
+// credentials, see GetFederationToken—Federation Through a Custom Identity
+// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -1008,7 +928,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
 // STS is not activated in the requested region for the account that is being
 // asked to generate credentials. The account administrator must use the IAM
 // console to activate STS in that region. For more information, see Activating
-// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 // in the IAM User Guide.
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
@@ -1080,47 +1000,48 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 // Returns a set of temporary credentials for an AWS account or IAM user. The
 // credentials consist of an access key ID, a secret access key, and a security
 // token. Typically, you use GetSessionToken if you want to use MFA to protect
-// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances.
-// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA
-// code that is associated with their MFA device. Using the temporary security
-// credentials that are returned from the call, IAM users can then make programmatic
-// calls to API operations that require MFA authentication. If you do not supply
-// a correct MFA code, then the API returns an access denied error. For a comparison
-// of GetSessionToken with the other API operations that produce temporary credentials,
-// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
+// IAM users would need to call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that are returned from the call, IAM users can then make programmatic calls
+// to APIs that require MFA authentication. If you do not supply a correct MFA
+// code, then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
 // in the IAM User Guide.
 //
-// The GetSessionToken operation must be called by using the long-term AWS security
-// credentials of the AWS account root user or an IAM user. Credentials that
-// are created by IAM users are valid for the duration that you specify. This
-// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
-// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials
-// based on account credentials can range from 900 seconds (15 minutes) up to
-// 3,600 seconds (1 hour), with a default of 1 hour.
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
 //
 // The temporary security credentials created by GetSessionToken can be used
 // to make API calls to any AWS service with the following exceptions:
 //
-//    * You cannot call any IAM API operations unless MFA authentication information
-//    is included in the request.
+//    * You cannot call any IAM APIs unless MFA authentication information is
+//    included in the request.
 //
-//    * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//    * You cannot call any STS API except AssumeRole or GetCallerIdentity.
 //
-// We recommend that you do not call GetSessionToken with AWS account root user
-// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
 // by creating one or more IAM users, giving them the necessary permissions,
 // and using IAM users for everyday interaction with AWS.
 //
-// The credentials that are returned by GetSessionToken are based on permissions
-// associated with the user whose credentials were used to call the operation.
-// If GetSessionToken is called using AWS account root user credentials, the
-// temporary credentials have root user permissions. Similarly, if GetSessionToken
-// is called using the credentials of an IAM user, the temporary credentials
-// have the same permissions as the IAM user.
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with the account
+// or IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
 //
 // For more information about using GetSessionToken to create temporary credentials,
-// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
 // in the IAM User Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1135,7 +1056,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 // STS is not activated in the requested region for the account that is being
 // asked to generate credentials. The account administrator must use the IAM
 // console to activate STS in that region. For more information, see Activating
-// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 // in the IAM User Guide.
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
@@ -1170,7 +1091,7 @@ type AssumeRoleInput struct {
 	// a session duration of 12 hours, but your administrator set the maximum session
 	// duration to 6 hours, your operation fails. To learn how to view the maximum
 	// value for your role, see View the Maximum Session Duration Setting for a
-	// Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+	// Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 	// in the IAM User Guide.
 	//
 	// By default, the value is set to 3600 seconds.
@@ -1180,77 +1101,51 @@ type AssumeRoleInput struct {
 	// to the federation endpoint for a console sign-in token takes a SessionDuration
 	// parameter that specifies the maximum length of the console session. For more
 	// information, see Creating a URL that Enables Federated Users to Access the
-	// AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
 	// in the IAM User Guide.
 	DurationSeconds *int64 `min:"900" type:"integer"`
 
-	// A unique identifier that might be required when you assume a role in another
-	// account. If the administrator of the account to which the role belongs provided
-	// you with an external ID, then provide that value in the ExternalId parameter.
-	// This value can be any string, such as a passphrase or account number. A cross-account
-	// role is usually set up to trust everyone in an account. Therefore, the administrator
-	// of the trusting account might send an external ID to the administrator of
-	// the trusted account. That way, only someone with the ID can assume the role,
-	// rather than everyone in the account. For more information about the external
-	// ID, see How to Use an External ID When Granting Access to Your AWS Resources
-	// to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+	// A unique identifier that is used by third parties when assuming roles in
+	// their customers' accounts. For each role that the third party can assume,
+	// they should instruct their customers to ensure the role's trust policy checks
+	// for the external ID that the third party generated. Each time the third party
+	// assumes the role, they should pass the customer's external ID. The external
+	// ID is useful in order to help third parties bind a role to the customer who
+	// created it. For more information about the external ID, see How to Use an
+	// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
 	// in the IAM User Guide.
 	//
-	// The regex used to validate this parameter is a string of characters consisting
 	// of upper- and lower-case alphanumeric characters with no spaces. You can
 	// also include underscores or any of the following characters: =,.@:/-
 	ExternalId *string `min:"2" type:"string"`
 
-	// An IAM policy in JSON format that you want to use as an inline session policy.
-	//
-	// This parameter is optional. Passing policies to this operation returns new
-	// temporary credentials. The resulting session's permissions are the intersection
-	// of the role's identity-based policy and the session policies. You can use
-	// the role's temporary credentials in subsequent AWS API calls to access resources
-	// in the account that owns the role. You cannot use session policies to grant
-	// more permissions than those allowed by the identity-based policy of the role
-	// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// An IAM policy in JSON format.
+	//
+	// This parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both (the intersection of) the access policy of the role that
+	// is being assumed, and the policy that you pass. This gives you a way to further
+	// restrict the permissions for the resulting temporary security credentials.
+	// You cannot use the passed policy to grant permissions that are in excess
+	// of those allowed by the access policy of the role that is being assumed.
+	// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+	// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
 	// in the IAM User Guide.
 	//
-	// The plain text that you use for both inline and managed session policies
-	// shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
-	// character from the space character to the end of the valid character list
-	// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// The format for this parameter, as described by its regex pattern, is a string
+	// of characters up to 2048 characters in length. The characters can be any
+	// ASCII character from the space character to the end of the valid character
+	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
 	// and carriage return (\u000D) characters.
 	//
-	// The characters in this parameter count towards the 2048 character session
-	// policy guideline. However, an AWS conversion compresses the session policies
-	// into a packed binary format that has a separate limit. This is the enforced
-	// limit. The PackedPolicySize response element indicates by percentage how
-	// close the policy is to the upper size limit.
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
 	Policy *string `min:"1" type:"string"`
 
-	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
-	// to use as managed session policies. The policies must exist in the same account
-	// as the role.
-	//
-	// This parameter is optional. You can provide up to 10 managed policy ARNs.
-	// However, the plain text that you use for both inline and managed session
-	// policies shouldn't exceed 2048 characters. For more information about ARNs,
-	// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-	// in the AWS General Reference.
-	//
-	// The characters in this parameter count towards the 2048 character session
-	// policy guideline. However, an AWS conversion compresses the session policies
-	// into a packed binary format that has a separate limit. This is the enforced
-	// limit. The PackedPolicySize response element indicates by percentage how
-	// close the policy is to the upper size limit.
-	//
-	// Passing policies to this operation returns new temporary credentials. The
-	// resulting session's permissions are the intersection of the role's identity-based
-	// policy and the session policies. You can use the role's temporary credentials
-	// in subsequent AWS API calls to access resources in the account that owns
-	// the role. You cannot use session policies to grant more permissions than
-	// those allowed by the identity-based policy of the role that is being assumed.
-	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-	// in the IAM User Guide.
-	PolicyArns []*PolicyDescriptorType `type:"list"`
-
 	// The Amazon Resource Name (ARN) of the role to assume.
 	//
 	// RoleArn is a required field
@@ -1263,8 +1158,8 @@ type AssumeRoleInput struct {
 	// scenarios, the role session name is visible to, and can be logged by the
 	// account that owns the role. The role session name is also used in the ARN
 	// of the assumed role principal. This means that subsequent cross-account API
-	// requests that use the temporary security credentials will expose the role
-	// session name to the external account in their AWS CloudTrail logs.
+	// requests using the temporary security credentials will expose the role session
+	// name to the external account in their CloudTrail logs.
 	//
 	// The regex used to validate this parameter is a string of characters consisting
 	// of upper- and lower-case alphanumeric characters with no spaces. You can
@@ -1334,16 +1229,6 @@ func (s *AssumeRoleInput) Validate() error {
 	if s.TokenCode != nil && len(*s.TokenCode) < 6 {
 		invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
 	}
-	if s.PolicyArns != nil {
-		for i, v := range s.PolicyArns {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -1369,12 +1254,6 @@ func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
 	return s
 }
 
-// SetPolicyArns sets the PolicyArns field's value.
-func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput {
-	s.PolicyArns = v
-	return s
-}
-
 // SetRoleArn sets the RoleArn field's value.
func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
 	s.RoleArn = &v
@@ -1414,8 +1293,10 @@ type AssumeRoleOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security (or session) token.
 	//
-	// The size of the security token that STS API operations return is not fixed.
-	// We strongly recommend that you make no assumptions about the maximum size.
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
 	Credentials *Credentials `type:"structure"`
 
 	// A percentage value that indicates the size of the policy in packed form.
@@ -1465,7 +1346,7 @@ type AssumeRoleWithSAMLInput struct {
 	// specify a session duration of 12 hours, but your administrator set the maximum
 	// session duration to 6 hours, your operation fails. To learn how to view the
 	// maximum value for your role, see View the Maximum Session Duration Setting
-	// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+	// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 	// in the IAM User Guide.
 	//
 	// By default, the value is set to 3600 seconds.
@@ -1475,60 +1356,36 @@ type AssumeRoleWithSAMLInput struct {
 	// to the federation endpoint for a console sign-in token takes a SessionDuration
 	// parameter that specifies the maximum length of the console session. For more
 	// information, see Creating a URL that Enables Federated Users to Access the
-	// AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
 	// in the IAM User Guide.
 	DurationSeconds *int64 `min:"900" type:"integer"`
 
-	// An IAM policy in JSON format that you want to use as an inline session policy.
-	//
-	// This parameter is optional. Passing policies to this operation returns new
-	// temporary credentials. The resulting session's permissions are the intersection
-	// of the role's identity-based policy and the session policies. You can use
-	// the role's temporary credentials in subsequent AWS API calls to access resources
-	// in the account that owns the role. You cannot use session policies to grant
-	// more permissions than those allowed by the identity-based policy of the role
-	// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// An IAM policy in JSON format.
+	//
+	// The policy parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both the access policy of the role that is being assumed,
+	// and the policy that you pass. This gives you a way to further restrict the
+	// permissions for the resulting temporary security credentials. You cannot
+	// use the passed policy to grant permissions that are in excess of those allowed
+	// by the access policy of the role that is being assumed. For more information,
+	// see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
 	// in the IAM User Guide.
 	//
-	// The plain text that you use for both inline and managed session policies
-	// shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
-	// character from the space character to the end of the valid character list
-	// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// The format for this parameter, as described by its regex pattern, is a string
+	// of characters up to 2048 characters in length. The characters can be any
+	// ASCII character from the space character to the end of the valid character
+	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
 	// and carriage return (\u000D) characters.
 	//
-	// The characters in this parameter count towards the 2048 character session
-	// policy guideline. However, an AWS conversion compresses the session policies
-	// into a packed binary format that has a separate limit. This is the enforced
-	// limit. The PackedPolicySize response element indicates by percentage how
-	// close the policy is to the upper size limit.
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
 	Policy *string `min:"1" type:"string"`
 
-	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
-	// to use as managed session policies. The policies must exist in the same account
-	// as the role.
-	//
-	// This parameter is optional. You can provide up to 10 managed policy ARNs.
-	// However, the plain text that you use for both inline and managed session
-	// policies shouldn't exceed 2048 characters. For more information about ARNs,
-	// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-	// in the AWS General Reference.
-	//
-	// The characters in this parameter count towards the 2048 character session
-	// policy guideline. However, an AWS conversion compresses the session policies
-	// into a packed binary format that has a separate limit. This is the enforced
-	// limit. The PackedPolicySize response element indicates by percentage how
-	// close the policy is to the upper size limit.
-	//
-	// Passing policies to this operation returns new temporary credentials. The
-	// resulting session's permissions are the intersection of the role's identity-based
-	// policy and the session policies. You can use the role's temporary credentials
-	// in subsequent AWS API calls to access resources in the account that owns
-	// the role. You cannot use session policies to grant more permissions than
-	// those allowed by the identity-based policy of the role that is being assumed.
-	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-	// in the IAM User Guide.
-	PolicyArns []*PolicyDescriptorType `type:"list"`
-
 	// The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
 	// the IdP.
 	//
@@ -1542,8 +1399,8 @@ type AssumeRoleWithSAMLInput struct {
 	// The base-64 encoded SAML authentication response provided by the IdP.
 	//
-	// For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
-	// in the IAM User Guide.
+	// For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+	// in the Using IAM guide.
 	//
 	// SAMLAssertion is a required field
 	SAMLAssertion *string `min:"4" type:"string" required:"true"`
@@ -1586,16 +1443,6 @@ func (s *AssumeRoleWithSAMLInput) Validate() error {
 	if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
 		invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
 	}
-	if s.PolicyArns != nil {
-		for i, v := range s.PolicyArns {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -1615,12 +1462,6 @@ func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
 	return s
 }
 
-// SetPolicyArns sets the PolicyArns field's value.
-func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput {
-	s.PolicyArns = v
-	return s
-}
-
 // SetPrincipalArn sets the PrincipalArn field's value.
 func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
 	s.PrincipalArn = &v
@@ -1655,8 +1496,10 @@ type AssumeRoleWithSAMLOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security (or session) token.
 	//
-	// The size of the security token that STS API operations return is not fixed.
-	// We strongly recommend that you make no assumptions about the maximum size.
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
 	Credentials *Credentials `type:"structure"`
 
 	// The value of the Issuer element of the SAML assertion.
@@ -1760,7 +1603,7 @@ type AssumeRoleWithWebIdentityInput struct {
 	// a session duration of 12 hours, but your administrator set the maximum session
 	// duration to 6 hours, your operation fails. To learn how to view the maximum
 	// value for your role, see View the Maximum Session Duration Setting for a
-	// Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+	// Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 	// in the IAM User Guide.
 	//
 	// By default, the value is set to 3600 seconds.
@@ -1770,60 +1613,35 @@ type AssumeRoleWithWebIdentityInput struct {
 	// to the federation endpoint for a console sign-in token takes a SessionDuration
 	// parameter that specifies the maximum length of the console session. For more
 	// information, see Creating a URL that Enables Federated Users to Access the
-	// AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
 	// in the IAM User Guide.
 	DurationSeconds *int64 `min:"900" type:"integer"`
 
-	// An IAM policy in JSON format that you want to use as an inline session policy.
+	// An IAM policy in JSON format.
 	//
-	// This parameter is optional. Passing policies to this operation returns new
-	// temporary credentials. The resulting session's permissions are the intersection
-	// of the role's identity-based policy and the session policies. You can use
-	// the role's temporary credentials in subsequent AWS API calls to access resources
-	// in the account that owns the role. You cannot use session policies to grant
-	// more permissions than those allowed by the identity-based policy of the role
-	// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// The policy parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both the access policy of the role that is being assumed,
+	// and the policy that you pass. This gives you a way to further restrict the
+	// permissions for the resulting temporary security credentials. You cannot
+	// use the passed policy to grant permissions that are in excess of those allowed
+	// by the access policy of the role that is being assumed. For more information,
+	// see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
 	// in the IAM User Guide.
 	//
-	// The plain text that you use for both inline and managed session policies
-	// shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
-	// character from the space character to the end of the valid character list
-	// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// The format for this parameter, as described by its regex pattern, is a string
+	// of characters up to 2048 characters in length. The characters can be any
+	// ASCII character from the space character to the end of the valid character
+	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
 	// and carriage return (\u000D) characters.
 	//
-	// The characters in this parameter count towards the 2048 character session
-	// policy guideline. However, an AWS conversion compresses the session policies
-	// into a packed binary format that has a separate limit. This is the enforced
-	// limit. The PackedPolicySize response element indicates by percentage how
-	// close the policy is to the upper size limit.
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
 	Policy *string `min:"1" type:"string"`
 
-	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
-	// to use as managed session policies. The policies must exist in the same account
-	// as the role.
-	//
-	// This parameter is optional. You can provide up to 10 managed policy ARNs.
-	// However, the plain text that you use for both inline and managed session
-	// policies shouldn't exceed 2048 characters. For more information about ARNs,
-	// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-	// in the AWS General Reference.
-	//
-	// The characters in this parameter count towards the 2048 character session
-	// policy guideline. However, an AWS conversion compresses the session policies
-	// into a packed binary format that has a separate limit. This is the enforced
-	// limit. The PackedPolicySize response element indicates by percentage how
-	// close the policy is to the upper size limit.
-	//
-	// Passing policies to this operation returns new temporary credentials. The
-	// resulting session's permissions are the intersection of the role's identity-based
-	// policy and the session policies. You can use the role's temporary credentials
-	// in subsequent AWS API calls to access resources in the account that owns
-	// the role. You cannot use session policies to grant more permissions than
-	// those allowed by the identity-based policy of the role that is being assumed.
-	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-	// in the IAM User Guide.
-	PolicyArns []*PolicyDescriptorType `type:"list"`
-
 	// The fully qualified host component of the domain name of the identity provider.
 	//
 	// Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
@@ -1900,16 +1718,6 @@ func (s *AssumeRoleWithWebIdentityInput) Validate() error {
 	if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
 		invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
 	}
-	if s.PolicyArns != nil {
-		for i, v := range s.PolicyArns {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -1929,12 +1737,6 @@ func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebI
 	return s
 }
 
-// SetPolicyArns sets the PolicyArns field's value.
-func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput {
-	s.PolicyArns = v
-	return s
-}
-
 // SetProviderId sets the ProviderId field's value.
 func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
 	s.ProviderId = &v
@@ -1979,8 +1781,10 @@ type AssumeRoleWithWebIdentityOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security token.
 	//
-	// The size of the security token that STS API operations return is not fixed.
-	// We strongly recommend that you make no assumptions about the maximum size.
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
 	Credentials *Credentials `type:"structure"`
 
 	// A percentage value that indicates the size of the policy in packed form.
@@ -1989,7 +1793,7 @@ type AssumeRoleWithWebIdentityOutput struct {
 	PackedPolicySize *int64 `type:"integer"`
 
 	// The issuing authority of the web identity token presented. For OpenID Connect
-	// ID tokens, this contains the value of the iss field. For OAuth 2.0 access
+	// ID Tokens, this contains the value of the iss field. For OAuth 2.0 access
 	// tokens, this contains the value of the ProviderId parameter that was passed
 	// in the AssumeRoleWithWebIdentity request.
 	Provider *string `type:"string"`
@@ -2056,7 +1860,7 @@ type AssumedRoleUser struct {
 	// The ARN of the temporary security credentials that are returned from the
 	// AssumeRole action. For more information about ARNs and how to use them in
-	// policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+	// policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
 	// in Using IAM.
 	//
 	// Arn is a required field
@@ -2224,7 +2028,7 @@ type FederatedUser struct {
 	// The ARN that specifies the federated user that is associated with the credentials.
 	// For more information about ARNs and how to use them in policies, see IAM
-	// Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+	// Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
 	// in Using IAM.
 	//
 	// Arn is a required field
@@ -2259,73 +2063,6 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
 	return s
 }
 
-type GetAccessKeyInfoInput struct {
-	_ struct{} `type:"structure"`
-
-	// The identifier of an access key.
-	//
-	// This parameter allows (through its regex pattern) a string of characters
-	// that can consist of any upper- or lowercased letter or digit.
-	//
-	// AccessKeyId is a required field
-	AccessKeyId *string `min:"16" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetAccessKeyInfoInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetAccessKeyInfoInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetAccessKeyInfoInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"}
-	if s.AccessKeyId == nil {
-		invalidParams.Add(request.NewErrParamRequired("AccessKeyId"))
-	}
-	if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 {
-		invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetAccessKeyId sets the AccessKeyId field's value.
-func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput {
-	s.AccessKeyId = &v
-	return s
-}
-
-type GetAccessKeyInfoOutput struct {
-	_ struct{} `type:"structure"`
-
-	// The number used to identify the AWS account.
-	Account *string `type:"string"`
-}
-
-// String returns the string representation
-func (s GetAccessKeyInfoOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetAccessKeyInfoOutput) GoString() string {
-	return s.String()
-}
-
-// SetAccount sets the Account field's value.
-func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput {
-	s.Account = &v
-	return s
-}
-
 type GetCallerIdentityInput struct {
 	_ struct{} `type:"structure"`
 }
@@ -2353,8 +2090,8 @@ type GetCallerIdentityOutput struct {
 	Arn *string `min:"20" type:"string"`
 
 	// The unique identifier of the calling entity. The exact value depends on the
-	// type of entity that is making the call. The values returned are those listed
-	// in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
+	// type of entity making the call. The values returned are those listed in the
+	// aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
 	// found on the Policy Variables reference page in the IAM User Guide.
 	UserId *string `type:"string"`
 }
@@ -2391,11 +2128,12 @@ type GetFederationTokenInput struct {
 	_ struct{} `type:"structure"`
 
 	// The duration, in seconds, that the session should last. Acceptable durations
-	// for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
-	// (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
-	// using AWS account root user credentials are restricted to a maximum of 3,600
+	// for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
+	// (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
+	// using AWS account (root) credentials are restricted to a maximum of 3600
 	// seconds (one hour). If the specified duration is longer than one hour, the
-	// session obtained by using root user credentials defaults to one hour.
+	// session obtained by using AWS account (root) credentials defaults to one
+	// hour.
 	DurationSeconds *int64 `min:"900" type:"integer"`
 
 	// The name of the federated user. The name is used as an identifier for the
 	// temporary security credentials (such as Bob). For example, you can reference
 	// the federated user name in a resource-based policy, such as in an Amazon
 	// S3 bucket policy.
 	//
 	// The regex used to validate this parameter is a string of characters consisting
 	// of upper- and lower-case alphanumeric characters with no spaces. You can
 	// also include underscores or any of the following characters: =,.@-
 	//
 	// Name is a required field
 	Name *string `min:"2" type:"string" required:"true"`
 
-	// An IAM policy in JSON format that you want to use as an inline session policy.
-	//
-	// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-	// to this operation. You can pass a single JSON policy document to use as an
-	// inline session policy. You can also specify up to 10 managed policies to
-	// use as managed session policies.
-	//
-	// This parameter is optional. However, if you do not pass any session policies,
-	// then the resulting federated user session has no permissions. The only exception
-	// is when the credentials are used to access a resource that has a resource-based
-	// policy that specifically references the federated user session in the Principal
-	// element of the policy.
-	//
-	// When you pass session policies, the session permissions are the intersection
-	// of the IAM user policies and the session policies that you pass. This gives
-	// you a way to further restrict the permissions for a federated user. You cannot
-	// use session policies to grant more permissions than those that are defined
-	// in the permissions policy of the IAM user. For more information, see Session
-	// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-	// in the IAM User Guide.
-	//
-	// The plain text that you use for both inline and managed session policies
-	// shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
-	// character from the space character to the end of the valid character list
-	// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// An IAM policy in JSON format that is passed with the GetFederationToken call
+	// and evaluated along with the policy or policies that are attached to the
+	// IAM user whose credentials are used to call GetFederationToken. The passed
+	// policy is used to scope down the permissions that are available to the IAM
+	// user, by allowing only a subset of the permissions that are granted to the
+	// IAM user. The passed policy cannot grant more permissions than those granted
+	// to the IAM user. The final permissions for the federated user are the most
+	// restrictive set based on the intersection of the passed policy and the IAM
+	// user policy.
+	//
+	// If you do not pass a policy, the resulting temporary security credentials
+	// have no effective permissions. The only exception is when the temporary security
+	// credentials are used to access a resource that has a resource-based policy
+	// that specifically allows the federated user to access the resource.
+	//
+	// The format for this parameter, as described by its regex pattern, is a string
+	// of characters up to 2048 characters in length. The characters can be any
+	// ASCII character from the space character to the end of the valid character
+	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
 	// and carriage return (\u000D) characters.
 	//
-	// The characters in this parameter count towards the 2048 character session
-	// policy guideline. However, an AWS conversion compresses the session policies
-	// into a packed binary format that has a separate limit. This is the enforced
-	// limit. The PackedPolicySize response element indicates by percentage how
-	// close the policy is to the upper size limit.
-	Policy *string `min:"1" type:"string"`
-
-	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
-	// to use as a managed session policy. The policies must exist in the same account
-	// as the IAM user that is requesting federated access.
-	//
-	// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-	// to this operation. You can pass a single JSON policy document to use as an
-	// inline session policy. You can also specify up to 10 managed policies to
-	// use as managed session policies. The plain text that you use for both inline
-	// and managed session policies shouldn't exceed 2048 characters. You can provide
-	// up to 10 managed policy ARNs. For more information about ARNs, see Amazon
-	// Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-	// in the AWS General Reference.
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
 	//
-	// This parameter is optional. However, if you do not pass any session policies,
-	// then the resulting federated user session has no permissions. The only exception
-	// is when the credentials are used to access a resource that has a resource-based
-	// policy that specifically references the federated user session in the Principal
-	// element of the policy.
-	//
-	// When you pass session policies, the session permissions are the intersection
-	// of the IAM user policies and the session policies that you pass. This gives
-	// you a way to further restrict the permissions for a federated user. You cannot
-	// use session policies to grant more permissions than those that are defined
-	// in the permissions policy of the IAM user. For more information, see Session
-	// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-	// in the IAM User Guide.
-	//
-	// The characters in this parameter count towards the 2048 character session
-	// policy guideline. However, an AWS conversion compresses the session policies
-	// into a packed binary format that has a separate limit. This is the enforced
-	// limit. The PackedPolicySize response element indicates by percentage how
-	// close the policy is to the upper size limit.
-	PolicyArns []*PolicyDescriptorType `type:"list"`
+	// For more information about how permissions work, see Permissions for GetFederationToken
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+	Policy *string `min:"1" type:"string"`
 }
 
 // String returns the string representation
@@ -2504,16 +2205,6 @@ func (s *GetFederationTokenInput) Validate() error {
 	if s.Policy != nil && len(*s.Policy) < 1 {
 		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
 	}
-	if s.PolicyArns != nil {
-		for i, v := range s.PolicyArns {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -2539,12 +2230,6 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
 	return s
 }
 
-// SetPolicyArns sets the PolicyArns field's value.
-func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput {
-	s.PolicyArns = v
-	return s
-}
-
 // Contains the response to a successful GetFederationToken request, including
 // temporary AWS credentials that can be used to make AWS requests.
 type GetFederationTokenOutput struct {
@@ -2553,8 +2238,10 @@ type GetFederationTokenOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security (or session) token.
 	//
-	// The size of the security token that STS API operations return is not fixed.
-	// We strongly recommend that you make no assumptions about the maximum size.
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
 	Credentials *Credentials `type:"structure"`
 
 	// Identifiers for the federated user associated with the credentials (such
@@ -2601,11 +2288,11 @@ type GetSessionTokenInput struct {
 	_ struct{} `type:"structure"`
 
 	// The duration, in seconds, that the credentials should remain valid. Acceptable
-	// durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
-	// seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions
-	// for AWS account owners are restricted to a maximum of 3,600 seconds (one
-	// hour). If the duration is longer than one hour, the session for AWS account
-	// owners defaults to one hour.
+	// durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
+	// seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
+	// for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
+	// If the duration is longer than one hour, the session for AWS account owners
+	// defaults to one hour.
 	DurationSeconds *int64 `min:"900" type:"integer"`
 
 	// The identification number of the MFA device that is associated with the IAM
 	// user who is making the GetSessionToken call. Specify this value if the IAM
 	// user has a policy that requires MFA authentication. The value is either the
 	// serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
 	// Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
 	//
 	// You can find the device for an IAM user by going to the AWS Management Console
 	// and viewing the user's security credentials.
 	//
-	// The regex used to validate this parameter is a string of characters consisting
 	// of upper- and lower-case alphanumeric characters with no spaces. You can
 	// also include underscores or any of the following characters: =,.@:/-
 	SerialNumber *string `min:"9" type:"string"`
 
 	// The value provided by the MFA device, if MFA is required. If any policy requires
 	// the IAM user to submit an MFA code, specify this value. If MFA authentication
-	// is required, the user must provide a code when requesting a set of temporary
-	// security credentials. A user who fails to provide the code receives an "access
-	// denied" response when requesting resources that require MFA authentication.
+	// is required, and the user does not provide a code when requesting a set of
+	// temporary security credentials, the user will receive an "access denied"
+	// response when requesting resources that require MFA authentication.
 	//
 	// The format for this parameter, as described by its regex pattern, is a sequence
 	// of six numeric digits.
@@ -2687,8 +2374,10 @@ type GetSessionTokenOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security (or session) token.
 	//
-	// The size of the security token that STS API operations return is not fixed.
-	// We strongly recommend that you make no assumptions about the maximum size.
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
 	Credentials *Credentials `type:"structure"`
 }
 
@@ -2707,44 +2396,3 @@ func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenO
 	s.Credentials = v
 	return s
 }
-
-// A reference to the IAM managed policy that is passed as a session policy
-// for a role session or a federated user session.
-type PolicyDescriptorType struct {
-	_ struct{} `type:"structure"`
-
-	// The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
-	// policy for the role. For more information about ARNs, see Amazon Resource
-	// Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-	// in the AWS General Reference.
-	Arn *string `locationName:"arn" min:"20" type:"string"`
-}
-
-// String returns the string representation
-func (s PolicyDescriptorType) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PolicyDescriptorType) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PolicyDescriptorType) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"}
-	if s.Arn != nil && len(*s.Arn) < 20 {
-		invalidParams.Add(request.NewErrParamMinLen("Arn", 20))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetArn sets the Arn field's value.
-func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { - s.Arn = &v - return s -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go index d5307fca..4010cc7f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -3,9 +3,10 @@ package sts import "github.com/aws/aws-sdk-go/aws/request" func init() { - initRequest = customizeRequest -} - -func customizeRequest(r *request.Request) { - r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index fcb720dc..ef681ab0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -7,14 +7,22 @@ // request temporary, limited-privilege credentials for AWS Identity and Access // Management (IAM) users or for users that you authenticate (federated users). // This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). // // For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) // in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) // in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) // in the IAM User Guide. 
// // If you're new to AWS and need additional technical information about a specific @@ -23,38 +31,14 @@ // // Endpoints // -// By default, AWS Security Token Service (STS) is available as a global service, -// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. -// Global requests map to the US East (N. Virginia) region. AWS recommends using -// Regional AWS STS endpoints instead of the global endpoint to reduce latency, -// build in redundancy, and increase session token validity. For more information, -// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// Most AWS Regions are enabled for operations in all AWS services by default. -// Those Regions are automatically activated for use with AWS STS. Some Regions, -// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more -// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) -// in the AWS General Reference. When you enable these AWS Regions, they are -// automatically activated for use with AWS STS. You cannot activate the STS -// endpoint for a Region that is disabled. Tokens that are valid in all AWS -// Regions are longer than tokens that are valid in Regions that are enabled -// by default. Changing this setting might affect existing systems where you -// temporarily store tokens. For more information, see Managing Global Endpoint -// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com +// that maps to the US East (N. Virginia) region. Additional regions are available +// and are activated by default. For more information, see Activating and Deactivating +// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// After you activate a Region for use with AWS STS, you can direct AWS STS -// API calls to that Region. AWS STS recommends that you provide both the Region -// and endpoint when you make calls to a Regional endpoint. You can provide -// the Region alone for manually enabled Regions, such as Asia Pacific (Hong -// Kong). In this case, the calls are directed to the STS Regional endpoint. -// However, if you provide the Region alone for Regions enabled by default, -// the calls are directed to the global endpoint of https://sts.amazonaws.com. -// -// To view the list of AWS STS endpoints and whether they are active by default, -// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) -// in the IAM User Guide. +// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) +// in the AWS General Reference. // // Recording API requests // @@ -62,28 +46,8 @@ // your AWS account and delivers log files to an Amazon S3 bucket. By using // information collected by CloudTrail, you can determine what requests were // successfully made to STS, who made the request, when it was made, and so -// on. 
-// -// If you activate AWS STS endpoints in Regions other than the default global -// endpoint, then you must also turn on CloudTrail logging in those Regions. -// This is necessary to record any AWS STS API calls that are made in those -// Regions. For more information, see Turning On CloudTrail in Additional Regions -// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) -// in the AWS CloudTrail User Guide. -// -// AWS Security Token Service (STS) is a global service with a single endpoint -// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls -// to a global service. However, because this endpoint is physically located -// in the US East (N. Virginia) Region, your logs list us-east-1 as the event -// Region. CloudTrail does not write these logs to the US East (Ohio) Region -// unless you choose to include global service logs in that Region. CloudTrail -// writes calls to all Regional endpoints to their respective Regions. For example, -// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) -// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU -// (Frankfurt) Region. -// -// To learn more about CloudTrail, including how to turn it on and find your -// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// on. To learn more about CloudTrail, including how to turn it on and find +// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). // // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index 41ea09c3..e24884ef 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -67,7 +67,7 @@ const ( // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating - // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. ErrCodeRegionDisabledException = "RegionDisabledException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go deleted file mode 100644 index e2e1d6ef..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. 
-package stsiface
-
-import (
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/service/sts"
-)
-
-// STSAPI provides an interface to enable mocking the
-// sts.STS service client's API operations,
-// paginators, and waiters. This makes unit testing your code that calls out
-// to the SDK's service client easier.
-//
-// The best way to use this interface is so the SDK's service client's calls
-// can be stubbed out for unit testing your code with the SDK without needing
-// to inject custom request handlers into the SDK's request pipeline.
-//
-//    // myFunc uses an SDK service client to make a request to
-//    // AWS Security Token Service.
-//    func myFunc(svc stsiface.STSAPI) bool {
-//        // Make svc.AssumeRole request
-//    }
-//
-//    func main() {
-//        sess := session.New()
-//        svc := sts.New(sess)
-//
-//        myFunc(svc)
-//    }
-//
-// In your _test.go file:
-//
-//    // Define a mock struct to be used in your unit tests of myFunc.
-//    type mockSTSClient struct {
-//        stsiface.STSAPI
-//    }
-//    func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
-//        // mock response/functionality
-//    }
-//
-//    func TestMyFunc(t *testing.T) {
-//        // Setup Test
-//        mockSvc := &mockSTSClient{}
-//
-//        myfunc(mockSvc)
-//
-//        // Verify myFunc's functionality
-//    }
-//
-// It is important to note that this interface will have breaking changes
-// when the service model is updated and adds new API operations, paginators,
-// and waiters. It's suggested to use the pattern above for testing, or to use
-// tooling to generate mocks to satisfy the interfaces.
-type STSAPI interface {
-	AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
-	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
-	AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
-
-	AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
-	AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
-	AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
-
-	AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
-	AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
-	AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
-
-	DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
-	DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
-	DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
-
-	GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
-	GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
-	GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
-
-	GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
-	GetCallerIdentityWithContext(aws.Context,
*sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) - - GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) - GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) - GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) - - GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) - GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) - GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) -} - -var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore deleted file mode 100644 index 9e131146..00000000 --- a/vendor/github.com/bgentry/speakeasy/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -example/example -example/example.exe diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE b/vendor/github.com/bgentry/speakeasy/LICENSE deleted file mode 100644 index 37d60fc3..00000000 --- a/vendor/github.com/bgentry/speakeasy/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -MIT License - -Copyright (c) 2017 Blake Gentry - -This license applies to the non-Windows portions of this library. The Windows -portion maintains its own Apache 2.0 license. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS deleted file mode 100644 index ff177f61..00000000 --- a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [2013] [the CloudFoundry Authors] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
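The speakeasy sources removed below implemented hidden-input password prompts for the terminal. For orientation while reviewing the removal, here is a minimal usage sketch of the package's public entry point (speakeasy.Ask, as defined in the deleted speakeasy.go below); the prompt text is illustrative only, and this assumes the module would be fetched via go.mod rather than vendored:

package main

import (
	"fmt"
	"log"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Ask prints the prompt to stdout, disables terminal echo, reads a
	// single line from stdin, and restores echo before returning.
	password, err := speakeasy.Ask("Password: ")
	if err != nil {
		log.Fatalf("password entry failed: %v", err)
	}
	fmt.Printf("read %d characters\n", len(password))
}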
diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md deleted file mode 100644 index fceda751..00000000 --- a/vendor/github.com/bgentry/speakeasy/Readme.md +++ /dev/null @@ -1,30 +0,0 @@ -# Speakeasy - -This package provides cross-platform Go (#golang) helpers for taking user input -from the terminal while not echoing the input back (similar to `getpasswd`). The -package uses syscalls to avoid any dependence on cgo, and is therefore -compatible with cross-compiling. - -[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] - -## Unicode - -Multi-byte unicode characters work successfully on Mac OS X. On Windows, -however, this may be problematic (as is UTF in general on Windows). Other -platforms have not been tested. - -## License - -The code herein was not written by me, but was compiled from two separate open -source packages. Unix portions were imported from [gopass][gopass], while -Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s -[Windows terminal helpers][cf-ui-windows]. - -The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly -from the source (though I attempted to fill in the correct owner in the -boilerplate copyright notice). - -[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" -[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" -[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" -[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go deleted file mode 100644 index 71c1dd1b..00000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy.go +++ /dev/null @@ -1,49 +0,0 @@ -package speakeasy - -import ( - "fmt" - "io" - "os" - "strings" -) - -// Ask the user to enter a password with input hidden. prompt is a string to -// display before the user's input. Returns the provided password, or an error -// if the command failed. -func Ask(prompt string) (password string, err error) { - return FAsk(os.Stdout, prompt) -} - -// FAsk is the same as Ask, except it is possible to specify the file to write -// the prompt to. If 'nil' is passed as the writer, no prompt will be written. -func FAsk(wr io.Writer, prompt string) (password string, err error) { - if wr != nil && prompt != "" { - fmt.Fprint(wr, prompt) // Display the prompt. - } - password, err = getPassword() - - // Carriage return after the user input. 
- if wr != nil { - fmt.Fprintln(wr, "") - } - return -} - -func readline() (value string, err error) { - var valb []byte - var n int - b := make([]byte, 1) - for { - // read one byte at a time so we don't accidentally read extra bytes - n, err = os.Stdin.Read(b) - if err != nil && err != io.EOF { - return "", err - } - if n == 0 || b[0] == '\n' { - break - } - valb = append(valb, b[0]) - } - - return strings.TrimSuffix(string(valb), "\r"), nil -} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go deleted file mode 100644 index d99fda19..00000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go +++ /dev/null @@ -1,93 +0,0 @@ -// based on https://code.google.com/p/gopass -// Author: johnsiilver@gmail.com (John Doak) -// -// Original code is based on code by RogerV in the golang-nuts thread: -// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package speakeasy - -import ( - "fmt" - "os" - "os/signal" - "strings" - "syscall" -) - -const sttyArg0 = "/bin/stty" - -var ( - sttyArgvEOff = []string{"stty", "-echo"} - sttyArgvEOn = []string{"stty", "echo"} -) - -// getPassword gets input hidden from the terminal from a user. This is -// accomplished by turning off terminal echo, reading input from the user and -// finally turning on terminal echo. -func getPassword() (password string, err error) { - sig := make(chan os.Signal, 10) - brk := make(chan bool) - - // File descriptors for stdin, stdout, and stderr. - fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} - - // Setup notifications of termination signals to channel sig, create a process to - // watch for these signals so we can turn back on echo if need be. - signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, - syscall.SIGTERM) - go catchSignal(fd, sig, brk) - - // Turn off the terminal echo. - pid, err := echoOff(fd) - if err != nil { - return "", err - } - - // Turn on the terminal echo and stop listening for signals. - defer signal.Stop(sig) - defer close(brk) - defer echoOn(fd) - - syscall.Wait4(pid, nil, 0, nil) - - line, err := readline() - if err == nil { - password = strings.TrimSpace(line) - } else { - err = fmt.Errorf("failed during password entry: %s", err) - } - - return password, err -} - -// echoOff turns off the terminal echo. -func echoOff(fd []uintptr) (int, error) { - pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) - if err != nil { - return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) - } - return pid, nil -} - -// echoOn turns back on the terminal echo. -func echoOn(fd []uintptr) { - // Turn on the terminal echo. - pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) - if e == nil { - syscall.Wait4(pid, nil, 0, nil) - } -} - -// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn -// terminal echo back on before the program ends. Otherwise the user is left -// with echo off on their terminal. 
-func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { - select { - case <-sig: - echoOn(fd) - os.Exit(-1) - case <-brk: - } -} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go deleted file mode 100644 index c2093a80..00000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build windows - -package speakeasy - -import ( - "syscall" -) - -// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx -const ENABLE_ECHO_INPUT = 0x0004 - -func getPassword() (password string, err error) { - var oldMode uint32 - - err = syscall.GetConsoleMode(syscall.Stdin, &oldMode) - if err != nil { - return - } - - var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) - - err = setConsoleMode(syscall.Stdin, newMode) - defer setConsoleMode(syscall.Stdin, oldMode) - if err != nil { - return - } - - return readline() -} - -func setConsoleMode(console syscall.Handle, mode uint32) (err error) { - dll := syscall.MustLoadDLL("kernel32") - proc := dll.MustFindProc("SetConsoleMode") - r, _, err := proc.Call(uintptr(console), uintptr(mode)) - - if r == 0 { - return err - } - return nil -} diff --git a/vendor/github.com/cenkalti/backoff/.gitignore b/vendor/github.com/cenkalti/backoff/.gitignore deleted file mode 100644 index 00268614..00000000 --- a/vendor/github.com/cenkalti/backoff/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/cenkalti/backoff/v3/.gitignore similarity index 100% rename from vendor/github.com/armon/go-radix/.gitignore rename to vendor/github.com/cenkalti/backoff/v3/.gitignore diff --git a/vendor/github.com/cenkalti/backoff/.travis.yml b/vendor/github.com/cenkalti/backoff/v3/.travis.yml similarity index 100% rename from vendor/github.com/cenkalti/backoff/.travis.yml rename to vendor/github.com/cenkalti/backoff/v3/.travis.yml diff --git a/vendor/github.com/cenkalti/backoff/LICENSE b/vendor/github.com/cenkalti/backoff/v3/LICENSE similarity index 100% rename from vendor/github.com/cenkalti/backoff/LICENSE rename to vendor/github.com/cenkalti/backoff/v3/LICENSE diff --git a/vendor/github.com/cenkalti/backoff/README.md b/vendor/github.com/cenkalti/backoff/v3/README.md similarity index 100% rename from vendor/github.com/cenkalti/backoff/README.md rename to vendor/github.com/cenkalti/backoff/v3/README.md diff --git a/vendor/github.com/cenkalti/backoff/backoff.go b/vendor/github.com/cenkalti/backoff/v3/backoff.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/backoff.go rename to vendor/github.com/cenkalti/backoff/v3/backoff.go diff --git a/vendor/github.com/cenkalti/backoff/context.go b/vendor/github.com/cenkalti/backoff/v3/context.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/context.go rename to vendor/github.com/cenkalti/backoff/v3/context.go diff --git a/vendor/github.com/cenkalti/backoff/exponential.go 
b/vendor/github.com/cenkalti/backoff/v3/exponential.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/exponential.go rename to vendor/github.com/cenkalti/backoff/v3/exponential.go diff --git a/vendor/github.com/cenkalti/backoff/v3/go.mod b/vendor/github.com/cenkalti/backoff/v3/go.mod new file mode 100644 index 00000000..479e62ad --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/go.mod @@ -0,0 +1,3 @@ +module github.com/cenkalti/backoff/v3 + +go 1.12 diff --git a/vendor/github.com/cenkalti/backoff/retry.go b/vendor/github.com/cenkalti/backoff/v3/retry.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/retry.go rename to vendor/github.com/cenkalti/backoff/v3/retry.go diff --git a/vendor/github.com/cenkalti/backoff/ticker.go b/vendor/github.com/cenkalti/backoff/v3/ticker.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/ticker.go rename to vendor/github.com/cenkalti/backoff/v3/ticker.go diff --git a/vendor/github.com/cenkalti/backoff/tries.go b/vendor/github.com/cenkalti/backoff/v3/tries.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/tries.go rename to vendor/github.com/cenkalti/backoff/v3/tries.go diff --git a/vendor/github.com/containerd/continuity/fs/copy.go b/vendor/github.com/containerd/continuity/fs/copy.go deleted file mode 100644 index 818bba2c..00000000 --- a/vendor/github.com/containerd/continuity/fs/copy.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/pkg/errors" -) - -var bufferPool = &sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 32*1024) - return &buffer - }, -} - -// XAttrErrorHandlers transform a non-nil xattr error. -// Return nil to ignore an error. -// xattrKey can be empty for listxattr operation. -type XAttrErrorHandler func(dst, src, xattrKey string, err error) error - -type copyDirOpts struct { - xeh XAttrErrorHandler -} - -type CopyDirOpt func(*copyDirOpts) error - -// WithXAttrErrorHandler allows specifying XAttrErrorHandler -// If nil XAttrErrorHandler is specified (default), CopyDir stops -// on a non-nil xattr error. -func WithXAttrErrorHandler(xeh XAttrErrorHandler) CopyDirOpt { - return func(o *copyDirOpts) error { - o.xeh = xeh - return nil - } -} - -// WithAllowXAttrErrors allows ignoring xattr errors. -func WithAllowXAttrErrors() CopyDirOpt { - xeh := func(dst, src, xattrKey string, err error) error { - return nil - } - return WithXAttrErrorHandler(xeh) -} - -// CopyDir copies the directory from src to dst. -// Most efficient copy of files is attempted. 
-func CopyDir(dst, src string, opts ...CopyDirOpt) error { - var o copyDirOpts - for _, opt := range opts { - if err := opt(&o); err != nil { - return err - } - } - inodes := map[uint64]string{} - return copyDirectory(dst, src, inodes, &o) -} - -func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) error { - stat, err := os.Stat(src) - if err != nil { - return errors.Wrapf(err, "failed to stat %s", src) - } - if !stat.IsDir() { - return errors.Errorf("source %s is not directory", src) - } - - if st, err := os.Stat(dst); err != nil { - if err := os.Mkdir(dst, stat.Mode()); err != nil { - return errors.Wrapf(err, "failed to mkdir %s", dst) - } - } else if !st.IsDir() { - return errors.Errorf("cannot copy to non-directory: %s", dst) - } else { - if err := os.Chmod(dst, stat.Mode()); err != nil { - return errors.Wrapf(err, "failed to chmod on %s", dst) - } - } - - fis, err := ioutil.ReadDir(src) - if err != nil { - return errors.Wrapf(err, "failed to read %s", src) - } - - if err := copyFileInfo(stat, dst); err != nil { - return errors.Wrapf(err, "failed to copy file info for %s", dst) - } - - if err := copyXAttrs(dst, src, o.xeh); err != nil { - return errors.Wrap(err, "failed to copy xattrs") - } - - for _, fi := range fis { - source := filepath.Join(src, fi.Name()) - target := filepath.Join(dst, fi.Name()) - - switch { - case fi.IsDir(): - if err := copyDirectory(target, source, inodes, o); err != nil { - return err - } - continue - case (fi.Mode() & os.ModeType) == 0: - link, err := getLinkSource(target, fi, inodes) - if err != nil { - return errors.Wrap(err, "failed to get hardlink") - } - if link != "" { - if err := os.Link(link, target); err != nil { - return errors.Wrap(err, "failed to create hard link") - } - } else if err := CopyFile(target, source); err != nil { - return errors.Wrap(err, "failed to copy files") - } - case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink: - link, err := os.Readlink(source) - if err != nil { - return errors.Wrapf(err, "failed to read link: %s", source) - } - if err := os.Symlink(link, target); err != nil { - return errors.Wrapf(err, "failed to create symlink: %s", target) - } - case (fi.Mode() & os.ModeDevice) == os.ModeDevice: - if err := copyDevice(target, fi); err != nil { - return errors.Wrapf(err, "failed to create device") - } - default: - // TODO: Support pipes and sockets - return errors.Wrapf(err, "unsupported mode %s", fi.Mode()) - } - if err := copyFileInfo(fi, target); err != nil { - return errors.Wrap(err, "failed to copy file info") - } - - if err := copyXAttrs(target, source, o.xeh); err != nil { - return errors.Wrap(err, "failed to copy xattrs") - } - } - - return nil -} - -// CopyFile copies the source file to the target. -// The most efficient means of copying is used for the platform. -func CopyFile(target, source string) error { - src, err := os.Open(source) - if err != nil { - return errors.Wrapf(err, "failed to open source %s", source) - } - defer src.Close() - tgt, err := os.Create(target) - if err != nil { - return errors.Wrapf(err, "failed to open target %s", target) - } - defer tgt.Close() - - return copyFileContent(tgt, src) -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_linux.go b/vendor/github.com/containerd/continuity/fs/copy_linux.go deleted file mode 100644 index 72bae7d4..00000000 --- a/vendor/github.com/containerd/continuity/fs/copy_linux.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "io" - "os" - "syscall" - - "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func copyFileInfo(fi os.FileInfo, name string) error { - st := fi.Sys().(*syscall.Stat_t) - if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { - if os.IsPermission(err) { - // Normally if uid/gid are the same this would be a no-op, but some - // filesystems may still return EPERM... for instance NFS does this. - // In such a case, this is not an error. - if dstStat, err2 := os.Lstat(name); err2 == nil { - st2 := dstStat.Sys().(*syscall.Stat_t) - if st.Uid == st2.Uid && st.Gid == st2.Gid { - err = nil - } - } - } - if err != nil { - return errors.Wrapf(err, "failed to chown %s", name) - } - } - - if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { - if err := os.Chmod(name, fi.Mode()); err != nil { - return errors.Wrapf(err, "failed to chmod %s", name) - } - } - - timespec := []unix.Timespec{ - unix.NsecToTimespec(syscall.TimespecToNsec(StatAtime(st))), - unix.NsecToTimespec(syscall.TimespecToNsec(StatMtime(st))), - } - if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrapf(err, "failed to utime %s", name) - } - - return nil -} - -const maxSSizeT = int64(^uint(0) >> 1) - -func copyFileContent(dst, src *os.File) error { - st, err := src.Stat() - if err != nil { - return errors.Wrap(err, "unable to stat source") - } - - size := st.Size() - first := true - srcFd := int(src.Fd()) - dstFd := int(dst.Fd()) - - for size > 0 { - // Ensure that we are never trying to copy more than SSIZE_MAX at a - // time and at the same time avoids overflows when the file is larger - // than 4GB on 32-bit systems. 
- var copySize int - if size > maxSSizeT { - copySize = int(maxSSizeT) - } else { - copySize = int(size) - } - n, err := unix.CopyFileRange(srcFd, nil, dstFd, nil, copySize, 0) - if err != nil { - if (err != unix.ENOSYS && err != unix.EXDEV) || !first { - return errors.Wrap(err, "copy file range failed") - } - - buf := bufferPool.Get().(*[]byte) - _, err = io.CopyBuffer(dst, src, *buf) - bufferPool.Put(buf) - return errors.Wrap(err, "userspace copy failed") - } - - first = false - size -= int64(n) - } - - return nil -} - -func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { - xattrKeys, err := sysx.LListxattr(src) - if err != nil { - e := errors.Wrapf(err, "failed to list xattrs on %s", src) - if xeh != nil { - e = xeh(dst, src, "", e) - } - return e - } - for _, xattr := range xattrKeys { - data, err := sysx.LGetxattr(src, xattr) - if err != nil { - e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) - if xeh != nil { - if e = xeh(dst, src, xattr, e); e == nil { - continue - } - } - return e - } - if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { - e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) - if xeh != nil { - if e = xeh(dst, src, xattr, e); e == nil { - continue - } - } - return e - } - } - - return nil -} - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_unix.go b/vendor/github.com/containerd/continuity/fs/copy_unix.go deleted file mode 100644 index a5de8926..00000000 --- a/vendor/github.com/containerd/continuity/fs/copy_unix.go +++ /dev/null @@ -1,112 +0,0 @@ -// +build darwin freebsd openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "io" - "os" - "syscall" - - "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func copyFileInfo(fi os.FileInfo, name string) error { - st := fi.Sys().(*syscall.Stat_t) - if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { - if os.IsPermission(err) { - // Normally if uid/gid are the same this would be a no-op, but some - // filesystems may still return EPERM... for instance NFS does this. - // In such a case, this is not an error. 
- if dstStat, err2 := os.Lstat(name); err2 == nil { - st2 := dstStat.Sys().(*syscall.Stat_t) - if st.Uid == st2.Uid && st.Gid == st2.Gid { - err = nil - } - } - } - if err != nil { - return errors.Wrapf(err, "failed to chown %s", name) - } - } - - if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { - if err := os.Chmod(name, fi.Mode()); err != nil { - return errors.Wrapf(err, "failed to chmod %s", name) - } - } - - timespec := []syscall.Timespec{StatAtime(st), StatMtime(st)} - if err := syscall.UtimesNano(name, timespec); err != nil { - return errors.Wrapf(err, "failed to utime %s", name) - } - - return nil -} - -func copyFileContent(dst, src *os.File) error { - buf := bufferPool.Get().(*[]byte) - _, err := io.CopyBuffer(dst, src, *buf) - bufferPool.Put(buf) - - return err -} - -func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { - xattrKeys, err := sysx.LListxattr(src) - if err != nil { - e := errors.Wrapf(err, "failed to list xattrs on %s", src) - if xeh != nil { - e = xeh(dst, src, "", e) - } - return e - } - for _, xattr := range xattrKeys { - data, err := sysx.LGetxattr(src, xattr) - if err != nil { - e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) - if xeh != nil { - if e = xeh(dst, src, xattr, e); e == nil { - continue - } - } - return e - } - if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { - e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) - if xeh != nil { - if e = xeh(dst, src, xattr, e); e == nil { - continue - } - } - return e - } - } - - return nil -} - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_windows.go b/vendor/github.com/containerd/continuity/fs/copy_windows.go deleted file mode 100644 index 27c7d7db..00000000 --- a/vendor/github.com/containerd/continuity/fs/copy_windows.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/
-
-package fs
-
-import (
-	"io"
-	"os"
-
-	"github.com/pkg/errors"
-)
-
-func copyFileInfo(fi os.FileInfo, name string) error {
-	if err := os.Chmod(name, fi.Mode()); err != nil {
-		return errors.Wrapf(err, "failed to chmod %s", name)
-	}
-
-	// TODO: copy windows specific metadata
-
-	return nil
-}
-
-func copyFileContent(dst, src *os.File) error {
-	buf := bufferPool.Get().(*[]byte)
-	_, err := io.CopyBuffer(dst, src, *buf)
-	bufferPool.Put(buf)
-	return err
-}
-
-func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
-	return nil
-}
-
-func copyDevice(dst string, fi os.FileInfo) error {
-	return errors.New("device copy not supported")
-}
diff --git a/vendor/github.com/containerd/continuity/fs/diff.go b/vendor/github.com/containerd/continuity/fs/diff.go
deleted file mode 100644
index e64f9e73..00000000
--- a/vendor/github.com/containerd/continuity/fs/diff.go
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package fs
-
-import (
-	"context"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"golang.org/x/sync/errgroup"
-
-	"github.com/sirupsen/logrus"
-)
-
-// ChangeKind is the type of modification that
-// a change is making.
-type ChangeKind int
-
-const (
-	// ChangeKindUnmodified represents an unmodified
-	// file
-	ChangeKindUnmodified = iota
-
-	// ChangeKindAdd represents an addition of
-	// a file
-	ChangeKindAdd
-
-	// ChangeKindModify represents a change to
-	// an existing file
-	ChangeKindModify
-
-	// ChangeKindDelete represents a delete of
-	// a file
-	ChangeKindDelete
-)
-
-func (k ChangeKind) String() string {
-	switch k {
-	case ChangeKindUnmodified:
-		return "unmodified"
-	case ChangeKindAdd:
-		return "add"
-	case ChangeKindModify:
-		return "modify"
-	case ChangeKindDelete:
-		return "delete"
-	default:
-		return ""
-	}
-}
-
-// Change represents a single change between a diff and its parent.
-type Change struct {
-	Kind ChangeKind
-	Path string
-}
-
-// ChangeFunc is the type of function called for each change
-// computed during a directory changes calculation.
-type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error
-
-// Changes computes changes between two directories calling the
-// given change function for each computed change. The first
-// directory is intended to be the base directory and the second
-// directory the changed directory.
-//
-// The change callback is called by the order of path names and
-// should be applicable in that order.
-//  Due to this apply ordering, the following is true
-//  - Removed directory trees only create a single change for the root
-//    directory removed. Remaining changes are implied.
-//  - A directory which is modified to become a file will not have
-//    delete entries for sub-path items, their removal is implied
-//    by the removal of the parent directory.
-//
-// Opaque directories will not be treated specially and each file
-// removed from the base directory will show up as a removal.
-//
-// File content comparisons will be done on files whose timestamps
-// may have been truncated. If either of the files being compared
-// has a zero nanosecond value, each byte will be compared for
-// differences. If two files have the same seconds value but different
-// nanosecond values where one of those values is zero, the files will
-// be considered unchanged if the content is the same. This behavior
-// is to account for timestamp truncation during archiving.
-func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error {
-	if a == "" {
-		logrus.Debugf("Using single walk diff for %s", b)
-		return addDirChanges(ctx, changeFn, b)
-	} else if diffOptions := detectDirDiff(b, a); diffOptions != nil {
-		logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a)
-		return diffDirChanges(ctx, changeFn, a, diffOptions)
-	}
-
-	logrus.Debugf("Using double walk diff for %s from %s", b, a)
-	return doubleWalkDiff(ctx, changeFn, a, b)
-}
-
-func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error {
-	return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		// Rebase path
-		path, err = filepath.Rel(root, path)
-		if err != nil {
-			return err
-		}
-
-		path = filepath.Join(string(os.PathSeparator), path)
-
-		// Skip root
-		if path == string(os.PathSeparator) {
-			return nil
-		}
-
-		return changeFn(ChangeKindAdd, path, f, nil)
-	})
-}
-
-// diffDirOptions is used when the diff can be directly calculated from
-// a diff directory to its base, without walking both trees.
-type diffDirOptions struct {
-	diffDir      string
-	skipChange   func(string) (bool, error)
-	deleteChange func(string, string, os.FileInfo) (string, error)
-}
-
-// diffDirChanges walks the diff directory and compares changes against the base.
-func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error {
-	changedDirs := make(map[string]struct{})
-	return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		// Rebase path
-		path, err = filepath.Rel(o.diffDir, path)
-		if err != nil {
-			return err
-		}
-
-		path = filepath.Join(string(os.PathSeparator), path)
-
-		// Skip root
-		if path == string(os.PathSeparator) {
-			return nil
-		}
-
-		// TODO: handle opaqueness, start new double walker at this
-		// location to get deletes, and skip tree in single walker
-
-		if o.skipChange != nil {
-			if skip, err := o.skipChange(path); skip {
-				return err
-			}
-		}
-
-		var kind ChangeKind
-
-		deletedFile, err := o.deleteChange(o.diffDir, path, f)
-		if err != nil {
-			return err
-		}
-
-		// Find out what kind of modification happened
-		if deletedFile != "" {
-			path = deletedFile
-			kind = ChangeKindDelete
-			f = nil
-		} else {
-			// Otherwise, the file was added
-			kind = ChangeKindAdd
-
-			// ...Unless it already existed in the base, in which case, it's a modification
-			stat, err := os.Stat(filepath.Join(base, path))
-			if err != nil && !os.IsNotExist(err) {
-				return err
-			}
-			if err == nil {
-				// The file existed in the base, so that's a modification
-
-				// However, if it's a directory, maybe it wasn't actually modified.
-				// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
-				if stat.IsDir() && f.IsDir() {
-					if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
-						// Both directories are the same, don't record the change
-						return nil
-					}
-				}
-				kind = ChangeKindModify
-			}
-		}
-
-		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
-		// This block is here to ensure the change is recorded even if the
-		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
-		// Check https://github.com/docker/docker/pull/13590 for details.
-		if f.IsDir() {
-			changedDirs[path] = struct{}{}
-		}
-		if kind == ChangeKindAdd || kind == ChangeKindDelete {
-			parent := filepath.Dir(path)
-			if _, ok := changedDirs[parent]; !ok && parent != "/" {
-				pi, err := os.Stat(filepath.Join(o.diffDir, parent))
-				if err := changeFn(ChangeKindModify, parent, pi, err); err != nil {
-					return err
-				}
-				changedDirs[parent] = struct{}{}
-			}
-		}
-
-		return changeFn(kind, path, f, nil)
-	})
-}
-
-// doubleWalkDiff walks both directories to create a diff
-func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) {
-	g, ctx := errgroup.WithContext(ctx)
-
-	var (
-		c1 = make(chan *currentPath)
-		c2 = make(chan *currentPath)
-
-		f1, f2 *currentPath
-		rmdir  string
-	)
-	g.Go(func() error {
-		defer close(c1)
-		return pathWalk(ctx, a, c1)
-	})
-	g.Go(func() error {
-		defer close(c2)
-		return pathWalk(ctx, b, c2)
-	})
-	g.Go(func() error {
-		for c1 != nil || c2 != nil {
-			if f1 == nil && c1 != nil {
-				f1, err = nextPath(ctx, c1)
-				if err != nil {
-					return err
-				}
-				if f1 == nil {
-					c1 = nil
-				}
-			}
-
-			if f2 == nil && c2 != nil {
-				f2, err = nextPath(ctx, c2)
-				if err != nil {
-					return err
-				}
-				if f2 == nil {
-					c2 = nil
-				}
-			}
-			if f1 == nil && f2 == nil {
-				continue
-			}
-
-			var f os.FileInfo
-			k, p := pathChange(f1, f2)
-			switch k {
-			case ChangeKindAdd:
-				if rmdir != "" {
-					rmdir = ""
-				}
-				f = f2.f
-				f2 = nil
-			case ChangeKindDelete:
-				// Check if this file is already removed by being
-				// under a removed directory
-				if rmdir != "" && strings.HasPrefix(f1.path, rmdir) {
-					f1 = nil
-					continue
-				} else if f1.f.IsDir() {
-					rmdir = f1.path + string(os.PathSeparator)
-				} else if rmdir != "" {
-					rmdir = ""
-				}
-				f1 = nil
-			case ChangeKindModify:
-				same, err := sameFile(f1, f2)
-				if err != nil {
-					return err
-				}
-				if f1.f.IsDir() && !f2.f.IsDir() {
-					rmdir = f1.path + string(os.PathSeparator)
-				} else if rmdir != "" {
-					rmdir = ""
-				}
-				f = f2.f
-				f1 = nil
-				f2 = nil
-				if same {
-					if !isLinked(f) {
-						continue
-					}
-					k = ChangeKindUnmodified
-				}
-			}
-			if err := changeFn(k, p, f, nil); err != nil {
-				return err
-			}
-		}
-		return nil
-	})
-
-	return g.Wait()
-}
diff --git a/vendor/github.com/containerd/continuity/fs/diff_unix.go b/vendor/github.com/containerd/continuity/fs/diff_unix.go
deleted file mode 100644
index 7913af27..00000000
--- a/vendor/github.com/containerd/continuity/fs/diff_unix.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// +build !windows
-
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
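The doubleWalkDiff routine above is a two-cursor merge over two path streams sorted by directoryCompare: the side whose path sorts first yields a delete or an add, and ties become candidate modifications. The same shape reduced to sorted string slices, a simplification rather than the deleted implementation (plain string ordering stands in for directoryCompare, and the sameFile check is elided):

package main

import "fmt"

func merge(a, b []string) {
	i, j := 0, 0
	for i < len(a) || j < len(b) {
		switch {
		case j >= len(b) || (i < len(a) && a[i] < b[j]):
			fmt.Println("delete", a[i]) // only in the base tree
			i++
		case i >= len(a) || a[i] > b[j]:
			fmt.Println("add", b[j]) // only in the changed tree
			j++
		default:
			fmt.Println("modify?", a[i]) // in both; needs a sameFile check
			i++
			j++
		}
	}
}

func main() {
	merge([]string{"/a", "/b", "/d"}, []string{"/a", "/c", "/d"})
}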
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package fs
-
-import (
-	"bytes"
-	"os"
-	"syscall"
-
-	"github.com/containerd/continuity/sysx"
-	"github.com/pkg/errors"
-)
-
-// detectDirDiff returns diff dir options if a directory could
-// be found in the mount info for upper that is the direct
-// diff with the provided lower directory
-func detectDirDiff(upper, lower string) *diffDirOptions {
-	// TODO: get mount options for upper
-	// TODO: detect AUFS
-	// TODO: detect overlay
-	return nil
-}
-
-// compareSysStat returns whether the stats are equivalent,
-// that is, whether the files are considered the same file,
-// and an error
-func compareSysStat(s1, s2 interface{}) (bool, error) {
-	ls1, ok := s1.(*syscall.Stat_t)
-	if !ok {
-		return false, nil
-	}
-	ls2, ok := s2.(*syscall.Stat_t)
-	if !ok {
-		return false, nil
-	}
-
-	return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil
-}
-
-func compareCapabilities(p1, p2 string) (bool, error) {
-	c1, err := sysx.LGetxattr(p1, "security.capability")
-	if err != nil && err != sysx.ENODATA {
-		return false, errors.Wrapf(err, "failed to get xattr for %s", p1)
-	}
-	c2, err := sysx.LGetxattr(p2, "security.capability")
-	if err != nil && err != sysx.ENODATA {
-		return false, errors.Wrapf(err, "failed to get xattr for %s", p2)
-	}
-	return bytes.Equal(c1, c2), nil
-}
-
-func isLinked(f os.FileInfo) bool {
-	s, ok := f.Sys().(*syscall.Stat_t)
-	if !ok {
-		return false
-	}
-	return !f.IsDir() && s.Nlink > 1
-}
diff --git a/vendor/github.com/containerd/continuity/fs/diff_windows.go b/vendor/github.com/containerd/continuity/fs/diff_windows.go
deleted file mode 100644
index 4bfa72d3..00000000
--- a/vendor/github.com/containerd/continuity/fs/diff_windows.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/ - -package fs - -import ( - "os" - - "golang.org/x/sys/windows" -) - -func detectDirDiff(upper, lower string) *diffDirOptions { - return nil -} - -func compareSysStat(s1, s2 interface{}) (bool, error) { - f1, ok := s1.(windows.Win32FileAttributeData) - if !ok { - return false, nil - } - f2, ok := s2.(windows.Win32FileAttributeData) - if !ok { - return false, nil - } - return f1.FileAttributes == f2.FileAttributes, nil -} - -func compareCapabilities(p1, p2 string) (bool, error) { - // TODO: Use windows equivalent - return true, nil -} - -func isLinked(os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/containerd/continuity/fs/dtype_linux.go b/vendor/github.com/containerd/continuity/fs/dtype_linux.go deleted file mode 100644 index 10510d8d..00000000 --- a/vendor/github.com/containerd/continuity/fs/dtype_linux.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "fmt" - "io/ioutil" - "os" - "syscall" - "unsafe" -) - -func locateDummyIfEmpty(path string) (string, error) { - children, err := ioutil.ReadDir(path) - if err != nil { - return "", err - } - if len(children) != 0 { - return "", nil - } - dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") - if err != nil { - return "", err - } - name := dummyFile.Name() - err = dummyFile.Close() - return name, err -} - -// SupportsDType returns whether the filesystem mounted on path supports d_type -func SupportsDType(path string) (bool, error) { - // locate dummy so that we have at least one dirent - dummy, err := locateDummyIfEmpty(path) - if err != nil { - return false, err - } - if dummy != "" { - defer os.Remove(dummy) - } - - visited := 0 - supportsDType := true - fn := func(ent *syscall.Dirent) bool { - visited++ - if ent.Type == syscall.DT_UNKNOWN { - supportsDType = false - // stop iteration - return true - } - // continue iteration - return false - } - if err = iterateReadDir(path, fn); err != nil { - return false, err - } - if visited == 0 { - return false, fmt.Errorf("did not hit any dirent during iteration %s", path) - } - return supportsDType, nil -} - -func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { - d, err := os.Open(path) - if err != nil { - return err - } - defer d.Close() - fd := int(d.Fd()) - buf := make([]byte, 4096) - for { - nbytes, err := syscall.ReadDirent(fd, buf) - if err != nil { - return err - } - if nbytes == 0 { - break - } - for off := 0; off < nbytes; { - ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) - if stop := fn(ent); stop { - return nil - } - off += int(ent.Reclen) - } - } - return nil -} diff --git a/vendor/github.com/containerd/continuity/fs/du.go b/vendor/github.com/containerd/continuity/fs/du.go deleted file mode 100644 index fccc985d..00000000 --- a/vendor/github.com/containerd/continuity/fs/du.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - Copyright The containerd Authors. 
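SupportsDType above is typically run once against a storage root before enabling d_type-dependent behavior; overlay-style backends refuse to use directories where it reports false. A usage sketch (Linux-only, and the storage root path is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	ok, err := fs.SupportsDType("/var/lib/mydaemon") // hypothetical storage root
	if err != nil {
		log.Fatal(err)
	}
	if !ok {
		log.Fatal("backing filesystem does not report d_type; refusing to start")
	}
	fmt.Println("d_type supported")
}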
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package fs
-
-import "context"
-
-// Usage of disk information
-type Usage struct {
-	Inodes int64
-	Size   int64
-}
-
-// DiskUsage counts the number of inodes and disk usage for the resources under
-// the given roots.
-func DiskUsage(ctx context.Context, roots ...string) (Usage, error) {
-	return diskUsage(ctx, roots...)
-}
-
-// DiffUsage counts the number of inodes and disk usage in the
-// diff between the two directories. The first path is intended
-// as the base directory and the second as the changed directory.
-func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
-	return diffUsage(ctx, a, b)
-}
diff --git a/vendor/github.com/containerd/continuity/fs/du_unix.go b/vendor/github.com/containerd/continuity/fs/du_unix.go
deleted file mode 100644
index e22ffbea..00000000
--- a/vendor/github.com/containerd/continuity/fs/du_unix.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// +build !windows
-
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package fs
-
-import (
-	"context"
-	"os"
-	"path/filepath"
-	"syscall"
-)
-
-type inode struct {
-	// TODO(stevvooe): Can probably reduce memory usage by not tracking
-	// device, but we can leave this as it is for now.
-	dev, ino uint64
-}
-
-func newInode(stat *syscall.Stat_t) inode {
-	return inode{
-		// Dev is uint32 on darwin/bsd, uint64 on linux/solaris
-		dev: uint64(stat.Dev), // nolint: unconvert
-		// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
-		ino: uint64(stat.Ino), // nolint: unconvert
-	}
-}
-
-func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
-
-	var (
-		size   int64
-		inodes = map[inode]struct{}{} // expensive!
-	)
-
-	for _, root := range roots {
-		if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
-			if err != nil {
-				return err
-			}
-
-			select {
-			case <-ctx.Done():
-				return ctx.Err()
-			default:
-			}
-
-			inoKey := newInode(fi.Sys().(*syscall.Stat_t))
-			if _, ok := inodes[inoKey]; !ok {
-				inodes[inoKey] = struct{}{}
-				size += fi.Size()
-			}
-
-			return nil
-		}); err != nil {
-			return Usage{}, err
-		}
-	}
-
-	return Usage{
-		Inodes: int64(len(inodes)),
-		Size:   size,
-	}, nil
-}
-
-func diffUsage(ctx context.Context, a, b string) (Usage, error) {
-	var (
-		size   int64
-		inodes = map[inode]struct{}{} // expensive!
- ) - - if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - if kind == ChangeKindAdd || kind == ChangeKindModify { - inoKey := newInode(fi.Sys().(*syscall.Stat_t)) - if _, ok := inodes[inoKey]; !ok { - inodes[inoKey] = struct{}{} - size += fi.Size() - } - - return nil - - } - return nil - }); err != nil { - return Usage{}, err - } - - return Usage{ - Inodes: int64(len(inodes)), - Size: size, - }, nil -} diff --git a/vendor/github.com/containerd/continuity/fs/du_windows.go b/vendor/github.com/containerd/continuity/fs/du_windows.go deleted file mode 100644 index 8f25ec59..00000000 --- a/vendor/github.com/containerd/continuity/fs/du_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "context" - "os" - "path/filepath" -) - -func diskUsage(ctx context.Context, roots ...string) (Usage, error) { - var ( - size int64 - ) - - // TODO(stevvooe): Support inodes (or equivalent) for windows. - - for _, root := range roots { - if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - size += fi.Size() - return nil - }); err != nil { - return Usage{}, err - } - } - - return Usage{ - Size: size, - }, nil -} - -func diffUsage(ctx context.Context, a, b string) (Usage, error) { - var ( - size int64 - ) - - if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - if kind == ChangeKindAdd || kind == ChangeKindModify { - size += fi.Size() - - return nil - - } - return nil - }); err != nil { - return Usage{}, err - } - - return Usage{ - Size: size, - }, nil -} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink.go b/vendor/github.com/containerd/continuity/fs/hardlink.go deleted file mode 100644 index 762aa45e..00000000 --- a/vendor/github.com/containerd/continuity/fs/hardlink.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import "os" - -// GetLinkInfo returns an identifier representing the node a hardlink is pointing -// to. If the file is not hard linked then 0 will be returned. 
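The DiskUsage and DiffUsage entry points shown above reduce to two calls from a consumer's point of view. A short usage sketch of both (the paths are placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	ctx := context.Background()

	u, err := fs.DiskUsage(ctx, "/tmp/snapshot")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("snapshot: %d inodes, %d bytes\n", u.Inodes, u.Size)

	d, err := fs.DiffUsage(ctx, "/tmp/base", "/tmp/changed")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("diff adds/modifies: %d inodes, %d bytes\n", d.Inodes, d.Size)
}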
-func GetLinkInfo(fi os.FileInfo) (uint64, bool) { - return getLinkInfo(fi) -} - -// getLinkSource returns a path for the given name and -// file info to its link source in the provided inode -// map. If the given file name is not in the map and -// has other links, it is added to the inode map -// to be a source for other link locations. -func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) { - inode, isHardlink := getLinkInfo(fi) - if !isHardlink { - return "", nil - } - - path, ok := inodes[inode] - if !ok { - inodes[inode] = name - } - return path, nil -} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_unix.go b/vendor/github.com/containerd/continuity/fs/hardlink_unix.go deleted file mode 100644 index f95f0904..00000000 --- a/vendor/github.com/containerd/continuity/fs/hardlink_unix.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "os" - "syscall" -) - -func getLinkInfo(fi os.FileInfo) (uint64, bool) { - s, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return 0, false - } - - // Ino is uint32 on bsd, uint64 on darwin/linux/solaris - return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert -} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_windows.go b/vendor/github.com/containerd/continuity/fs/hardlink_windows.go deleted file mode 100644 index 74855471..00000000 --- a/vendor/github.com/containerd/continuity/fs/hardlink_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import "os" - -func getLinkInfo(fi os.FileInfo) (uint64, bool) { - return 0, false -} diff --git a/vendor/github.com/containerd/continuity/fs/path.go b/vendor/github.com/containerd/continuity/fs/path.go deleted file mode 100644 index 8863caa9..00000000 --- a/vendor/github.com/containerd/continuity/fs/path.go +++ /dev/null @@ -1,313 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
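getLinkSource above implements a first-wins registry: the first path seen for an inode becomes the source that every later occurrence hardlinks back to. The same bookkeeping in isolation, with faked inode numbers standing in for Stat_t lookups:

package main

import "fmt"

func linkSource(name string, inode uint64, linked bool, inodes map[uint64]string) string {
	if !linked {
		return "" // not a hardlink; copy content normally
	}
	if src, ok := inodes[inode]; ok {
		return src // later occurrence: hardlink back to the first path
	}
	inodes[inode] = name // first occurrence: becomes the source
	return ""
}

func main() {
	inodes := map[uint64]string{}
	fmt.Println(linkSource("/a/file", 42, true, inodes)) // "" -> copy, register source
	fmt.Println(linkSource("/b/file", 42, true, inodes)) // "/a/file" -> link to it
}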
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package fs
-
-import (
-	"bytes"
-	"context"
-	"io"
-	"os"
-	"path/filepath"
-
-	"github.com/pkg/errors"
-)
-
-var (
-	errTooManyLinks = errors.New("too many links")
-)
-
-type currentPath struct {
-	path     string
-	f        os.FileInfo
-	fullPath string
-}
-
-func pathChange(lower, upper *currentPath) (ChangeKind, string) {
-	if lower == nil {
-		if upper == nil {
-			panic("cannot compare nil paths")
-		}
-		return ChangeKindAdd, upper.path
-	}
-	if upper == nil {
-		return ChangeKindDelete, lower.path
-	}
-
-	switch i := directoryCompare(lower.path, upper.path); {
-	case i < 0:
-		// File in lower that is not in upper
-		return ChangeKindDelete, lower.path
-	case i > 0:
-		// File in upper that is not in lower
-		return ChangeKindAdd, upper.path
-	default:
-		return ChangeKindModify, upper.path
-	}
-}
-
-func directoryCompare(a, b string) int {
-	l := len(a)
-	if len(b) < l {
-		l = len(b)
-	}
-	for i := 0; i < l; i++ {
-		c1, c2 := a[i], b[i]
-		if c1 == filepath.Separator {
-			c1 = byte(0)
-		}
-		if c2 == filepath.Separator {
-			c2 = byte(0)
-		}
-		if c1 < c2 {
-			return -1
-		}
-		if c1 > c2 {
-			return +1
-		}
-	}
-	if len(a) < len(b) {
-		return -1
-	}
-	if len(a) > len(b) {
-		return +1
-	}
-	return 0
-}
-
-func sameFile(f1, f2 *currentPath) (bool, error) {
-	if os.SameFile(f1.f, f2.f) {
-		return true, nil
-	}
-
-	equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys())
-	if err != nil || !equalStat {
-		return equalStat, err
-	}
-
-	if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq {
-		return eq, err
-	}
-
-	// If not a directory, also check size, modtime, and content
-	if !f1.f.IsDir() {
-		if f1.f.Size() != f2.f.Size() {
-			return false, nil
-		}
-		t1 := f1.f.ModTime()
-		t2 := f2.f.ModTime()
-
-		if t1.Unix() != t2.Unix() {
-			return false, nil
-		}
-
-		// If the timestamp may have been truncated in both of the
-		// files, check content of file to determine difference
-		if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 {
-			var eq bool
-			if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink {
-				eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath)
-			} else if f1.f.Size() > 0 {
-				eq, err = compareFileContent(f1.fullPath, f2.fullPath)
-			}
-			if err != nil || !eq {
-				return eq, err
-			}
-		} else if t1.Nanosecond() != t2.Nanosecond() {
-			return false, nil
-		}
-	}
-
-	return true, nil
-}
-
-func compareSymlinkTarget(p1, p2 string) (bool, error) {
-	t1, err := os.Readlink(p1)
-	if err != nil {
-		return false, err
-	}
-	t2, err := os.Readlink(p2)
-	if err != nil {
-		return false, err
-	}
-	return t1 == t2, nil
-}
-
-const compareChuckSize = 32 * 1024
-
-// compareFileContent compares the content of two same-sized files
-// by comparing each byte.
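directoryCompare above maps the path separator to byte zero so that a directory always sorts before any sibling whose name shares its prefix; plain byte ordering would misplace names containing characters below '/'. A small demonstration of the same rule, hard-coding the Unix separator:

package main

import "fmt"

func dirCompare(a, b string) int { // same ordering rule as directoryCompare
	l := len(a)
	if len(b) < l {
		l = len(b)
	}
	for i := 0; i < l; i++ {
		c1, c2 := a[i], b[i]
		if c1 == '/' {
			c1 = 0
		}
		if c2 == '/' {
			c2 = 0
		}
		if c1 != c2 {
			if c1 < c2 {
				return -1
			}
			return 1
		}
	}
	return len(a) - len(b)
}

func main() {
	// '/' (0x2f) > '+' (0x2b) as raw bytes, but the walk must emit /a/b first.
	fmt.Println(dirCompare("/a/b", "/a+b") < 0) // true
	fmt.Println("/a/b" < "/a+b")                // false: plain ordering disagrees
}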
-func compareFileContent(p1, p2 string) (bool, error) { - f1, err := os.Open(p1) - if err != nil { - return false, err - } - defer f1.Close() - f2, err := os.Open(p2) - if err != nil { - return false, err - } - defer f2.Close() - - b1 := make([]byte, compareChuckSize) - b2 := make([]byte, compareChuckSize) - for { - n1, err1 := f1.Read(b1) - if err1 != nil && err1 != io.EOF { - return false, err1 - } - n2, err2 := f2.Read(b2) - if err2 != nil && err2 != io.EOF { - return false, err2 - } - if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) { - return false, nil - } - if err1 == io.EOF && err2 == io.EOF { - return true, nil - } - } -} - -func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error { - return filepath.Walk(root, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(root, path) - if err != nil { - return err - } - - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - p := ¤tPath{ - path: path, - f: f, - fullPath: filepath.Join(root, path), - } - - select { - case <-ctx.Done(): - return ctx.Err() - case pathC <- p: - return nil - } - }) -} - -func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case p := <-pathC: - return p, nil - } -} - -// RootPath joins a path with a root, evaluating and bounding any -// symlink to the root directory. -func RootPath(root, path string) (string, error) { - if path == "" { - return root, nil - } - var linksWalked int // to protect against cycles - for { - i := linksWalked - newpath, err := walkLinks(root, path, &linksWalked) - if err != nil { - return "", err - } - path = newpath - if i == linksWalked { - newpath = filepath.Join("/", newpath) - if path == newpath { - return filepath.Join(root, newpath), nil - } - path = newpath - } - } -} - -func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) { - if *linksWalked > 255 { - return "", false, errTooManyLinks - } - - path = filepath.Join("/", path) - if path == "/" { - return path, false, nil - } - realPath := filepath.Join(root, path) - - fi, err := os.Lstat(realPath) - if err != nil { - // If path does not yet exist, treat as non-symlink - if os.IsNotExist(err) { - return path, false, nil - } - return "", false, err - } - if fi.Mode()&os.ModeSymlink == 0 { - return path, false, nil - } - newpath, err = os.Readlink(realPath) - if err != nil { - return "", false, err - } - *linksWalked++ - return newpath, true, nil -} - -func walkLinks(root, path string, linksWalked *int) (string, error) { - switch dir, file := filepath.Split(path); { - case dir == "": - newpath, _, err := walkLink(root, file, linksWalked) - return newpath, err - case file == "": - if os.IsPathSeparator(dir[len(dir)-1]) { - if dir == "/" { - return dir, nil - } - return walkLinks(root, dir[:len(dir)-1], linksWalked) - } - newpath, _, err := walkLink(root, dir, linksWalked) - return newpath, err - default: - newdir, err := walkLinks(root, dir, linksWalked) - if err != nil { - return "", err - } - newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked) - if err != nil { - return "", err - } - if !islink { - return newpath, nil - } - if filepath.IsAbs(newpath) { - return newpath, nil - } - return filepath.Join(newdir, newpath), nil - } -} diff --git 
a/vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go b/vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go deleted file mode 100644 index cb7400a3..00000000 --- a/vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build darwin freebsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "syscall" - "time" -) - -// StatAtime returns the access time from a stat struct -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atimespec -} - -// StatCtime returns the created time from a stat struct -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctimespec -} - -// StatMtime returns the modified time from a stat struct -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtimespec -} - -// StatATimeAsTime returns the access time as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go b/vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go deleted file mode 100644 index c68df6e5..00000000 --- a/vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build linux openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - // The int64 conversions ensure the line compiles for 32-bit systems as well. - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/continuity/fs/time.go b/vendor/github.com/containerd/continuity/fs/time.go deleted file mode 100644 index cde45612..00000000 --- a/vendor/github.com/containerd/continuity/fs/time.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - Copyright The containerd Authors. 
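RootPath, shown a little earlier in path.go, is the symlink-bounded counterpart of filepath.Join: links are resolved as if root were /, so a hostile symlink cannot escape the tree. A usage sketch with illustrative paths:

package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Even if /srv/rootfs/etc/passwd is a symlink to ../../../../etc/passwd,
	// resolution stays bounded to /srv/rootfs.
	p, err := fs.RootPath("/srv/rootfs", "/etc/passwd")
	if err != nil {
		log.Fatal(err) // e.g. "too many links" for a symlink cycle
	}
	fmt.Println(p) // a path under /srv/rootfs, never the host /etc/passwd
}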
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package fs
-
-import "time"
-
-// GNU tar and the Go tar writer don't have sub-second mtime
-// precision, which is problematic when we apply changes via tar
-// files. We handle this by comparing for exact times, *or* same
-// second count and either a or b having exactly 0 nanoseconds
-func sameFsTime(a, b time.Time) bool {
-	return a == b ||
-		(a.Unix() == b.Unix() &&
-			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
-}
diff --git a/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go b/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go
deleted file mode 100644
index 0bfa6a04..00000000
--- a/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build !windows
-
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package syscallx
-
-import "syscall"
-
-// Readlink returns the destination of the named symbolic link.
-func Readlink(path string, buf []byte) (n int, err error) {
-	return syscall.Readlink(path, buf)
-}
diff --git a/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go b/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go
deleted file mode 100644
index 2ba81499..00000000
--- a/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
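sameFsTime above encodes the tar truncation rule: equal seconds plus a zero nanosecond field on either side count as equal. Its behavior on a few values (the helper is copied verbatim from the deleted file):

package main

import (
	"fmt"
	"time"
)

func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	full := time.Unix(100, 123456789)
	truncated := time.Unix(100, 0) // what a tar round-trip leaves behind
	other := time.Unix(100, 5)

	fmt.Println(sameFsTime(full, truncated)) // true: one side lost precision
	fmt.Println(sameFsTime(full, other))     // false: both have real nanoseconds
}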
-*/ - -package syscallx - -import ( - "syscall" - "unsafe" -) - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - - // GenericReparseBuffer - reparseBuffer byte -} - -type mountPointReparseBuffer struct { - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 - PathBuffer [1]uint16 -} - -type symbolicLinkReparseBuffer struct { - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 - Flags uint32 - PathBuffer [1]uint16 -} - -const ( - _IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 - _SYMLINK_FLAG_RELATIVE = 1 -) - -// Readlink returns the destination of the named symbolic link. -func Readlink(path string, buf []byte) (n int, err error) { - fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), syscall.GENERIC_READ, 0, nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) - if err != nil { - return -1, err - } - defer syscall.CloseHandle(fd) - - rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE) - var bytesReturned uint32 - err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) - if err != nil { - return -1, err - } - - rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) - var s string - switch rdb.ReparseTag { - case syscall.IO_REPARSE_TAG_SYMLINK: - data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) - p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) - s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2]) - if data.Flags&_SYMLINK_FLAG_RELATIVE == 0 { - if len(s) >= 4 && s[:4] == `\??\` { - s = s[4:] - switch { - case len(s) >= 2 && s[1] == ':': // \??\C:\foo\bar - // do nothing - case len(s) >= 4 && s[:4] == `UNC\`: // \??\UNC\foo\bar - s = `\\` + s[4:] - default: - // unexpected; do nothing - } - } else { - // unexpected; do nothing - } - } - case _IO_REPARSE_TAG_MOUNT_POINT: - data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) - p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) - s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2]) - if len(s) >= 4 && s[:4] == `\??\` { // \??\C:\foo\bar - if len(s) < 48 || s[:11] != `\??\Volume{` { - s = s[4:] - } - } else { - // unexpected; do nothing - } - default: - // the path is not a symlink or junction but another type of reparse - // point - return -1, syscall.ENOENT - } - n = copy(buf, []byte(s)) - - return n, nil -} diff --git a/vendor/github.com/containerd/continuity/sysx/README.md b/vendor/github.com/containerd/continuity/sysx/README.md deleted file mode 100644 index ad7aee53..00000000 --- a/vendor/github.com/containerd/continuity/sysx/README.md +++ /dev/null @@ -1,3 +0,0 @@ -This package is for internal use only. It is intended to only have -temporary changes before they are upstreamed to golang.org/x/sys/ -(a.k.a. https://github.com/golang/sys). diff --git a/vendor/github.com/containerd/continuity/sysx/file_posix.go b/vendor/github.com/containerd/continuity/sysx/file_posix.go deleted file mode 100644 index e28f3a1b..00000000 --- a/vendor/github.com/containerd/continuity/sysx/file_posix.go +++ /dev/null @@ -1,128 +0,0 @@ -/* - Copyright The containerd Authors. 
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package sysx
-
-import (
-	"os"
-	"path/filepath"
-
-	"github.com/containerd/continuity/syscallx"
-)
-
-// Readlink returns the destination of the named symbolic link.
-// If there is an error, it will be of type *PathError.
-func Readlink(name string) (string, error) {
-	for len := 128; ; len *= 2 {
-		b := make([]byte, len)
-		n, e := fixCount(syscallx.Readlink(fixLongPath(name), b))
-		if e != nil {
-			return "", &os.PathError{Op: "readlink", Path: name, Err: e}
-		}
-		if n < len {
-			return string(b[0:n]), nil
-		}
-	}
-}
-
-// Many functions in package syscall return a count of -1 instead of 0.
-// Using fixCount(call()) instead of call() corrects the count.
-func fixCount(n int, err error) (int, error) {
-	if n < 0 {
-		n = 0
-	}
-	return n, err
-}
-
-// fixLongPath returns the extended-length (\\?\-prefixed) form of
-// path when needed, in order to avoid the default 260 character file
-// path limit imposed by Windows. If path is not easily converted to
-// the extended-length form (for example, if path is a relative path
-// or contains .. elements), or is short enough, fixLongPath returns
-// path unmodified.
-//
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
-func fixLongPath(path string) string {
-	// Do nothing (and don't allocate) if the path is "short".
-	// Empirically (at least on the Windows Server 2013 builder),
-	// the kernel is arbitrarily okay with < 248 bytes. That
-	// matches what the docs above say:
-	// "When using an API to create a directory, the specified
-	// path cannot be so long that you cannot append an 8.3 file
-	// name (that is, the directory name cannot exceed MAX_PATH
-	// minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
-	//
-	// The MSDN docs appear to say that a normal path that is 248 bytes long
-	// will work; empirically the path must be less than 248 bytes long.
-	if len(path) < 248 {
-		// Don't fix. (This is how Go 1.7 and earlier worked,
-		// not automatically generating the \\?\ form)
-		return path
-	}
-
-	// The extended form begins with \\?\, as in
-	// \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
-	// The extended form disables evaluation of . and .. path
-	// elements and disables the interpretation of / as equivalent
-	// to \. The conversion here rewrites / to \ and elides
-	// . elements as well as trailing or duplicate separators. For
-	// simplicity it avoids the conversion entirely for relative
-	// paths or paths containing .. elements. For now,
-	// \\server\share paths are not converted to
-	// \\?\UNC\server\share paths because the rules for doing so
-	// are less well-specified.
-	if len(path) >= 2 && path[:2] == `\\` {
-		// Don't canonicalize UNC paths.
- return path - } - if !filepath.IsAbs(path) { - // Relative path - return path - } - - const prefix = `\\?` - - pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) - copy(pathbuf, prefix) - n := len(path) - r, w := 0, len(prefix) - for r < n { - switch { - case os.IsPathSeparator(path[r]): - // empty block - r++ - case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): - // /./ - r++ - case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): - // /../ is currently unhandled - return path - default: - pathbuf[w] = '\\' - w++ - for ; r < n && !os.IsPathSeparator(path[r]); r++ { - pathbuf[w] = path[r] - w++ - } - } - } - // A drive's root directory needs a trailing \ - if w == len(`\\?\c:`) { - pathbuf[w] = '\\' - w++ - } - return string(pathbuf[:w]) -} diff --git a/vendor/github.com/containerd/continuity/sysx/generate.sh b/vendor/github.com/containerd/continuity/sysx/generate.sh deleted file mode 100644 index 87d708d7..00000000 --- a/vendor/github.com/containerd/continuity/sysx/generate.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -# Copyright The containerd Authors. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -set -e - -mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl" - -fix() { - sed 's,^package syscall$,package sysx,' \ - | sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \ - | gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \ - | gofmt -r='Syscall6 -> syscall.Syscall6' \ - | gofmt -r='Syscall -> syscall.Syscall' \ - | gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \ - | gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \ - | gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \ - | gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \ - | gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \ - | gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \ - | gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \ - | gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR' -} - -if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then - echo "Must specify \$GOARCH and \$GOOS" - exit 1 -fi - -mkargs="" - -if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then - mkargs="-l32" -fi - -for f in "$@"; do - $mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go" -done - diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_linux.go b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go deleted file mode 100644 index 28ce5d8d..00000000 --- a/vendor/github.com/containerd/continuity/sysx/nodata_linux.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
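The sysx.Readlink wrapper earlier in this file doubles its buffer until the target fits, because readlink(2) silently truncates. The same pattern as a standalone helper against golang.org/x/sys/unix (an assumed but conventional dependency; the example path is Linux-specific):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func readlinkAll(path string) (string, error) {
	for size := 128; ; size *= 2 {
		buf := make([]byte, size)
		n, err := unix.Readlink(path, buf)
		if err != nil {
			return "", err
		}
		if n < size {
			return string(buf[:n]), nil // target fit; no truncation possible
		}
		// n == size: the result may be truncated, retry with a bigger buffer.
	}
}

func main() {
	target, err := readlinkAll("/proc/self/exe")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(target)
}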
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sysx - -import ( - "syscall" -) - -const ENODATA = syscall.ENODATA diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go deleted file mode 100644 index e0575f44..00000000 --- a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sysx - -import ( - "syscall" -) - -// This should actually be a set that contains ENOENT and EPERM -const ENODATA = syscall.ENOENT diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_unix.go b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go deleted file mode 100644 index de4b3d50..00000000 --- a/vendor/github.com/containerd/continuity/sysx/nodata_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build darwin freebsd openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sysx - -import ( - "syscall" -) - -const ENODATA = syscall.ENOATTR diff --git a/vendor/github.com/containerd/continuity/sysx/xattr.go b/vendor/github.com/containerd/continuity/sysx/xattr.go deleted file mode 100644 index db6fe70f..00000000 --- a/vendor/github.com/containerd/continuity/sysx/xattr.go +++ /dev/null @@ -1,117 +0,0 @@ -// +build linux darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package sysx - -import ( - "bytes" - - "golang.org/x/sys/unix" -) - -// Listxattr calls syscall listxattr and reads all content -// and returns a string array -func Listxattr(path string) ([]string, error) { - return listxattrAll(path, unix.Listxattr) -} - -// Removexattr calls syscall removexattr -func Removexattr(path string, attr string) (err error) { - return unix.Removexattr(path, attr) -} - -// Setxattr calls syscall setxattr -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - return unix.Setxattr(path, attr, data, flags) -} - -// Getxattr calls syscall getxattr -func Getxattr(path, attr string) ([]byte, error) { - return getxattrAll(path, attr, unix.Getxattr) -} - -// LListxattr lists xattrs, not following symlinks -func LListxattr(path string) ([]string, error) { - return listxattrAll(path, unix.Llistxattr) -} - -// LRemovexattr removes an xattr, not following symlinks -func LRemovexattr(path string, attr string) (err error) { - return unix.Lremovexattr(path, attr) -} - -// LSetxattr sets an xattr, not following symlinks -func LSetxattr(path string, attr string, data []byte, flags int) (err error) { - return unix.Lsetxattr(path, attr, data, flags) -} - -// LGetxattr gets an xattr, not following symlinks -func LGetxattr(path, attr string) ([]byte, error) { - return getxattrAll(path, attr, unix.Lgetxattr) -} - -const defaultXattrBufferSize = 128 - -type listxattrFunc func(path string, dest []byte) (int, error) - -func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) { - buf := make([]byte, defaultXattrBufferSize) - n, err := listFunc(path, buf) - for err == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - n, err = listFunc(path, []byte{}) - if err != nil { - return nil, err - } - buf = make([]byte, n) - n, err = listFunc(path, buf) - } - if err != nil { - return nil, err - } - - ps := bytes.Split(bytes.TrimSuffix(buf[:n], []byte{0}), []byte{0}) - var entries []string - for _, p := range ps { - if len(p) > 0 { - entries = append(entries, string(p)) - } - } - - return entries, nil -} - -type getxattrFunc func(string, string, []byte) (int, error) - -func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) { - buf := make([]byte, defaultXattrBufferSize) - n, err := getFunc(path, attr, buf) - for err == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - n, err = getFunc(path, attr, []byte{}) - if err != nil { - return nil, err - } - buf = make([]byte, n) - n, err = getFunc(path, attr, buf) - } - if err != nil { - return nil, err - } - return buf[:n], nil -} diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go deleted file mode 100644 index c9ef3a1d..00000000 --- a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build !linux,!darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
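Callers of the xattr helpers above never see the ERANGE resize dance; it is internal to listxattrAll and getxattrAll. A usage sketch (the path and attribute name are placeholders, the file must already exist, and this builds on Linux or Darwin only):

package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/sysx"
)

func main() {
	// Write then read back an extended attribute without following symlinks.
	if err := sysx.LSetxattr("/tmp/demo", "user.comment", []byte("hello"), 0); err != nil {
		log.Fatal(err)
	}
	val, err := sysx.LGetxattr("/tmp/demo", "user.comment")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", val) // hello

	names, _ := sysx.LListxattr("/tmp/demo")
	fmt.Println(names) // [user.comment]
}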
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sysx - -import ( - "errors" - "runtime" -) - -var unsupported = errors.New("extended attributes unsupported on " + runtime.GOOS) - -// Listxattr calls syscall listxattr and reads all content -// and returns a string array -func Listxattr(path string) ([]string, error) { - return []string{}, nil -} - -// Removexattr calls syscall removexattr -func Removexattr(path string, attr string) (err error) { - return unsupported -} - -// Setxattr calls syscall setxattr -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - return unsupported -} - -// Getxattr calls syscall getxattr -func Getxattr(path, attr string) ([]byte, error) { - return []byte{}, unsupported -} - -// LListxattr lists xattrs, not following symlinks -func LListxattr(path string) ([]string, error) { - return []string{}, nil -} - -// LRemovexattr removes an xattr, not following symlinks -func LRemovexattr(path string, attr string) (err error) { - return unsupported -} - -// LSetxattr sets an xattr, not following symlinks -func LSetxattr(path string, attr string, data []byte, flags int) (err error) { - return unsupported -} - -// LGetxattr gets an xattr, not following symlinks -func LGetxattr(path, attr string) ([]byte, error) { - return []byte{}, nil -} diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE deleted file mode 100644 index e06d2081..00000000 --- a/vendor/github.com/docker/distribution/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go
deleted file mode 100644
index 71327dca..00000000
--- a/vendor/github.com/docker/distribution/digestset/set.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package digestset
-
-import (
-	"errors"
-	"sort"
-	"strings"
-	"sync"
-
-	digest "github.com/opencontainers/go-digest"
-)
-
-var (
-	// ErrDigestNotFound is used when a matching digest
-	// could not be found in a set.
-	ErrDigestNotFound = errors.New("digest not found")
-
-	// ErrDigestAmbiguous is used when multiple digests
-	// are found in a set. None of the matching digests
-	// should be considered valid matches.
-	ErrDigestAmbiguous = errors.New("ambiguous digest string")
-)
-
-// Set is used to hold a unique set of digests which
-// may be easily referenced by a string
-// representation of the digest as well as short representation.
-// The uniqueness of the short representation is based on other -// digests in the set. If digests are omitted from this set, -// collisions in a larger set may not be detected, therefore it -// is important to always do short representation lookups on -// the complete set of digests. To mitigate collisions, an -// appropriately long short code should be used. -type Set struct { - mutex sync.RWMutex - entries digestEntries -} - -// NewSet creates an empty set of digests -// which may have digests added. -func NewSet() *Set { - return &Set{ - entries: digestEntries{}, - } -} - -// checkShortMatch checks whether two digests match as either whole -// values or short values. This function does not test equality, -// rather whether the second value could match against the first -// value. -func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value. -func (dst *Set) Lookup(d string) (digest.Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg digest.Algorithm - hex string - ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. 
An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []digest.Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. -func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg digest.Algorithm - val string - digest digest.Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go deleted file mode 100644 index 978df7ea..00000000 --- a/vendor/github.com/docker/distribution/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. 
-func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go deleted file mode 100644 index b3dfb7a6..00000000 --- a/vendor/github.com/docker/distribution/reference/normalize.go +++ /dev/null @@ -1,199 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digestset" - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. -func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// ParseDockerRef normalizes the image reference following the docker convention. This is added -// mainly for backward compatibility. -// The reference returned can only be either tagged or digested. For reference contains both tag -// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ -// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. -func ParseDockerRef(ref string) (Named, error) { - named, err := ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - if _, ok := named.(NamedTagged); ok { - if canonical, ok := named.(Canonical); ok { - // The reference is both tagged and digested, only - // return digested. 
-			newNamed, err := WithName(canonical.Name())
-			if err != nil {
-				return nil, err
-			}
-			newCanonical, err := WithDigest(newNamed, canonical.Digest())
-			if err != nil {
-				return nil, err
-			}
-			return newCanonical, nil
-		}
-	}
-	return TagNameOnly(named), nil
-}
-
-// splitDockerDomain splits a repository name to domain and remotename string.
-// If no valid domain is found, the default domain is used. Repository name
-// needs to be already validated before.
-func splitDockerDomain(name string) (domain, remainder string) {
-	i := strings.IndexRune(name, '/')
-	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
-		domain, remainder = defaultDomain, name
-	} else {
-		domain, remainder = name[:i], name[i+1:]
-	}
-	if domain == legacyDefaultDomain {
-		domain = defaultDomain
-	}
-	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
-		remainder = officialRepoName + "/" + remainder
-	}
-	return
-}
-
-// familiarizeName returns a shortened version of the name familiar
-// to the Docker UI. Familiar names have the default domain
-// "docker.io" and "library/" repository prefix removed.
-// For example, "docker.io/library/redis" will have the familiar
-// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
-// Returns a familiarized named only reference.
-func familiarizeName(named namedRepository) repository {
-	repo := repository{
-		domain: named.Domain(),
-		path:   named.Path(),
-	}
-
-	if repo.domain == defaultDomain {
-		repo.domain = ""
-		// Handle official repositories which have the pattern "library/<official repo name>"
-		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
-			repo.path = split[1]
-		}
-	}
-	return repo
-}
-
-func (r reference) Familiar() Named {
-	return reference{
-		namedRepository: familiarizeName(r.namedRepository),
-		tag:             r.tag,
-		digest:          r.digest,
-	}
-}
-
-func (r repository) Familiar() Named {
-	return familiarizeName(r)
-}
-
-func (t taggedReference) Familiar() Named {
-	return taggedReference{
-		namedRepository: familiarizeName(t.namedRepository),
-		tag:             t.tag,
-	}
-}
-
-func (c canonicalReference) Familiar() Named {
-	return canonicalReference{
-		namedRepository: familiarizeName(c.namedRepository),
-		digest:          c.digest,
-	}
-}
-
-// TagNameOnly adds the default tag "latest" to a reference if it only has
-// a repo name.
-func TagNameOnly(ref Named) Named {
-	if IsNameOnly(ref) {
-		namedTagged, err := WithTag(ref, defaultTag)
-		if err != nil {
-			// Default tag must be valid, to create a NamedTagged
-			// type with non-validated input the WithTag function
-			// should be used instead
-			panic(err)
-		}
-		return namedTagged
-	}
-	return ref
-}
-
-// ParseAnyReference parses a reference string as a possible identifier,
-// full digest, or familiar name.
-func ParseAnyReference(ref string) (Reference, error) {
-	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
-		return digestReference("sha256:" + ref), nil
-	}
-	if dgst, err := digest.Parse(ref); err == nil {
-		return digestReference(dgst), nil
-	}
-
-	return ParseNormalizedNamed(ref)
-}
-
-// ParseAnyReferenceWithSet parses a reference string as a possible short
-// identifier to be matched in a digest set, a full digest, or familiar name.
-func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go deleted file mode 100644 index 8c0c23b2..00000000 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. 
-type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// DEPRECATED: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. 
-func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. 
-func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go deleted file mode 100644 index 78603493..00000000 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ /dev/null @@ -1,143 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. 
- DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. 
-func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index 4c35b879..00000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,267 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. 
-type Error struct {
-	Code    ErrorCode   `json:"code"`
-	Message string      `json:"message"`
-	Detail  interface{} `json:"detail,omitempty"`
-
-	// TODO(duglin): See if we need an "args" property so we can do the
-	// variable substitution right before showing the message to the user
-}
-
-var _ error = Error{}
-
-// ErrorCode returns the ID/Value of this Error
-func (e Error) ErrorCode() ErrorCode {
-	return e.Code
-}
-
-// Error returns a human-readable representation of the error.
-func (e Error) Error() string {
-	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
-}
-
-// WithDetail will return a new Error, based on the current one, but with
-// some Detail info added
-func (e Error) WithDetail(detail interface{}) Error {
-	return Error{
-		Code:    e.Code,
-		Message: e.Message,
-		Detail:  detail,
-	}
-}
-
-// WithArgs uses the passed-in list of interface{} as the substitution
-// variables in the Error's Message string, but returns a new Error
-func (e Error) WithArgs(args ...interface{}) Error {
-	return Error{
-		Code:    e.Code,
-		Message: fmt.Sprintf(e.Code.Message(), args...),
-		Detail:  e.Detail,
-	}
-}
-
-// ErrorDescriptor provides relevant information about a given error code.
-type ErrorDescriptor struct {
-	// Code is the error code that this descriptor describes.
-	Code ErrorCode
-
-	// Value provides a unique, string key, often capitalized with
-	// underscores, to identify the error code. This value is used as the
-	// keyed value when serializing api errors.
-	Value string
-
-	// Message is a short, human-readable description of the error condition
-	// included in API responses.
-	Message string
-
-	// Description provides a complete account of the error's purpose, suitable
-	// for use in documentation.
-	Description string
-
-	// HTTPStatusCode provides the http status code that is associated with
-	// this error condition.
-	HTTPStatusCode int
-}
-
-// ParseErrorCode returns the error code registered for the given string value.
-// `ErrorCodeUnknown` will be returned if the error is not known.
-func ParseErrorCode(value string) ErrorCode {
-	ed, ok := idToDescriptors[value]
-	if ok {
-		return ed.Code
-	}
-
-	return ErrorCodeUnknown
-}
-
-// Errors provides the envelope for multiple errors and a few sugar methods
-// for use within the application.
-type Errors []error
-
-var _ error = Errors{}
-
-func (errs Errors) Error() string {
-	switch len(errs) {
-	case 0:
-		return ""
-	case 1:
-		return errs[0].Error()
-	default:
-		msg := "errors:\n"
-		for _, err := range errs {
-			msg += err.Error() + "\n"
-		}
-		return msg
-	}
-}
-
-// Len returns the current number of errors.
-func (errs Errors) Len() int {
-	return len(errs)
-}
-
-// MarshalJSON converts slice of error, ErrorCode or Error into a
-// slice of Error - then serializes
-func (errs Errors) MarshalJSON() ([]byte, error) {
-	var tmpErrs struct {
-		Errors []Error `json:"errors,omitempty"`
-	}
-
-	for _, daErr := range errs {
-		var err Error
-
-		switch daErr := daErr.(type) {
-		case ErrorCode:
-			err = daErr.WithDetail(nil)
-		case Error:
-			err = daErr
-		default:
-			err = ErrorCodeUnknown.WithDetail(daErr)
-
-		}
-
-		// If the Error struct was set up and they forgot to set the
-		// Message field (meaning it's "") then grab it from the ErrCode
-		msg := err.Message
-		if msg == "" {
-			msg = err.Code.Message()
-		}
-
-		tmpErrs.Errors = append(tmpErrs.Errors, Error{
-			Code:    err.Code,
-			Message: msg,
-			Detail:  err.Detail,
-		})
-	}
-
-	return json.Marshal(tmpErrs)
-}
-
-// UnmarshalJSON deserializes []Error and then converts it into slice of
-// Error or ErrorCode
-func (errs *Errors) UnmarshalJSON(data []byte) error {
-	var tmpErrs struct {
-		Errors []Error
-	}
-
-	if err := json.Unmarshal(data, &tmpErrs); err != nil {
-		return err
-	}
-
-	var newErrs Errors
-	for _, daErr := range tmpErrs.Errors {
-		// If Message is empty or exactly matches the Code's message string
-		// then just use the Code, no need for a full Error struct
-		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
-			// Errors w/o details get converted to ErrorCode
-			newErrs = append(newErrs, daErr.Code)
-		} else {
-			// Errors w/ details are untouched
-			newErrs = append(newErrs, Error{
-				Code:    daErr.Code,
-				Message: daErr.Message,
-				Detail:  daErr.Detail,
-			})
-		}
-	}
-
-	*errs = newErrs
-	return nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
deleted file mode 100644
index d77e7047..00000000
--- a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package errcode
-
-import (
-	"encoding/json"
-	"net/http"
-)
-
-// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
-// and sets the content-type header to 'application/json'. It will handle
-// ErrorCoder and Errors, and if necessary will create an envelope.
-func ServeJSON(w http.ResponseWriter, err error) error {
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	var sc int
-
-	switch errs := err.(type) {
-	case Errors:
-		if len(errs) < 1 {
-			break
-		}
-
-		if err, ok := errs[0].(ErrorCoder); ok {
-			sc = err.ErrorCode().Descriptor().HTTPStatusCode
-		}
-	case ErrorCoder:
-		sc = errs.ErrorCode().Descriptor().HTTPStatusCode
-		err = Errors{err} // create an envelope.
-	default:
-		// We just have an unhandled error type, so just place in an envelope
-		// and move along.
- err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - return json.NewEncoder(w).Encode(err) -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index d1e8826c..00000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,138 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS deleted file mode 100644 index d5b6cbbe..00000000 --- a/vendor/github.com/docker/docker/AUTHORS +++ /dev/null @@ -1,2082 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Hnatiw -Aaron Huslage -Aaron L. 
Xu -Aaron Lehmann -Aaron Welch -Aaron.L.Xu -Abel Muiño -Abhijeet Kasurde -Abhinandan Prativadi -Abhinav Ajgaonkar -Abhishek Chanda -Abhishek Sharma -Abin Shahab -Adam Avilla -Adam Eijdenberg -Adam Kunk -Adam Miller -Adam Mills -Adam Pointer -Adam Singer -Adam Walz -Addam Hardy -Aditi Rajagopal -Aditya -Adnan Khan -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akash Gupta -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle -Al Tobey -alambike -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Alejandro González Hevia -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Chen -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Goodman -Alex Olshansky -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Midlash -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre Garnier -Alexandre González -Alexandre Jomin -Alexandru Sfirlogea -Alexei Margasov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Alfred Landrum -Ali Dehghani -Alicia Lauerman -Alihan Demir -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Deng -Alvin Richards -amangoel -Amen Belayneh -Amir Goldstein -Amit Bakshi -Amit Krishnan -Amit Shukla -Amr Gawish -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anchal Agrawal -Anda Xu -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Luzzardi -Andrea Turli -Andreas Elvers -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrei Vagin -Andrew C. Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew He -Andrew Hsu -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew McDonnell -Andrew Munsell -Andrew Pennebaker -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Kolomentsev -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Anil Madhavapeddy -Ankit Jain -Ankush Agarwal -Anonmily -Anran Qiao -Anshul Pundir -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anthony Sottile -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -Arash Deshmeh -ArikaChen -Arnaud Lefebvre -Arnaud Porterie -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asad Saeeduddin -Asbjørn Enge -averagehuman -Avi Das -Avi Kivity -Avi Miller -Avi Vaid -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Bonnefoy -Ben Firshman -Ben Golub -Ben Gould -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Yolken -Benoit Chesneau -Bernerd Schaefer -Bernhard M. 
Wiedemann -Bert Goethals -Bevisy Zhang -Bharath Thiruveedula -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Bily Zhang -Bin Liu -Bingshen Wang -Blake Geno -Boaz Shuster -bobby abbott -Boris Pruessmann -Boshi Lian -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brett Randall -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Schwind -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Brielle Broder -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bruno Tavares -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. Su -Carlo Mion -Carlos Alexandro Becker -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Catalin Pirvu -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Drage -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Chuanliang -Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengfei Shang -Chengguang Xu -chenyuzhu -Chetan Birajdar -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dias -Chris Dituri -Chris Fordham -Chris Gavin -Chris Gibson -Chris Khoo -Chris McKinnel -Chris McKinnel -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Telfer -Chris Wahl -Chris Weyl -Chris White -Christian Berendt -Christian Brauner -Christian Böhme -Christian Muehlhaeuser -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -Christophe Mehay -Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. Costa -Clayton Coleman -Clinton Kitson -Cody Roseborough -Coenraad Loubser -Colin Dunklau -Colin Hebert -Colin Panisset -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Corbin Coleman -Corey Farrell -Cory Forsyth -cressie176 -CrimsonGlory -Cristian Staretu -cristiano balducci -Cruceru Calin-Cristian -CUI Wei -Cyprian Gracz -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Williams -Dani Hodovic -Dani Louca -Daniel Antlinger -Daniel Dao -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Grunwell -Daniel Hiltgen -Daniel J Walsh -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel Watkins -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Danny Berger -Danny Yates -Danyal Khaliq -Darren Coxall -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Davanum Srinivas -Dave Barboza -Dave Goodchild -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Calavera -David Chung -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Glasser -David Lawrence -David Lechner -David M. 
Karr -David Mackey -David Mat -David Mcanulty -David McKay -David P Hilton -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Wang <00107082@163.com> -David Williamson -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -Debayan De -Deborah Gertrude Digges -deed02392 -Deep Debroy -Deng Guangxing -Deni Bertovic -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Chen -Dennis Chen -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devvyn Murphy -Dharmit Shah -Dhawal Yogesh Bhanushali -Diego Romero -Diego Siqueira -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Mandalidis -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Ding Fei -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Kononenko -Dmitry Sharshakov -Dmitry Shyshkin -Dmitry Smirnov -Dmitry V. Krivenok -Dmitry Vorobev -Dolph Mathews -Dominik Dingel -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donghwa Kim -Donovan Jones -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Douglas Curtis -Dr Nic Williams -dragon788 -Dražen Lučanin -Drew Erny -Drew Hubl -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Davtyan -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -epeterso -Eric Barch -Eric Curtin -Eric G. Noriega -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Soderstrom -Eric Yang -Eric-Olivier Lamey -Erica Windisch -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik St. Martin -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Ethan Bell -Euan Kemp -Eugen Krizo -Eugene Yakubovich -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evelyn Xu -Everett Toews -Evgeny Shmarnev -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Ezra Silvera -Fabian Kramm -Fabian Lauer -Fabian Raetz -Fabiano Rosas -Fabio Falci -Fabio Kung -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangming Fang -Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felipe Oliveira -Felix Abecassis -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Feng Yan -Fengtu Wang -Ferenc Szabo -Fernando -Fero Volar -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -Flavio Castelli -Flavio Crisciani -Florian -Florian Klein -Florian Maier -Florian Noeding -Florian Weingarten -Florin Asavoaie -Florin Patan -fonglh -Foysal Iqbal -Francesc Campoy -Francesco Mari -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. 
Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Frieder Bluemle -Fu JinLin -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Linder -Gabriel Monroy -Gabriel Nicolas Avellaneda -Gaetan de Villele -Galen Sampson -Gang Qiao -Gareth Rushgrove -Garrett Barboza -Gary Schaetz -Gaurav -gautam, prasanna -Gaël PORTAY -Genki Takiuchi -GennadySpb -Geoffrey Bachelet -Geon Kim -George Kontridze -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini -Gianluca Borello -Gildas Cuisinier -Giovan Isa Musthofa -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Gopikannan Venugopalsamy -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Millar -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Pflaum -Greg Stephens -Greg Thornton -Grzegorz Jaśkiewicz -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Guri -Gurjeet Singh -Guruprasad -Gustav Sinder -gwx296173 -Günter Zöchbauer -haikuoliu -Hakan Özler -Hamish Hutchings -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harley Laue -Harold Cooper -Harrison Turton -Harry Zhang -Harshal Patil -Harshal Patil -He Simei -He Xiaoxi -He Xin -heartlock <21521209@zju.edu.cn> -Hector Castro -Helen Xie -Henning Sprang -Hiroshi Hatake -Hiroyuki Sasagawa -Hobofan -Hollie Teal -Hong Xu -Hongbin Lu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -Hyzhou Zhy -Iago López Galeiras -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Campbell -Ian Chen -Ian Lee -Ian Main -Ian Philpot -Ian Truslove -Iavael -Icaro Seara -Ignacio Capurro -Igor Dolzhikov -Igor Karpovich -Iliana Weller -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -Ilya Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Innovimax -Isaac Dupree -Isabel Jimenez -Isao Jonas -Iskander Sharipov -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -Ivan Markin -J Bruni -J. Nunn -Jack Danger Canty -Jack Laxson -Jacob Atzen -Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaivish Kothari -Jake Champlin -Jake Moshenko -Jake Sanders -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nesbitt -James Nugent -James Turnbull -James Watkins-Harvey -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean Rouge -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Christophe Berthon -Jean-Paul Calderone -Jean-Pierre Huynh -Jean-Tiare Le Bigot -Jeeva S. 
Chelladhurai -Jeff Anderson -Jeff Hajewski -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Chambers -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -Jhon Honce -Ji.Zhilong -Jian Liao -Jian Zhang -Jiang Jinyang -Jie Luo -Jihyun Hwang -Jilles Oldenbeuving -Jim Alateras -Jim Galasyn -Jim Minter -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -Jimmy Song -Jinsoo Park -Jintao Zhang -Jiri Appl -Jiri Popelka -Jiuyue Ma -Jiří Župka -Joao Fernandes -Joao Trindade -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Harris -John Howard (VM) -John Laswell -John Maguire -John Mulhausen -John OBrien III -John Starks -John Stephens -John Tims -John V. Martinez -John Warwick -John Willis -Jon Johnson -Jon Surrell -Jon Wedaman -Jonas Pfenniger -Jonathan A. Schweder -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Choy -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Jonh Wendell -Joni Sar -Joost Cassee -Jordan Arentsen -Jordan Jennings -Jordan Sissel -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Joseph Rothrock -Josh -Josh Bodah -Josh Bonczkowski -Josh Chorlton -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josh Soref -Josh Wilson -Josiah Kiehl -José Tomás Albornoz -Joyce Jang -JP -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Kassar -Julien Maitrehenry -Julien Pervillé -Julio Montes -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justin Cormack -Justin Force -Justin Menga -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -K. Heller -Kai Blin -Kai Qiang Wu (Kennan) -Kamil Domański -Kamjar Gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Kay Yan -kayrus -Kazuhiro Sera -Ke Li -Ke Xu -Kei Ohmura -Keith Hudgins -Keli Hu -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Ken Reese -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Burke -Kevin Clark -Kevin Feyrer -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Kern -Kevin Menard -Kevin Meredith -Kevin P. Kucharczyk -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin Gribov -Konstantin L -Konstantin Pelykh -Krasi Georgiev -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristina Zabunova -Krystian Wojcicki -Kun Zhang -Kunal Kushwaha -Kunal Tyagi -Kyle Conroy -Kyle Linden -Kyle Wuolle -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. 
Damerow -Lars-Magnus Skog -Laszlo Meszaros -Laura Frank -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee Chao <932819864@qq.com> -Lee, Meng-Han -leeplay -Lei Gong -Lei Jitang -Len Weincier -Lennie -Leo Gallucci -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Daly -Lewis Marshall -Lewis Peckover -Li Yi -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -Liao Qingwei -Lifubang -Lihua Tang -Lily Guo -limsy -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -liwenqi -lixiaobing10051267 -Liz Zhang -LIZAO LI -Lizzie Dixon <_@lizzie.io> -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Opter -Luca Favatella -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Lucas Molas -Lucas Silvestre -Luciano Mores -Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -Luke Marsden -Lyn -Lynda O'Leary -Lénaïc Huard -Ma Müller -Ma Shimiao -Mabin -Madhan Raj Mookkandy -Madhav Puri -Madhu Venugopal -Mageee -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manjunath A Kumatagi -Mansi Nahar -Manuel Meurer -Manuel Rüger -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino -Marcelo Salazar -Marco Hennings -Marcus Cobden -Marcus Farkas -Marcus Linke -Marcus Martins -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark Jeromin -Mark McGranaghan -Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker -Mark West -Markan Patel -Marko Mikulicic -Marko Tibold -Markus Fix -Markus Kortlang -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Muzatko -Martin Redmond -Mary Anthony -Masahito Zembutsu -Masato Ohba -Masayuki Morita -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Champlon -Mathieu Le Marec - Pasquet -Mathieu Parent -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Richardson -Matt Rickard -Matt Robenolt -Matt Schurenko -Matt Williams -Matthew Heon -Matthew Lapworth -Matthew Mayer -Matthew Mosesohn -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Hauglustaine -Mattias Jernberg -Mauricio Garavaglia -mauriyouth -Max Shytikov -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Maximiliano Maccanti -Maxwell -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Mert Yazıcıoğlu -mgniu -Micah Zoltu -Michael A. 
Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Irwin -Michael Käufl -Michael Neale -Michael Nussbaum -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minář -Michal Wieczorek -Michaël Pailloncy -Michał Czeraszkiewicz -Michał Gryko -Michiel de Jong -Mickaël Fortunato -Mickaël Remars -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Casas -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Estes -Mike Gaffney -Mike Goelzer -Mike Leone -Mike Lundy -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miklos Szegedi -Milind Chawre -Miloslav Trmač -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -Mizuki Urushida -mlarcher -Mohammad Banikazemi -Mohammad Nasirifar -Mohammed Aaqib Ansari -Mohit Soni -Moorthy RS -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mrfly -Mrunal Patel -Muayyad Alsadi -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nace Oroz -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Natasha Jarus -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Naveed Jamil -Neal McBurnett -Neil Horman -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick DeCoursin -Nick Irvine -Nick Neisen -Nick Parker -Nick Payne -Nick Russo -Nick Stenning -Nick Stinemates -NickrenREN -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet -Nicolás Hock Isaza -Nigel Poulton -Nik Nyby -Nikhil Chawla -NikolaMandic -Nikolas Garofil -Nikolay Milovanov -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE -noducks -Nolan Darilek -Noriki Nakamura -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Oliver Reason -Olivier Gambier -Olle Jonsson -Olli Janatuinen -Omri Shiv -Oriol Francès -Oskar Niburski -Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos -Paolo G. 
Giarrusso -Pascal -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Hemmer -Patrick Stapleton -Patrik Cyvoct -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Kehrer -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Paulo Ribeiro -Pavel Lobashov -Pavel Pletenev -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Pawel Konczalski -Peeyush Gupta -Peggy Li -Pei Su -Peng Tao -Penghan Wang -Per Weijnitz -perhapszzy@sina.com -Peter Bourgon -Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Jaffe -Peter Kang -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Švihlík -Phil -Phil Estes -Phil Spitler -Philip Alexander Etling -Philip Monroe -Philipp Gillé -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -phineas -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Pradip Dhara -Prasanna Gautam -Pratik Karki -Prayag Verma -Priya Wadhwa -Projjol Banerji -Przemek Hejman -Pure White -pysqz -Qiang Huang -Qinglan Peng -qudongfang -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Radostin Stoyanov -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Raja Sami -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -RaviTeja Pothana -Ray Tsang -ReadmeCritic -Recursive Madman -Reficul -Regan McCooey -Remi Rampin -Remy Suen -Renato Riccieri Santos Zannon -Renaud Gaubert -Rhys Hiltner -Ri Xu -Ricardo N Feliciano -Rich Moyse -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Schneider -Robert Stern -Robert Terhaar -Robert Wallis -Roberto G. Hashioka -Roberto Muñoz Fernández -Robin Naundorf -Robin Schneider -Robin Speekenbrink -robpc -Rodolfo Carvalho -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Rohit Kapur -Rojin George -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Dudin -Roman Strashkin -Ron Smits -Ron Williams -Rong Gao -Rong Zhang -Rongxiang Song -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Royce Remer -Rozhnov Alexandr -Rudolph Gottesheim -Rui Cao -Rui Lopes -Runshen Zhu -Russ Magee -Ryan Abrams -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan Liu -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Simmen -Ryan Stelly -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -Ryan Zhang -ryancooper7 -RyanDeng -Ryo Nakao -Rémy Greinhofer -s. 
rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sakeven Jiang -Salahuddin Khan -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sandeep Bansal -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Sargun Dhillon -Sascha Andres -Sascha Grunert -Satnam Singh -Satoshi Amemiya -Satoshi Tagomori -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean Lee -Sean McIntyre -Sean OMeara -Sean P. Kane -Sean Rodman -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sergii Kabashniuk -Sergio Lopez -Serhat Gülçiçek -SeungUkLee -Sevki Hasirci -Shane Canon -Shane da Silva -Shaun Kaasten -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shayne Wang -Shekhar Gulati -Sheng Yang -Shengbo Song -Shev Yan -Shih-Yuan Lee -Shijiang Wei -Shijun Qin -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -shuai-z -Shukui Yang -Shuwei Hao -Sian Lerk Lau -Sidhartha Mani -sidharthamani -Silas Sewell -Silvan Jegen -Simão Reis -Simei He -Simon Eskildsen -Simon Ferquel -Simon Leinen -Simon Menke -Simon Taranto -Simon Vikstrom -Sindhu S -Sjoerd Langkemper -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Stanislav Bondarenko -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan S. -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephan Spindler -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Desmond -Steve Dougherty -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Hartland -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Subhajit Ghosh -Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sunny Gogoi -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien HOUZÉ -Sébastien Luttringer -Sébastien Stormacq -Tabakhase -Tadej Janež -TAGOMORI Satoshi -tang0th -Tangi Colin -Tatsuki Sugiura -Tatsushi Inagaki -Taylan Isikdemir -Taylor Jones -Ted M. Young -Tehmasp Chaudhri -Tejaswini Duggaraju -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Grainger -Thomas Hansen -Thomas Leonard -Thomas Léveil -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Ti Zhou -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Tim Bart -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Potter -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -Tim Zju <21651152@zju.edu.cn> -timfeirg -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Booth -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom Sweeney -Tom Wilkie -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonny Xu -Tony Abboud -Tony Daws -Tony Miller -toogley -Torstein Husebø -Tõnis Tiigi -tpng -tracylihui <793912329@qq.com> -Trapier Marshall -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -Trevor Sullivan -Trishna Guha -Tristan Carel -Troy Denton -Tycho Andersen -Tyler Brock -Tyler Brown -Tzu-Jung Lee -uhayate -Ulysse Carion -Umesh Yadav -Utz Bacher -vagrant -Vaidas Jablonskis -vanderliang -Veres Lajos -Victor Algaze -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitaly Ostrosablin -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vlastimil Zeman -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Ward Vandewege -WarheadsSE -Wassim Dhif -Wayne Chang -Wayne Song -Weerasak Chongnguluam -Wei Fu -Wei Wu -Wei-Ting Kuo -weipeng -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenjun Tang -Wenkai Yin -Wentao Zhang -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang -Wes Morgan -Wewang Xiaorenfine -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Martin -William Riancho -William Thurston -WiseTrem -Wolfgang Powisch -Wonjun Kim -xamyzhao -Xian Chaobo -Xianglin Gao -Xianlu Bird -Xiao YongBiao -XiaoBing Jiang -Xiaodong Zhang -Xiaoxi He -Xiaoxu Chen -Xiaoyu Zhang -xichengliudui <1693291525@qq.com> -xiekeyang -Ximo Guanter Gonzálbez -Xinbo Weng -Xinzi Zhou -Xiuming Chen -Xuecong Liao -xuzhaokui -Yadnyawalkya Tale -Yahya -YAMADA Tsuyoshi -Yamasaki Masahide -Yan Feng -Yang Bai -Yang Pengfei -yangchenliang -Yanqiang Miao -Yao Zaiyong -Yassine Tijani -Yasunori Mahata -Yazhong Liu -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongxin Li -Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有勝) -Youcef YEKHLEF -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong -Yuan Sun -Yuanhong Peng -Yue Zhang -Yuhao Fang -Yuichiro Kaneko -Yunxiang Huang -Yurii Rashkovskii -Yusuf Tarık Günaydın -Yves Junqueira -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -ZhangHang -zhangxianwei -Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenhai Gao -Zhenkun Bi -Zhou Hao -Zhoulin Xie -Zhu Guihua -Zhu Kunjia -Zhuoyun Wei -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -Zou Yu -zqh -Zuhayr Elahi -Zunayed Ali -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -徐俊杰 -慕陶 -搏通 -黄艳红00139573 diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE deleted file mode 100644 index 6d8d58fb..00000000 --- a/vendor/github.com/docker/docker/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE deleted file mode 100644 index 0c74e15b..00000000 --- a/vendor/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. 
-It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md deleted file mode 100644 index f136c343..00000000 --- a/vendor/github.com/docker/docker/api/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Working on the Engine API - -The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. - -It consists of various components in this repository: - -- `api/swagger.yaml` A Swagger definition of the API. -- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. -- `cli/` The command-line client. -- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. -- `daemon/` The daemon, which serves the API. - -## Swagger definition - -The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: - -1. Automatically generate documentation. -2. Automatically generate the Go server and client. (A work-in-progress.) -3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. - -## Updating the API documentation - -The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. - -The file is split into two main sections: - -- `definitions`, which defines re-usable objects used in requests and responses -- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) - -To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. - -There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919). - -`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. - -## Viewing the API documentation - -When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. - -Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. - -The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). 
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go deleted file mode 100644 index aa146cda..00000000 --- a/vendor/github.com/docker/docker/api/common.go +++ /dev/null @@ -1,11 +0,0 @@ -package api // import "github.com/docker/docker/api" - -// Common constants for daemon and client. -const ( - // DefaultVersion of Current REST API - DefaultVersion = "1.40" - - // NoBaseImageSpecifier is the symbol used by the FROM - // command to specify that no base image is to be used. - NoBaseImageSpecifier = "scratch" -) diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go deleted file mode 100644 index 504b0c90..00000000 --- a/vendor/github.com/docker/docker/api/common_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows - -package api // import "github.com/docker/docker/api" - -// MinVersion represents Minimum REST API version supported -const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go deleted file mode 100644 index 590ba547..00000000 --- a/vendor/github.com/docker/docker/api/common_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package api // import "github.com/docker/docker/api" - -// MinVersion represents Minimum REST API version supported -// Technically the first daemon API version released on Windows is v1.25 in -// engine version 1.13. However, some clients are explicitly using downlevel -// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. -// Hence also allowing 1.24 on Windows. -const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml deleted file mode 100644 index f07a0273..00000000 --- a/vendor/github.com/docker/docker/api/swagger-gen.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -layout: - models: - - name: definition - source: asset:model - target: "{{ joinFilePath .Target .ModelPackage }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" - operations: - - name: handler - source: asset:serverOperation - target: "{{ joinFilePath .Target .APIPackage .Package }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml deleted file mode 100644 index 76d21509..00000000 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ /dev/null @@ -1,10414 +0,0 @@ -# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. -# -# This is used for generating API documentation and the types used by the -# client/server. See api/README.md for more information. -# -# Some style notes: -# - This file is used by ReDoc, which allows GitHub Flavored Markdown in -# descriptions. -# - There is no maximum line length, for ease of editing and pretty diffs. -# - operationIds are in the format "NounVerb", with a singular noun. - -swagger: "2.0" -schemes: - - "http" - - "https" -produces: - - "application/json" - - "text/plain" -consumes: - - "application/json" - - "text/plain" -basePath: "/v1.40" -info: - title: "Docker Engine API" - version: "1.40" - x-logo: - url: "https://docs.docker.com/images/logo-docker-main.png" - description: | - The Engine API is an HTTP API served by Docker Engine. 
It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. - - Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. - - # Errors - - The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: - - ``` - { - "message": "page not found" - } - ``` - - # Versioning - - The API is usually changed in each release, so API calls are versioned to - ensure that clients don't break. To lock to a specific version of the API, - you prefix the URL with its version, for example, call `/v1.30/info` to use - the v1.30 version of the `/info` endpoint. If the API version specified in - the URL is not supported by the daemon, a HTTP `400 Bad Request` error message - is returned. - - If you omit the version-prefix, the current version of the API (v1.40) is used. - For example, calling `/info` is the same as calling `/v1.40/info`. Using the - API without a version-prefix is deprecated and will be removed in a future release. - - Engine releases in the near future should support this version of the API, - so your client will continue to work even if it is talking to a newer Engine. - - The API uses an open schema model, which means server may add extra properties - to responses. Likewise, the server will ignore any extra query parameters and - request body properties. When you write clients, you need to ignore additional - properties in responses to ensure they do not break when talking to newer - daemons. - - - # Authentication - - Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: - - ``` - { - "username": "string", - "password": "string", - "email": "string", - "serveraddress": "string" - } - ``` - - The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. - - If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: - - ``` - { - "identitytoken": "9cbaf023786cd7..." - } - ``` - -# The tags on paths define the menu sections in the ReDoc documentation, so -# the usage of tags must make sense for that: -# - They should be singular, not plural. -# - There should not be too many tags, or the menu becomes unwieldy. For -# example, it is preferable to add a path to the "System" tag instead of -# creating a tag with a single path in it. -# - The order of tags in this list defines the order in the menu. -tags: - # Primary objects - - name: "Container" - x-displayName: "Containers" - description: | - Create and manage containers. - - name: "Image" - x-displayName: "Images" - - name: "Network" - x-displayName: "Networks" - description: | - Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. - - name: "Volume" - x-displayName: "Volumes" - description: | - Create and manage persistent storage that can be attached to containers. 
- - name: "Exec" - x-displayName: "Exec" - description: | - Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. - - To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. - # Swarm things - - name: "Swarm" - x-displayName: "Swarm" - description: | - Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - - name: "Node" - x-displayName: "Nodes" - description: | - Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Service" - x-displayName: "Services" - description: | - Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Task" - x-displayName: "Tasks" - description: | - A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Secret" - x-displayName: "Secrets" - description: | - Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. - - name: "Config" - x-displayName: "Configs" - description: | - Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. - # System things - - name: "Plugin" - x-displayName: "Plugins" - - name: "System" - x-displayName: "System" - -definitions: - Port: - type: "object" - description: "An open port on a container" - required: [PrivatePort, Type] - properties: - IP: - type: "string" - format: "ip-address" - description: "Host IP address that the container's port is mapped to" - PrivatePort: - type: "integer" - format: "uint16" - x-nullable: false - description: "Port on the container" - PublicPort: - type: "integer" - format: "uint16" - description: "Port exposed on the host" - Type: - type: "string" - x-nullable: false - enum: ["tcp", "udp", "sctp"] - example: - PrivatePort: 8080 - PublicPort: 80 - Type: "tcp" - - MountPoint: - type: "object" - description: "A mount point inside a container" - properties: - Type: - type: "string" - Name: - type: "string" - Source: - type: "string" - Destination: - type: "string" - Driver: - type: "string" - Mode: - type: "string" - RW: - type: "boolean" - Propagation: - type: "string" - - DeviceMapping: - type: "object" - description: "A device mapping between the host and container" - properties: - PathOnHost: - type: "string" - PathInContainer: - type: "string" - CgroupPermissions: - type: "string" - example: - PathOnHost: "/dev/deviceName" - PathInContainer: "/dev/deviceName" - CgroupPermissions: "mrw" - - DeviceRequest: - type: "object" - description: "A request for devices to be sent to device drivers" - properties: - Driver: - type: "string" - example: "nvidia" - Count: - type: "integer" - example: -1 - DeviceIDs: - type: "array" - items: - type: "string" - example: - - "0" - - "1" - - "GPU-fef8089b-4820-abfc-e83e-94318197576e" - Capabilities: - description: | - A list of capabilities; an OR list of AND lists of capabilities. 
- type: "array" - items: - type: "array" - items: - type: "string" - example: - # gpu AND nvidia AND compute - - ["gpu", "nvidia", "compute"] - Options: - description: | - Driver-specific options, specified as a key/value pairs. These options - are passed directly to the driver. - type: "object" - additionalProperties: - type: "string" - - ThrottleDevice: - type: "object" - properties: - Path: - description: "Device path" - type: "string" - Rate: - description: "Rate" - type: "integer" - format: "int64" - minimum: 0 - - Mount: - type: "object" - properties: - Target: - description: "Container path." - type: "string" - Source: - description: "Mount source (e.g. a volume name, a host path)." - type: "string" - Type: - description: | - The mount type. Available types: - - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - ReadOnly: - description: "Whether the mount should be read-only." - type: "boolean" - Consistency: - description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." - type: "string" - BindOptions: - description: "Optional configuration for the `bind` type." - type: "object" - properties: - Propagation: - description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." - type: "string" - enum: - - "private" - - "rprivate" - - "shared" - - "rshared" - - "slave" - - "rslave" - NonRecursive: - description: "Disable recursive bind mount." - type: "boolean" - default: false - VolumeOptions: - description: "Optional configuration for the `volume` type." - type: "object" - properties: - NoCopy: - description: "Populate volume with data from the target." - type: "boolean" - default: false - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - DriverConfig: - description: "Map of driver specific options" - type: "object" - properties: - Name: - description: "Name of the driver to use to create the volume." - type: "string" - Options: - description: "key/value map of driver specific options." - type: "object" - additionalProperties: - type: "string" - TmpfsOptions: - description: "Optional configuration for the `tmpfs` type." - type: "object" - properties: - SizeBytes: - description: "The size for the tmpfs mount in bytes." - type: "integer" - format: "int64" - Mode: - description: "The permission mode for the tmpfs mount in an integer." - type: "integer" - - RestartPolicy: - description: | - The behavior to apply when the container exits. The default is not to restart. - - An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. 
- type: "object" - properties: - Name: - type: "string" - description: | - - Empty string means not to restart - - `always` Always restart - - `unless-stopped` Restart always except when the user has manually stopped the container - - `on-failure` Restart only when the container exit code is non-zero - enum: - - "" - - "always" - - "unless-stopped" - - "on-failure" - MaximumRetryCount: - type: "integer" - description: "If `on-failure` is used, the number of times to retry before giving up" - - Resources: - description: "A container's resources (cgroups config, ulimits, etc)" - type: "object" - properties: - # Applicable to all platforms - CpuShares: - description: "An integer value representing this container's relative CPU weight versus other containers." - type: "integer" - Memory: - description: "Memory limit in bytes." - type: "integer" - format: "int64" - default: 0 - # Applicable to UNIX platforms - CgroupParent: - description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." - type: "string" - BlkioWeight: - description: "Block IO weight (relative weight)." - type: "integer" - minimum: 0 - maximum: 1000 - BlkioWeightDevice: - description: | - Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. - type: "array" - items: - type: "object" - properties: - Path: - type: "string" - Weight: - type: "integer" - minimum: 0 - BlkioDeviceReadBps: - description: | - Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteBps: - description: | - Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceReadIOps: - description: | - Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteIOps: - description: | - Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - CpuPeriod: - description: "The length of a CPU period in microseconds." - type: "integer" - format: "int64" - CpuQuota: - description: "Microseconds of CPU time that the container can get in a CPU period." - type: "integer" - format: "int64" - CpuRealtimePeriod: - description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." - type: "integer" - format: "int64" - CpuRealtimeRuntime: - description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." - type: "integer" - format: "int64" - CpusetCpus: - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" - type: "string" - example: "0-3" - CpusetMems: - description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." - type: "string" - Devices: - description: "A list of devices to add to the container." 
- type: "array" - items: - $ref: "#/definitions/DeviceMapping" - DeviceCgroupRules: - description: "a list of cgroup rules to apply to the container" - type: "array" - items: - type: "string" - example: "c 13:* rwm" - DeviceRequests: - description: "a list of requests for devices to be sent to device drivers" - type: "array" - items: - $ref: "#/definitions/DeviceRequest" - DiskQuota: - description: "Disk limit (in bytes)." - type: "integer" - format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" - example: 209715200 - KernelMemoryTCP: - description: "Hard limit for kernel TCP buffer memory (in bytes)." - type: "integer" - format: "int64" - MemoryReservation: - description: "Memory soft limit in bytes." - type: "integer" - format: "int64" - MemorySwap: - description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." - type: "integer" - format: "int64" - MemorySwappiness: - description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." - type: "integer" - format: "int64" - minimum: 0 - maximum: 100 - NanoCPUs: - description: "CPU quota in units of 10-9 CPUs." - type: "integer" - format: "int64" - OomKillDisable: - description: "Disable OOM Killer for the container." - type: "boolean" - Init: - description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." - type: "boolean" - x-nullable: true - PidsLimit: - description: | - Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. - type: "integer" - format: "int64" - x-nullable: true - Ulimits: - description: | - A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - # Applicable to Windows - CpuCount: - description: | - The number of usable CPUs (Windows only). - - On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - CpuPercent: - description: | - The usable percentage of the available CPUs (Windows only). - - On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. 
- type: "integer" - format: "int64" - IOMaximumIOps: - description: "Maximum IOps for the container system drive (Windows only)" - type: "integer" - format: "int64" - IOMaximumBandwidth: - description: "Maximum IO in bytes per second for the container system drive (Windows only)" - type: "integer" - format: "int64" - - ResourceObject: - description: "An object describing the resources which can be advertised by a node and requested by a task" - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - GenericResources: - $ref: "#/definitions/GenericResources" - - GenericResources: - description: "User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`)" - type: "array" - items: - type: "object" - properties: - NamedResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "string" - DiscreteResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "integer" - format: "int64" - example: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - HealthConfig: - description: "A test to perform to check that the container is healthy." - type: "object" - properties: - Test: - description: | - The test to perform. Possible values are: - - - `[]` inherit healthcheck from image or parent image - - `["NONE"]` disable healthcheck - - `["CMD", args...]` exec arguments directly - - `["CMD-SHELL", command]` run command with system's default shell - type: "array" - items: - type: "string" - Interval: - description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." - type: "integer" - Timeout: - description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit." - type: "integer" - Retries: - description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." - type: "integer" - StartPeriod: - description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." - type: "integer" - - HostConfig: - description: "Container configuration that depends on the host we are running on" - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - # Applicable to all platforms - Binds: - type: "array" - description: | - A list of volume bindings for this container. Each volume binding is a string in one of these forms: - - - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - - `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. - - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. 
- items: - type: "string" - ContainerIDFile: - type: "string" - description: "Path to a file where the container ID is written" - LogConfig: - type: "object" - description: "The logging configuration for this container" - properties: - Type: - type: "string" - enum: - - "json-file" - - "syslog" - - "journald" - - "gelf" - - "fluentd" - - "awslogs" - - "splunk" - - "etwlogs" - - "none" - Config: - type: "object" - additionalProperties: - type: "string" - NetworkMode: - type: "string" - description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name to which this container should connect." - PortBindings: - $ref: "#/definitions/PortMap" - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - AutoRemove: - type: "boolean" - description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." - VolumeDriver: - type: "string" - description: "Driver that this container uses to mount volumes." - VolumesFrom: - type: "array" - description: "A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`." - items: - type: "string" - Mounts: - description: "Specification for mounts to be added to the container." - type: "array" - items: - $ref: "#/definitions/Mount" - - # Applicable to UNIX platforms - Capabilities: - type: "array" - description: | - A list of kernel capabilities to be available for container (this overrides the default set). - - Conflicts with options 'CapAdd' and 'CapDrop' - items: - type: "string" - CapAdd: - type: "array" - description: "A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'" - items: - type: "string" - CapDrop: - type: "array" - description: "A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'" - items: - type: "string" - Dns: - type: "array" - description: "A list of DNS servers for the container to use." - items: - type: "string" - DnsOptions: - type: "array" - description: "A list of DNS options." - items: - type: "string" - DnsSearch: - type: "array" - description: "A list of DNS search domains." - items: - type: "string" - ExtraHosts: - type: "array" - description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - items: - type: "string" - GroupAdd: - type: "array" - description: "A list of additional groups that the container process will run as." - items: - type: "string" - IpcMode: - type: "string" - description: | - IPC sharing mode for the container. Possible values are: - - - `"none"`: own private IPC namespace, with /dev/shm not mounted - - `"private"`: own private IPC namespace - - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - - `"container:<name|id>"`: join another (shareable) container's IPC namespace - - `"host"`: use the host system's IPC namespace - - If not specified, daemon default is used, which can either be `"private"` - or `"shareable"`, depending on daemon version and configuration. - Cgroup: - type: "string" - description: "Cgroup to use for the container." - Links: - type: "array" - description: "A list of links for the container in the form `container_name:alias`." - items: - type: "string" - OomScoreAdj: - type: "integer" - description: "An integer value containing the score given to the container in order to tune OOM killer preferences."
- example: 500 - PidMode: - type: "string" - description: | - Set the PID (Process) Namespace mode for the container. It can be either: - - - `"container:<name|id>"`: joins another container's PID namespace - - `"host"`: use the host's PID namespace inside the container - Privileged: - type: "boolean" - description: "Gives the container full access to the host." - PublishAllPorts: - type: "boolean" - description: | - Allocates an ephemeral host port for all of a container's - exposed ports. - - Ports are de-allocated when the container stops and allocated when the container starts. - The allocated port might be changed when restarting the container. - - The port is selected from the ephemeral port range that depends on the kernel. - For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. - ReadonlyRootfs: - type: "boolean" - description: "Mount the container's root filesystem as read only." - SecurityOpt: - type: "array" - description: "A list of string values to customize labels for MLS systems, such as SELinux." - items: - type: "string" - StorageOpt: - type: "object" - description: | - Storage driver options for this container, in the form `{"size": "120G"}`. - additionalProperties: - type: "string" - Tmpfs: - type: "object" - description: | - A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. - additionalProperties: - type: "string" - UTSMode: - type: "string" - description: "UTS namespace to use for the container." - UsernsMode: - type: "string" - description: "Sets the user namespace mode for the container when user namespace remapping option is enabled." - ShmSize: - type: "integer" - description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." - minimum: 0 - Sysctls: - type: "object" - description: | - A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` - additionalProperties: - type: "string" - Runtime: - type: "string" - description: "Runtime to use with this container." - # Applicable to Windows - ConsoleSize: - type: "array" - description: "Initial console size, as a `[height, width]` array. (Windows only)" - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - Isolation: - type: "string" - description: "Isolation technology of the container. (Windows only)" - enum: - - "default" - - "process" - - "hyperv" - MaskedPaths: - type: "array" - description: "The list of paths to be masked inside the container (this overrides the default set of paths)" - items: - type: "string" - ReadonlyPaths: - type: "array" - description: "The list of paths to be set as read-only inside the container (this overrides the default set of paths)" - items: - type: "string" - - ContainerConfig: - description: "Configuration for a container that is portable between hosts" - type: "object" - properties: - Hostname: - description: "The hostname to use for the container, as a valid RFC 1123 hostname." - type: "string" - Domainname: - description: "The domain name to use for the container." - type: "string" - User: - description: "The user that commands are run as inside the container." - type: "string" - AttachStdin: - description: "Whether to attach to `stdin`." - type: "boolean" - default: false - AttachStdout: - description: "Whether to attach to `stdout`." - type: "boolean" - default: true - AttachStderr: - description: "Whether to attach to `stderr`."
- type: "boolean" - default: true - ExposedPorts: - description: | - An object mapping ports to an empty object in the form: - - `{"/": {}}` - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - Tty: - description: "Attach standard streams to a TTY, including `stdin` if it is not closed." - type: "boolean" - default: false - OpenStdin: - description: "Open `stdin`" - type: "boolean" - default: false - StdinOnce: - description: "Close `stdin` after one attached client disconnects" - type: "boolean" - default: false - Env: - description: | - A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. - type: "array" - items: - type: "string" - Cmd: - description: "Command to run specified as a string or an array of strings." - type: "array" - items: - type: "string" - Healthcheck: - $ref: "#/definitions/HealthConfig" - ArgsEscaped: - description: "Command is already escaped (Windows only)" - type: "boolean" - Image: - description: "The name of the image to use when creating the container" - type: "string" - Volumes: - description: "An object mapping mount point paths inside the container to empty objects." - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - WorkingDir: - description: "The working directory for commands to run in." - type: "string" - Entrypoint: - description: | - The entry point for the container as a string or an array of strings. - - If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). - type: "array" - items: - type: "string" - NetworkDisabled: - description: "Disable networking for the container." - type: "boolean" - MacAddress: - description: "MAC address of the container." - type: "string" - OnBuild: - description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." - type: "array" - items: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - StopSignal: - description: "Signal to stop a container as a string or unsigned integer." - type: "string" - default: "SIGTERM" - StopTimeout: - description: "Timeout to stop a container in seconds." - type: "integer" - default: 10 - Shell: - description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." - type: "array" - items: - type: "string" - - NetworkSettings: - description: "NetworkSettings exposes the network settings in the API" - type: "object" - properties: - Bridge: - description: Name of the network'a bridge (for example, `docker0`). - type: "string" - example: "docker0" - SandboxID: - description: SandboxID uniquely represents a container's network stack. - type: "string" - example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" - HairpinMode: - description: | - Indicates if hairpin NAT should be enabled on the virtual interface. - type: "boolean" - example: false - LinkLocalIPv6Address: - description: IPv6 unicast address using the link-local prefix. - type: "string" - example: "fe80::42:acff:fe11:1" - LinkLocalIPv6PrefixLen: - description: Prefix length of the IPv6 unicast address. 
- type: "integer" - example: "64" - Ports: - $ref: "#/definitions/PortMap" - SandboxKey: - description: SandboxKey identifies the sandbox - type: "string" - example: "/var/run/docker/netns/8ab54b426c38" - - # TODO is SecondaryIPAddresses actually used? - SecondaryIPAddresses: - description: "" - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - # TODO is SecondaryIPv6Addresses actually used? - SecondaryIPv6Addresses: - description: "" - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - # TODO properties below are part of DefaultNetworkSettings, which is - # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 - EndpointID: - description: | - EndpointID uniquely represents a service endpoint in a Sandbox. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.1" - GlobalIPv6Address: - description: | - Global IPv6 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 64 - IPAddress: - description: | - IPv4 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address for this network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8:2::100" - MacAddress: - description: | - MAC address for the container on the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "02:42:ac:11:00:04" - Networks: - description: | - Information about all networks that the container is connected to. - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - - Address: - description: Address represents an IPv4 or IPv6 IP address. - type: "object" - properties: - Addr: - description: IP address. - type: "string" - PrefixLen: - description: Mask length of the IP address. - type: "integer" - - PortMap: - description: | - PortMap describes the mapping of container ports to host ports, using the - container's port-number and protocol as key in the format `<port>/<protocol>`, - for example, `80/udp`. - - If a container's port is mapped for multiple protocols, separate entries - are added to the mapping table. - type: "object" - additionalProperties: - type: "array" - items: - $ref: "#/definitions/PortBinding" - example: - "443/tcp": - - HostIp: "127.0.0.1" - HostPort: "4443" - "80/tcp": - - HostIp: "0.0.0.0" - HostPort: "80" - - HostIp: "0.0.0.0" - HostPort: "8080" - "80/udp": - - HostIp: "0.0.0.0" - HostPort: "80" - "53/udp": - - HostIp: "0.0.0.0" - HostPort: "53" - "2377/tcp": null - - PortBinding: - description: | - PortBinding represents a binding between a host IP address and a host - port. - type: "object" - x-nullable: true - properties: - HostIp: - description: "Host IP address that the container's port is mapped to." - type: "string" - example: "127.0.0.1" - HostPort: - description: "Host port number that the container's port is mapped to." - type: "string" - example: "4443" - - GraphDriverData: - description: "Information about a container's graph driver."
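The `PortMap`/`PortBinding` pair above translates almost mechanically into Go; a small sketch (the type names are illustrative, not the vendored client's):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type PortBinding struct {
	HostIp   string `json:"HostIp"`
	HostPort string `json:"HostPort"`
}

// Key format is "<port>/<protocol>"; a nil entry (JSON null) means the
// port is exposed without a host binding, as in the "2377/tcp" example above.
type PortMap map[string][]PortBinding

func main() {
	pm := PortMap{
		"80/tcp":   {{HostIp: "0.0.0.0", HostPort: "8080"}},
		"2377/tcp": nil,
	}
	out, _ := json.Marshal(pm)
	fmt.Println(string(out))
}
```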
- type: "object" - required: [Name, Data] - properties: - Name: - type: "string" - x-nullable: false - Data: - type: "object" - x-nullable: false - additionalProperties: - type: "string" - - Image: - type: "object" - required: - - Id - - Parent - - Comment - - Created - - Container - - DockerVersion - - Author - - Architecture - - Os - - Size - - VirtualSize - - GraphDriver - - RootFS - properties: - Id: - type: "string" - x-nullable: false - RepoTags: - type: "array" - items: - type: "string" - RepoDigests: - type: "array" - items: - type: "string" - Parent: - type: "string" - x-nullable: false - Comment: - type: "string" - x-nullable: false - Created: - type: "string" - x-nullable: false - Container: - type: "string" - x-nullable: false - ContainerConfig: - $ref: "#/definitions/ContainerConfig" - DockerVersion: - type: "string" - x-nullable: false - Author: - type: "string" - x-nullable: false - Config: - $ref: "#/definitions/ContainerConfig" - Architecture: - type: "string" - x-nullable: false - Os: - type: "string" - x-nullable: false - OsVersion: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - VirtualSize: - type: "integer" - format: "int64" - x-nullable: false - GraphDriver: - $ref: "#/definitions/GraphDriverData" - RootFS: - type: "object" - required: [Type] - properties: - Type: - type: "string" - x-nullable: false - Layers: - type: "array" - items: - type: "string" - BaseLayer: - type: "string" - Metadata: - type: "object" - properties: - LastTagTime: - type: "string" - format: "dateTime" - - ImageSummary: - type: "object" - required: - - Id - - ParentId - - RepoTags - - RepoDigests - - Created - - Size - - SharedSize - - VirtualSize - - Labels - - Containers - properties: - Id: - type: "string" - x-nullable: false - ParentId: - type: "string" - x-nullable: false - RepoTags: - type: "array" - x-nullable: false - items: - type: "string" - RepoDigests: - type: "array" - x-nullable: false - items: - type: "string" - Created: - type: "integer" - x-nullable: false - Size: - type: "integer" - x-nullable: false - SharedSize: - type: "integer" - x-nullable: false - VirtualSize: - type: "integer" - x-nullable: false - Labels: - type: "object" - x-nullable: false - additionalProperties: - type: "string" - Containers: - x-nullable: false - type: "integer" - - AuthConfig: - type: "object" - properties: - username: - type: "string" - password: - type: "string" - email: - type: "string" - serveraddress: - type: "string" - example: - username: "hannibal" - password: "xxxx" - serveraddress: "https://index.docker.io/v1/" - - ProcessConfig: - type: "object" - properties: - privileged: - type: "boolean" - user: - type: "string" - tty: - type: "boolean" - entrypoint: - type: "string" - arguments: - type: "array" - items: - type: "string" - - Volume: - type: "object" - required: [Name, Driver, Mountpoint, Labels, Scope, Options] - properties: - Name: - type: "string" - description: "Name of the volume." - x-nullable: false - Driver: - type: "string" - description: "Name of the volume driver used by the volume." - x-nullable: false - Mountpoint: - type: "string" - description: "Mount path of the volume on the host." - x-nullable: false - CreatedAt: - type: "string" - format: "dateTime" - description: "Date/Time the volume was created." - Status: - type: "object" - description: | - Low-level details about the volume, provided by the volume driver. - Details are returned as a map with key/value pairs: - `{"key":"value","key2":"value2"}`. 
- - The `Status` field is optional, and is omitted if the volume driver - does not support this feature. - additionalProperties: - type: "object" - Labels: - type: "object" - description: "User-defined key/value metadata." - x-nullable: false - additionalProperties: - type: "string" - Scope: - type: "string" - description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." - default: "local" - x-nullable: false - enum: ["local", "global"] - Options: - type: "object" - description: "The driver specific options used when creating the volume." - additionalProperties: - type: "string" - UsageData: - type: "object" - x-nullable: true - required: [Size, RefCount] - description: | - Usage details about the volume. This information is used by the - `GET /system/df` endpoint, and omitted in other endpoints. - properties: - Size: - type: "integer" - default: -1 - description: | - Amount of disk space used by the volume (in bytes). This information - is only available for volumes created with the `"local"` volume - driver. For volumes created with other volume drivers, this field - is set to `-1` ("not available") - x-nullable: false - RefCount: - type: "integer" - default: -1 - description: | - The number of containers referencing this volume. This field - is set to `-1` if the reference-count is not available. - x-nullable: false - - example: - Name: "tardis" - Driver: "custom" - Mountpoint: "/var/lib/docker/volumes/tardis" - Status: - hello: "world" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - CreatedAt: "2016-06-07T20:31:11.853781916Z" - - Network: - type: "object" - properties: - Name: - type: "string" - Id: - type: "string" - Created: - type: "string" - format: "dateTime" - Scope: - type: "string" - Driver: - type: "string" - EnableIPv6: - type: "boolean" - IPAM: - $ref: "#/definitions/IPAM" - Internal: - type: "boolean" - Attachable: - type: "boolean" - Ingress: - type: "boolean" - Containers: - type: "object" - additionalProperties: - $ref: "#/definitions/NetworkContainer" - Options: - type: "object" - additionalProperties: - type: "string" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - Name: "net01" - Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" - Created: "2016-10-19T04:33:30.360899459Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - IPAM: - Driver: "default" - Config: - - Subnet: "172.19.0.0/16" - Gateway: "172.19.0.1" - Options: - foo: "bar" - Internal: false - Attachable: false - Ingress: false - Containers: - 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: - Name: "test" - EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: "02:42:ac:13:00:02" - IPv4Address: "172.19.0.2/16" - IPv6Address: "" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - IPAM: - type: "object" - properties: - Driver: - description: "Name of the IPAM driver to use." 
- type: "string" - default: "default" - Config: - description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" - type: "array" - items: - type: "object" - additionalProperties: - type: "string" - Options: - description: "Driver-specific options, specified as a map." - type: "object" - additionalProperties: - type: "string" - - NetworkContainer: - type: "object" - properties: - Name: - type: "string" - EndpointID: - type: "string" - MacAddress: - type: "string" - IPv4Address: - type: "string" - IPv6Address: - type: "string" - - BuildInfo: - type: "object" - properties: - id: - type: "string" - stream: - type: "string" - error: - type: "string" - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - aux: - $ref: "#/definitions/ImageID" - - BuildCache: - type: "object" - properties: - ID: - type: "string" - Parent: - type: "string" - Type: - type: "string" - Description: - type: "string" - InUse: - type: "boolean" - Shared: - type: "boolean" - Size: - type: "integer" - CreatedAt: - type: "integer" - LastUsedAt: - type: "integer" - x-nullable: true - UsageCount: - type: "integer" - - ImageID: - type: "object" - description: "Image ID or Digest" - properties: - ID: - type: "string" - example: - ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - - CreateImageInfo: - type: "object" - properties: - id: - type: "string" - error: - type: "string" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - PushImageInfo: - type: "object" - properties: - error: - type: "string" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - ErrorDetail: - type: "object" - properties: - code: - type: "integer" - message: - type: "string" - - ProgressDetail: - type: "object" - properties: - current: - type: "integer" - total: - type: "integer" - - ErrorResponse: - description: "Represents an error." - type: "object" - required: ["message"] - properties: - message: - description: "The error message." - type: "string" - x-nullable: false - example: - message: "Something went wrong." - - IdResponse: - description: "Response to an API call that returns just an Id" - type: "object" - required: ["Id"] - properties: - Id: - description: "The id of the newly created object." - type: "string" - x-nullable: false - - EndpointSettings: - description: "Configuration for a network endpoint." - type: "object" - properties: - # Configurations - IPAMConfig: - $ref: "#/definitions/EndpointIPAMConfig" - Links: - type: "array" - items: - type: "string" - example: - - "container_1" - - "container_2" - Aliases: - type: "array" - items: - type: "string" - example: - - "server_x" - - "server_y" - - # Operational data - NetworkID: - description: | - Unique ID of the network. - type: "string" - example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" - EndpointID: - description: | - Unique ID for the service endpoint in a Sandbox. - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for this network. - type: "string" - example: "172.17.0.1" - IPAddress: - description: | - IPv4 address. - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. 
- type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address. - type: "string" - example: "2001:db8:2::100" - GlobalIPv6Address: - description: | - Global IPv6 address. - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - type: "integer" - format: "int64" - example: 64 - MacAddress: - description: | - MAC address for the endpoint on this network. - type: "string" - example: "02:42:ac:11:00:04" - DriverOpts: - description: | - DriverOpts is a mapping of driver options and values. These options - are passed directly to the driver and are driver specific. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - - EndpointIPAMConfig: - description: | - EndpointIPAMConfig represents an endpoint's IPAM configuration. - type: "object" - x-nullable: true - properties: - IPv4Address: - type: "string" - example: "172.20.30.33" - IPv6Address: - type: "string" - example: "2001:db8:abcd::3033" - LinkLocalIPs: - type: "array" - items: - type: "string" - example: - - "169.254.34.68" - - "fe80::3468" - - PluginMount: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Source, Destination, Type, Options] - properties: - Name: - type: "string" - x-nullable: false - example: "some-mount" - Description: - type: "string" - x-nullable: false - example: "This is a mount that's used by the plugin." - Settable: - type: "array" - items: - type: "string" - Source: - type: "string" - example: "/var/lib/docker/plugins/" - Destination: - type: "string" - x-nullable: false - example: "/mnt/state" - Type: - type: "string" - x-nullable: false - example: "bind" - Options: - type: "array" - items: - type: "string" - example: - - "rbind" - - "rw" - - PluginDevice: - type: "object" - required: [Name, Description, Settable, Path] - x-nullable: false - properties: - Name: - type: "string" - x-nullable: false - Description: - type: "string" - x-nullable: false - Settable: - type: "array" - items: - type: "string" - Path: - type: "string" - example: "/dev/fuse" - - PluginEnv: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - Description: - x-nullable: false - type: "string" - Settable: - type: "array" - items: - type: "string" - Value: - type: "string" - - PluginInterfaceType: - type: "object" - x-nullable: false - required: [Prefix, Capability, Version] - properties: - Prefix: - type: "string" - x-nullable: false - Capability: - type: "string" - x-nullable: false - Version: - type: "string" - x-nullable: false - - Plugin: - description: "A plugin for the Engine API" - type: "object" - required: [Settings, Enabled, Config, Name] - properties: - Id: - type: "string" - example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" - Name: - type: "string" - x-nullable: false - example: "tiborvass/sample-volume-plugin" - Enabled: - description: "True if the plugin is running. False if the plugin is not running, only installed." - type: "boolean" - x-nullable: false - example: true - Settings: - description: "Settings that can be modified by users." 
- type: "object" - x-nullable: false - required: [Args, Devices, Env, Mounts] - properties: - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - type: "string" - example: - - "DEBUG=0" - Args: - type: "array" - items: - type: "string" - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PluginReference: - description: "plugin remote reference used to push/pull the plugin" - type: "string" - x-nullable: false - example: "localhost:5000/tiborvass/sample-volume-plugin:latest" - Config: - description: "The config of a plugin." - type: "object" - x-nullable: false - required: - - Description - - Documentation - - Interface - - Entrypoint - - WorkDir - - Network - - Linux - - PidHost - - PropagatedMount - - IpcHost - - Mounts - - Env - - Args - properties: - DockerVersion: - description: "Docker Version used to create the plugin" - type: "string" - x-nullable: false - example: "17.06.0-ce" - Description: - type: "string" - x-nullable: false - example: "A sample volume plugin for Docker" - Documentation: - type: "string" - x-nullable: false - example: "https://docs.docker.com/engine/extend/plugins/" - Interface: - description: "The interface between Docker and the plugin" - x-nullable: false - type: "object" - required: [Types, Socket] - properties: - Types: - type: "array" - items: - $ref: "#/definitions/PluginInterfaceType" - example: - - "docker.volumedriver/1.0" - Socket: - type: "string" - x-nullable: false - example: "plugins.sock" - ProtocolScheme: - type: "string" - example: "some.protocol/v1.0" - description: "Protocol to use for clients connecting to the plugin." - enum: - - "" - - "moby.plugins.http/v1" - Entrypoint: - type: "array" - items: - type: "string" - example: - - "/usr/bin/sample-volume-plugin" - - "/data" - WorkDir: - type: "string" - x-nullable: false - example: "/bin/" - User: - type: "object" - x-nullable: false - properties: - UID: - type: "integer" - format: "uint32" - example: 1000 - GID: - type: "integer" - format: "uint32" - example: 1000 - Network: - type: "object" - x-nullable: false - required: [Type] - properties: - Type: - x-nullable: false - type: "string" - example: "host" - Linux: - type: "object" - x-nullable: false - required: [Capabilities, AllowAllDevices, Devices] - properties: - Capabilities: - type: "array" - items: - type: "string" - example: - - "CAP_SYS_ADMIN" - - "CAP_SYSLOG" - AllowAllDevices: - type: "boolean" - x-nullable: false - example: false - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PropagatedMount: - type: "string" - x-nullable: false - example: "/mnt/volumes" - IpcHost: - type: "boolean" - x-nullable: false - example: false - PidHost: - type: "boolean" - x-nullable: false - example: false - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - $ref: "#/definitions/PluginEnv" - example: - - Name: "DEBUG" - Description: "If set, prints debug messages" - Settable: null - Value: "0" - Args: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - example: "args" - Description: - x-nullable: false - type: "string" - example: "command line arguments" - Settable: - type: "array" - items: - type: "string" - Value: - type: "array" - items: - type: "string" - rootfs: - type: "object" - properties: - type: - type: "string" - example: "layers" - diff_ids: - type: "array" - items: - type: "string" - example: - 
- "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - - ObjectVersion: - description: | - The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. - The client must send the version number along with the modified specification when updating these objects. - This approach ensures safe concurrency and determinism in that the change on the object - may not be applied if the version number has changed from the last read. In other words, - if two update requests specify the same base version, only one of the requests can succeed. - As a result, two separate update requests that happen at the same time will not - unintentionally overwrite each other. - type: "object" - properties: - Index: - type: "integer" - format: "uint64" - example: 373531 - - NodeSpec: - type: "object" - properties: - Name: - description: "Name for the node." - type: "string" - example: "my-node" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Role: - description: "Role of the node." - type: "string" - enum: - - "worker" - - "manager" - example: "manager" - Availability: - description: "Availability of the node." - type: "string" - enum: - - "active" - - "pause" - - "drain" - example: "active" - example: - Availability: "active" - Name: "node-name" - Role: "manager" - Labels: - foo: "bar" - - Node: - type: "object" - properties: - ID: - type: "string" - example: "24ifsmvkjbyhk" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the node was added to the swarm in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the node was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/NodeSpec" - Description: - $ref: "#/definitions/NodeDescription" - Status: - $ref: "#/definitions/NodeStatus" - ManagerStatus: - $ref: "#/definitions/ManagerStatus" - - NodeDescription: - description: | - NodeDescription encapsulates the properties of the Node as reported by the - agent. - type: "object" - properties: - Hostname: - type: "string" - example: "bf3067039e47" - Platform: - $ref: "#/definitions/Platform" - Resources: - $ref: "#/definitions/ResourceObject" - Engine: - $ref: "#/definitions/EngineDescription" - TLSInfo: - $ref: "#/definitions/TLSInfo" - - Platform: - description: | - Platform represents the platform (Arch/OS). - type: "object" - properties: - Architecture: - description: | - Architecture represents the hardware architecture (for example, - `x86_64`). - type: "string" - example: "x86_64" - OS: - description: | - OS represents the Operating System (for example, `linux` or `windows`). - type: "string" - example: "linux" - - EngineDescription: - description: "EngineDescription provides information about an engine." 
- type: "object" - properties: - EngineVersion: - type: "string" - example: "17.06.0" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - foo: "bar" - Plugins: - type: "array" - items: - type: "object" - properties: - Type: - type: "string" - Name: - type: "string" - example: - - Type: "Log" - Name: "awslogs" - - Type: "Log" - Name: "fluentd" - - Type: "Log" - Name: "gcplogs" - - Type: "Log" - Name: "gelf" - - Type: "Log" - Name: "journald" - - Type: "Log" - Name: "json-file" - - Type: "Log" - Name: "logentries" - - Type: "Log" - Name: "splunk" - - Type: "Log" - Name: "syslog" - - Type: "Network" - Name: "bridge" - - Type: "Network" - Name: "host" - - Type: "Network" - Name: "ipvlan" - - Type: "Network" - Name: "macvlan" - - Type: "Network" - Name: "null" - - Type: "Network" - Name: "overlay" - - Type: "Volume" - Name: "local" - - Type: "Volume" - Name: "localhost:5000/vieux/sshfs:latest" - - Type: "Volume" - Name: "vieux/sshfs:latest" - - TLSInfo: - description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" - type: "object" - properties: - TrustRoot: - description: "The root CA certificate(s) that are used to validate leaf TLS certificates" - type: "string" - CertIssuerSubject: - description: "The base64-url-safe-encoded raw subject bytes of the issuer" - type: "string" - CertIssuerPublicKey: - description: "The base64-url-safe-encoded raw public key bytes of the issuer" - type: "string" - example: - TrustRoot: | - -----BEGIN CERTIFICATE----- - MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw - EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 - MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH - A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf - 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB - Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO - PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz - pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H - -----END CERTIFICATE----- - CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" - CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" - - NodeStatus: - description: | - NodeStatus represents the status of a node. - - It provides the current status of the node, as seen by the manager. - type: "object" - properties: - State: - $ref: "#/definitions/NodeState" - Message: - type: "string" - example: "" - Addr: - description: "IP address of the node." - type: "string" - example: "172.17.0.2" - - NodeState: - description: "NodeState represents the state of a node." - type: "string" - enum: - - "unknown" - - "down" - - "ready" - - "disconnected" - example: "ready" - - ManagerStatus: - description: | - ManagerStatus represents the status of a manager. - - It provides the current status of a node's manager component, if the node - is a manager. - x-nullable: true - type: "object" - properties: - Leader: - type: "boolean" - default: false - example: true - Reachability: - $ref: "#/definitions/Reachability" - Addr: - description: | - The IP address and port at which the manager is reachable. - type: "string" - example: "10.0.0.46:2377" - - Reachability: - description: "Reachability represents the reachability of a node." - type: "string" - enum: - - "unknown" - - "unreachable" - - "reachable" - example: "reachable" - - SwarmSpec: - description: "User modifiable swarm configuration." 
- type: "object" - properties: - Name: - description: "Name of the swarm." - type: "string" - example: "default" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.corp.type: "production" - com.example.corp.department: "engineering" - Orchestration: - description: "Orchestration configuration." - type: "object" - x-nullable: true - properties: - TaskHistoryRetentionLimit: - description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." - type: "integer" - format: "int64" - example: 10 - Raft: - description: "Raft configuration." - type: "object" - properties: - SnapshotInterval: - description: "The number of log entries between snapshots." - type: "integer" - format: "uint64" - example: 10000 - KeepOldSnapshots: - description: "The number of snapshots to keep beyond the current snapshot." - type: "integer" - format: "uint64" - LogEntriesForSlowFollowers: - description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." - type: "integer" - format: "uint64" - example: 500 - ElectionTick: - description: | - The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. - - A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 3 - HeartbeatTick: - description: | - The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. - - A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 1 - Dispatcher: - description: "Dispatcher configuration." - type: "object" - x-nullable: true - properties: - HeartbeatPeriod: - description: "The delay for an agent to send a heartbeat to the dispatcher." - type: "integer" - format: "int64" - example: 5000000000 - CAConfig: - description: "CA configuration." - type: "object" - x-nullable: true - properties: - NodeCertExpiry: - description: "The duration node certificates are issued for." - type: "integer" - format: "int64" - example: 7776000000000000 - ExternalCAs: - description: "Configuration for forwarding signing requests to an external certificate authority." - type: "array" - items: - type: "object" - properties: - Protocol: - description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." - type: "string" - enum: - - "cfssl" - default: "cfssl" - URL: - description: "URL where certificate signing requests should be sent." - type: "string" - Options: - description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." - type: "object" - additionalProperties: - type: "string" - CACert: - description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)." - type: "string" - SigningCACert: - description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format." - type: "string" - SigningCAKey: - description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format." 
- type: "string" - ForceRotate: - description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" - format: "uint64" - type: "integer" - EncryptionConfig: - description: "Parameters related to encryption-at-rest." - type: "object" - properties: - AutoLockManagers: - description: "If set, generate a key and use it to lock data stored on the managers." - type: "boolean" - example: false - TaskDefaults: - description: "Defaults for creating tasks in this cluster." - type: "object" - properties: - LogDriver: - description: | - The log driver to use for tasks created in the orchestrator if - unspecified by a service. - - Updating this value only affects new tasks. Existing tasks continue - to use their previously configured log driver until recreated. - type: "object" - properties: - Name: - description: | - The log driver to use as a default for new tasks. - type: "string" - example: "json-file" - Options: - description: | - Driver-specific options for the selectd log driver, specified - as key/value pairs. - type: "object" - additionalProperties: - type: "string" - example: - "max-file": "10" - "max-size": "100m" - - # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but - # without `JoinTokens`. - ClusterInfo: - description: | - ClusterInfo represents information about the swarm as is returned by the - "/info" endpoint. Join-tokens are not included. - x-nullable: true - type: "object" - properties: - ID: - description: "The ID of the swarm." - type: "string" - example: "abajmipo7b4xz5ip2nrla6b11" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the swarm was initialised in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the swarm was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/SwarmSpec" - TLSInfo: - $ref: "#/definitions/TLSInfo" - RootRotationInProgress: - description: "Whether there is currently a root CA rotation in progress for the swarm" - type: "boolean" - example: false - DataPathPort: - description: | - DataPathPort specifies the data path port number for data traffic. - Acceptable port range is 1024 to 49151. - If no port is set or is set to 0, the default port (4789) is used. - type: "integer" - format: "uint32" - default: 4789 - example: 4789 - DefaultAddrPool: - description: | - Default Address Pool specifies default subnet pools for global scope networks. - type: "array" - items: - type: "string" - format: "CIDR" - example: ["10.10.0.0/16", "20.20.0.0/16"] - SubnetSize: - description: | - SubnetSize specifies the subnet size of the networks created from the default subnet pool - type: "integer" - format: "uint32" - maximum: 29 - default: 24 - example: 24 - - JoinTokens: - description: | - JoinTokens contains the tokens workers and managers need to join the swarm. - type: "object" - properties: - Worker: - description: | - The token workers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" - Manager: - description: | - The token managers can use to join the swarm. 
- type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - - Swarm: - type: "object" - allOf: - - $ref: "#/definitions/ClusterInfo" - - type: "object" - properties: - JoinTokens: - $ref: "#/definitions/JoinTokens" - - TaskSpec: - description: "User modifiable task configuration." - type: "object" - properties: - PluginSpec: - type: "object" - description: | - Plugin spec for the service. *(Experimental release only.)* - -
- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - properties: - Name: - description: "The name or 'alias' to use for the plugin." - type: "string" - Remote: - description: "The plugin image reference to use." - type: "string" - Disabled: - description: "Disable the plugin once scheduled." - type: "boolean" - PluginPrivilege: - type: "array" - items: - description: "Describes a permission accepted by the user upon installing the plugin." - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - ContainerSpec: - type: "object" - description: | - Container spec for the service. - -
- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - properties: - Image: - description: "The image name to use for the container" - type: "string" - Labels: - description: "User-defined key/value data." - type: "object" - additionalProperties: - type: "string" - Command: - description: "The command to be run in the image." - type: "array" - items: - type: "string" - Args: - description: "Arguments to the command." - type: "array" - items: - type: "string" - Hostname: - description: "The hostname to use for the container, as a valid RFC 1123 hostname." - type: "string" - Env: - description: "A list of environment variables in the form `VAR=value`." - type: "array" - items: - type: "string" - Dir: - description: "The working directory for commands to run in." - type: "string" - User: - description: "The user inside the container." - type: "string" - Groups: - type: "array" - description: "A list of additional groups that the container process will run as." - items: - type: "string" - Privileges: - type: "object" - description: "Security options for the container" - properties: - CredentialSpec: - type: "object" - description: "CredentialSpec for managed service account (Windows only)" - properties: - Config: - type: "string" - example: "0bt9dmxjvjiqermk6xrop3ekq" - description: | - Load credential spec from a Swarm Config with the given ID. - The specified config must also be present in the Configs field with the Runtime property set. - -
- - - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. - File: - type: "string" - example: "spec.json" - description: | - Load credential spec from this file. The file is read by the daemon, and must be present in the - `CredentialSpecs` subdirectory in the docker data directory, which defaults to - `C:\ProgramData\Docker\` on Windows. - - For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. - -
- - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. - Registry: - type: "string" - description: | - Load credential spec from this value in the Windows registry. The specified registry value must be - located in: - - `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` - -
- - - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. - SELinuxContext: - type: "object" - description: "SELinux labels of the container" - properties: - Disable: - type: "boolean" - description: "Disable SELinux" - User: - type: "string" - description: "SELinux user label" - Role: - type: "string" - description: "SELinux role label" - Type: - type: "string" - description: "SELinux type label" - Level: - type: "string" - description: "SELinux level label" - TTY: - description: "Whether a pseudo-TTY should be allocated." - type: "boolean" - OpenStdin: - description: "Open `stdin`" - type: "boolean" - ReadOnly: - description: "Mount the container's root filesystem as read only." - type: "boolean" - Mounts: - description: "Specification for mounts to be added to containers created as part of the service." - type: "array" - items: - $ref: "#/definitions/Mount" - StopSignal: - description: "Signal to stop the container." - type: "string" - StopGracePeriod: - description: "Amount of time to wait for the container to terminate before forcefully killing it." - type: "integer" - format: "int64" - HealthCheck: - $ref: "#/definitions/HealthConfig" - Hosts: - type: "array" - description: | - A list of hostname/IP mappings to add to the container's `hosts` - file. The format of extra hosts is specified in the - [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) - man page: - - IP_address canonical_hostname [aliases...] - items: - type: "string" - DNSConfig: - description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." - type: "object" - properties: - Nameservers: - description: "The IP addresses of the name servers." - type: "array" - items: - type: "string" - Search: - description: "A search list for host-name lookup." - type: "array" - items: - type: "string" - Options: - description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." - type: "array" - items: - type: "string" - Secrets: - description: "Secrets contains references to zero or more secrets that will be exposed to the service." - type: "array" - items: - type: "object" - properties: - File: - description: "File represents a specific target that is backed by a file." - type: "object" - properties: - Name: - description: "Name represents the final filename in the filesystem." - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - SecretID: - description: "SecretID represents the ID of the specific secret that we're referencing." - type: "string" - SecretName: - description: | - SecretName is the name of the secret that this references, but this is just provided for - lookup/display purposes. The secret in the reference will be identified by its ID. - type: "string" - Configs: - description: "Configs contains references to zero or more configs that will be exposed to the service." - type: "array" - items: - type: "object" - properties: - File: - description: | - File represents a specific target that is backed by a file. - -


- - > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive - type: "object" - properties: - Name: - description: "Name represents the final filename in the filesystem." - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - Runtime: - description: | - Runtime represents a target that is not mounted into the container but is used by the task - -


- - > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive - type: "object" - ConfigID: - description: "ConfigID represents the ID of the specific config that we're referencing." - type: "string" - ConfigName: - description: | - ConfigName is the name of the config that this references, but this is just provided for - lookup/display purposes. The config in the reference will be identified by its ID. - type: "string" - Isolation: - type: "string" - description: "Isolation technology of the containers running the service. (Windows only)" - enum: - - "default" - - "process" - - "hyperv" - Init: - description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." - type: "boolean" - x-nullable: true - Sysctls: - description: | - Set kernel namespaced parameters (sysctls) in the container. - The Sysctls option on services accepts the same sysctls as are - supported on containers. Note that while the same sysctls are - supported, no guarantees or checks are made about their - suitability for a clustered environment, and it's up to the user - to determine whether a given sysctl will work properly in a - Service. - type: "object" - additionalProperties: - type: "string" - NetworkAttachmentSpec: - description: | - Read-only spec type for non-swarm containers attached to swarm overlay - networks. - -


- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - type: "object" - properties: - ContainerID: - description: "ID of the container represented by this task" - type: "string" - Resources: - description: "Resource requirements which apply to each individual container created as part of the service." - type: "object" - properties: - Limits: - description: "Define resource limits." - $ref: "#/definitions/ResourceObject" - Reservation: - description: "Define resource reservations." - $ref: "#/definitions/ResourceObject" - RestartPolicy: - description: "Specification for the restart policy which applies to containers created as part of this service." - type: "object" - properties: - Condition: - description: "Condition for restart." - type: "string" - enum: - - "none" - - "on-failure" - - "any" - Delay: - description: "Delay between restart attempts." - type: "integer" - format: "int64" - MaxAttempts: - description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." - type: "integer" - format: "int64" - default: 0 - Window: - description: "Window is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." - type: "integer" - format: "int64" - default: 0 - Placement: - type: "object" - properties: - Constraints: - description: "An array of constraints." - type: "array" - items: - type: "string" - example: - - "node.hostname!=node3.corp.example.com" - - "node.role!=manager" - - "node.labels.type==production" - Preferences: - description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence." - type: "array" - items: - type: "object" - properties: - Spread: - type: "object" - properties: - SpreadDescriptor: - description: "label descriptor, such as engine.labels.az" - type: "string" - example: - - Spread: - SpreadDescriptor: "node.labels.datacenter" - - Spread: - SpreadDescriptor: "node.labels.rack" - MaxReplicas: - description: "Maximum number of replicas per node (default value is 0, which is unlimited)" - type: "integer" - format: "int64" - default: 0 - Platforms: - description: | - Platforms stores all the platforms that the service's image can - run on. This field is used in the platform filter for scheduling. - If empty, then the platform filter is off, meaning there are no - scheduling restrictions. - type: "array" - items: - $ref: "#/definitions/Platform" - ForceUpdate: - description: "A counter that triggers an update even if no relevant parameters have been changed." - type: "integer" - Runtime: - description: "Runtime is the type of runtime specified for the task executor." - type: "string" - Networks: - type: "array" - items: - type: "object" - properties: - Target: - type: "string" - Aliases: - type: "array" - items: - type: "string" - LogDriver: - description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified."
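The restart-policy and placement semantics above are easy to get wrong when hand-building payloads: a `MaxAttempts` of 0 is ignored and a `Window` of 0 means the evaluation window is unbounded. A minimal sketch of marshalling just that portion of a task spec in Go; the struct names here are illustrative, not taken from any SDK:

```go
// Sketch: building the RestartPolicy / Placement portion of a TaskSpec
// payload as JSON. Field names follow the schema above; the endpoint
// that would accept this payload is not shown here.
package main

import (
	"encoding/json"
	"fmt"
)

type RestartPolicy struct {
	Condition   string `json:"Condition"`             // "none", "on-failure", or "any"
	Delay       int64  `json:"Delay,omitempty"`       // nanoseconds between restart attempts
	MaxAttempts int64  `json:"MaxAttempts,omitempty"` // 0 is ignored, per the schema above
	Window      int64  `json:"Window,omitempty"`      // 0 means the window is unbounded
}

type Placement struct {
	Constraints []string `json:"Constraints,omitempty"`
}

func main() {
	spec := map[string]interface{}{
		"RestartPolicy": RestartPolicy{Condition: "on-failure", Delay: int64(5e9), MaxAttempts: 3},
		"Placement":     Placement{Constraints: []string{"node.role!=manager"}},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```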
- type: "object" - properties: - Name: - type: "string" - Options: - type: "object" - additionalProperties: - type: "string" - - TaskState: - type: "string" - enum: - - "new" - - "allocated" - - "pending" - - "assigned" - - "accepted" - - "preparing" - - "ready" - - "starting" - - "running" - - "complete" - - "shutdown" - - "failed" - - "rejected" - - "remove" - - "orphaned" - - Task: - type: "object" - properties: - ID: - description: "The ID of the task." - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Name: - description: "Name of the task." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Spec: - $ref: "#/definitions/TaskSpec" - ServiceID: - description: "The ID of the service this task is part of." - type: "string" - Slot: - type: "integer" - NodeID: - description: "The ID of the node that this task is on." - type: "string" - AssignedGenericResources: - $ref: "#/definitions/GenericResources" - Status: - type: "object" - properties: - Timestamp: - type: "string" - format: "dateTime" - State: - $ref: "#/definitions/TaskState" - Message: - type: "string" - Err: - type: "string" - ContainerStatus: - type: "object" - properties: - ContainerID: - type: "string" - PID: - type: "integer" - ExitCode: - type: "integer" - DesiredState: - $ref: "#/definitions/TaskState" - example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - AssignedGenericResources: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - ServiceSpec: - description: "User modifiable configuration for a service." - properties: - Name: - description: "Name of the service." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - TaskTemplate: - $ref: "#/definitions/TaskSpec" - Mode: - description: "Scheduling mode for the service." - type: "object" - properties: - Replicated: - type: "object" - properties: - Replicas: - type: "integer" - format: "int64" - Global: - type: "object" - UpdateConfig: - description: "Specification for the update strategy of the service." 
- type: "object" - properties: - Parallelism: - description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." - type: "integer" - format: "int64" - Delay: - description: "Amount of time between updates, in nanoseconds." - type: "integer" - format: "int64" - FailureAction: - description: "Action to take if an updated task fails to run, or stops running during the update." - type: "string" - enum: - - "continue" - - "pause" - - "rollback" - Monitor: - description: "Amount of time to monitor each updated task for failures, in nanoseconds." - type: "integer" - format: "int64" - MaxFailureRatio: - description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." - type: "number" - default: 0 - Order: - description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." - type: "string" - enum: - - "stop-first" - - "start-first" - RollbackConfig: - description: "Specification for the rollback strategy of the service." - type: "object" - properties: - Parallelism: - description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)." - type: "integer" - format: "int64" - Delay: - description: "Amount of time between rollback iterations, in nanoseconds." - type: "integer" - format: "int64" - FailureAction: - description: "Action to take if an rolled back task fails to run, or stops running during the rollback." - type: "string" - enum: - - "continue" - - "pause" - Monitor: - description: "Amount of time to monitor each rolled back task for failures, in nanoseconds." - type: "integer" - format: "int64" - MaxFailureRatio: - description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1." - type: "number" - default: 0 - Order: - description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." - type: "string" - enum: - - "stop-first" - - "start-first" - Networks: - description: "Array of network names or IDs to attach the service to." - type: "array" - items: - type: "object" - properties: - Target: - type: "string" - Aliases: - type: "array" - items: - type: "string" - EndpointSpec: - $ref: "#/definitions/EndpointSpec" - - EndpointPortConfig: - type: "object" - properties: - Name: - type: "string" - Protocol: - type: "string" - enum: - - "tcp" - - "udp" - - "sctp" - TargetPort: - description: "The port inside the container." - type: "integer" - PublishedPort: - description: "The port on the swarm hosts." - type: "integer" - PublishMode: - description: | - The mode in which port is published. - -


- - - "ingress" makes the target port accessible on on every node, - regardless of whether there is a task for the service running on - that node or not. - - "host" bypasses the routing mesh and publish the port directly on - the swarm node where that service is running. - - type: "string" - enum: - - "ingress" - - "host" - default: "ingress" - example: "ingress" - - EndpointSpec: - description: "Properties that can be configured to access and load balance a service." - type: "object" - properties: - Mode: - description: "The mode of resolution to use for internal load balancing - between tasks." - type: "string" - enum: - - "vip" - - "dnsrr" - default: "vip" - Ports: - description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - - Service: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ServiceSpec" - Endpoint: - type: "object" - properties: - Spec: - $ref: "#/definitions/EndpointSpec" - Ports: - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - VirtualIPs: - type: "array" - items: - type: "object" - properties: - NetworkID: - type: "string" - Addr: - type: "string" - UpdateStatus: - description: "The status of a service update." - type: "object" - properties: - State: - type: "string" - enum: - - "updating" - - "paused" - - "completed" - StartedAt: - type: "string" - format: "dateTime" - CompletedAt: - type: "string" - format: "dateTime" - Message: - type: "string" - example: - ID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Version: - Index: 19 - CreatedAt: "2016-06-07T21:05:51.880065305Z" - UpdatedAt: "2016-06-07T21:07:29.962229872Z" - Spec: - Name: "hopeful_cori" - TaskTemplate: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Endpoint: - Spec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - VirtualIPs: - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.2/16" - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.3/16" - - ImageDeleteResponseItem: - type: "object" - properties: - Untagged: - description: "The image ID of an image that was untagged" - type: "string" - Deleted: - description: "The image ID of an image that was deleted" - type: "string" - - ServiceUpdateResponse: - type: "object" - properties: - Warnings: - description: "Optional warning messages" - type: "array" - items: - type: "string" - example: - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - - ContainerSummary: - type: "array" - items: - type: "object" - properties: - Id: - description: "The ID of this container" - type: "string" - x-go-name: "ID" - Names: - description: "The 
names that this container has been given" - type: "array" - items: - type: "string" - Image: - description: "The name of the image used when creating this container" - type: "string" - ImageID: - description: "The ID of the image that this container was created from" - type: "string" - Command: - description: "Command to run when starting the container" - type: "string" - Created: - description: "When the container was created" - type: "integer" - format: "int64" - Ports: - description: "The ports exposed by this container" - type: "array" - items: - $ref: "#/definitions/Port" - SizeRw: - description: "The size of files that have been created or changed by this container" - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container" - type: "integer" - format: "int64" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - State: - description: "The state of this container (e.g. `Exited`)" - type: "string" - Status: - description: "Additional human-readable status of this container (e.g. `Exit 0`)" - type: "string" - HostConfig: - type: "object" - properties: - NetworkMode: - type: "string" - NetworkSettings: - description: "A summary of the container's network settings" - type: "object" - properties: - Networks: - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - Mounts: - type: "array" - items: - $ref: "#/definitions/Mount" - - Driver: - description: "Driver represents a driver (network, logging, secrets)." - type: "object" - required: [Name] - properties: - Name: - description: "Name of the driver." - type: "string" - x-nullable: false - example: "some-driver" - Options: - description: "Key/value map of driver-specific options." - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: - OptionA: "value for driver-specific option A" - OptionB: "value for driver-specific option B" - - SecretSpec: - type: "object" - properties: - Name: - description: "User-defined name of the secret." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Data: - description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - data to store as secret. - - This field is only used to _create_ a secret, and is not returned by - other endpoints. - type: "string" - example: "" - Driver: - description: "Name of the secrets driver used to fetch the secret's value from an external secret store" - $ref: "#/definitions/Driver" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Secret: - type: "object" - properties: - ID: - type: "string" - example: "blt1owaxmitz71s9v5zh81zun" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - Spec: - $ref: "#/definitions/SecretSpec" - - ConfigSpec: - type: "object" - properties: - Name: - description: "User-defined name of the config." - type: "string" - Labels: - description: "User-defined key/value metadata." 
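The `SecretSpec.Data` field above (and `ConfigSpec.Data` just below) expects base64-url-safe encoding per RFC 4648, which is not what plain base64 tools emit by default. A small Go sketch of producing a conformant value:

```go
// Sketch: SecretSpec.Data must be base64-url-safe (RFC 4648) encoded.
// Go's base64.URLEncoding implements exactly that alphabet.
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	payload := []byte("s3cr3t-value")

	encoded := base64.URLEncoding.EncodeToString(payload)
	fmt.Println(encoded) // this string goes in SecretSpec.Data

	// Round-trip to show the encoding is lossless.
	decoded, err := base64.URLEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded))
}
```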
- type: "object" - additionalProperties: - type: "string" - Data: - description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - config data. - type: "string" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Config: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ConfigSpec" - - SystemInfo: - type: "object" - properties: - ID: - description: | - Unique identifier of the daemon. - -


- - > **Note**: The format of the ID itself is not part of the API, and - > should not be considered stable. - type: "string" - example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" - Containers: - description: "Total number of containers on the host." - type: "integer" - example: 14 - ContainersRunning: - description: | - Number of containers with status `"running"`. - type: "integer" - example: 3 - ContainersPaused: - description: | - Number of containers with status `"paused"`. - type: "integer" - example: 1 - ContainersStopped: - description: | - Number of containers with status `"stopped"`. - type: "integer" - example: 10 - Images: - description: | - Total number of images on the host. - - Both _tagged_ and _untagged_ (dangling) images are counted. - type: "integer" - example: 508 - Driver: - description: "Name of the storage driver in use." - type: "string" - example: "overlay2" - DriverStatus: - description: | - Information specific to the storage driver, provided as - "label" / "value" pairs. - - This information is provided by the storage driver, and formatted - in a way consistent with the output of `docker info` on the command - line. - -


- - > **Note**: The information returned in this field, including the - > formatting of values and labels, should not be considered stable, - > and may change without notice. - type: "array" - items: - type: "array" - items: - type: "string" - example: - - ["Backing Filesystem", "extfs"] - - ["Supports d_type", "true"] - - ["Native Overlay Diff", "true"] - DockerRootDir: - description: | - Root directory of persistent Docker state. - - Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` - on Windows. - type: "string" - example: "/var/lib/docker" - SystemStatus: - description: | - Status information about this node (standalone Swarm API). - -


- - > **Note**: The information returned in this field is only propagated - > by the Swarm standalone API, and is empty (`null`) when using - > built-in swarm mode. - type: "array" - items: - type: "array" - items: - type: "string" - example: - - ["Role", "primary"] - - ["State", "Healthy"] - - ["Strategy", "spread"] - - ["Filters", "health, port, containerslots, dependency, affinity, constraint, whitelist"] - - ["Nodes", "2"] - - [" swarm-agent-00", "192.168.99.102:2376"] - - [" └ ID", "5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"] - - [" └ Status", "Healthy"] - - [" └ Containers", "1 (1 Running, 0 Paused, 0 Stopped)"] - - [" └ Reserved CPUs", "0 / 1"] - - [" └ Reserved Memory", "0 B / 1.021 GiB"] - - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] - - [" └ UpdatedAt", "2017-08-09T10:03:46Z"] - - [" └ ServerVersion", "17.06.0-ce"] - - [" swarm-manager", "192.168.99.101:2376"] - - [" └ ID", "TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"] - - [" └ Status", "Healthy"] - - [" └ Containers", "2 (2 Running, 0 Paused, 0 Stopped)"] - - [" └ Reserved CPUs", "0 / 1"] - - [" └ Reserved Memory", "0 B / 1.021 GiB"] - - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] - - [" └ UpdatedAt", "2017-08-09T10:04:11Z"] - - [" └ ServerVersion", "17.06.0-ce"] - Plugins: - $ref: "#/definitions/PluginsInfo" - MemoryLimit: - description: "Indicates if the host has memory limit support enabled." - type: "boolean" - example: true - SwapLimit: - description: "Indicates if the host has memory swap limit support enabled." - type: "boolean" - example: true - KernelMemory: - description: "Indicates if the host has kernel memory limit support enabled." - type: "boolean" - example: true - CpuCfsPeriod: - description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." - type: "boolean" - example: true - CpuCfsQuota: - description: "Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host." - type: "boolean" - example: true - CPUShares: - description: "Indicates if CPU Shares limiting is supported by the host." - type: "boolean" - example: true - CPUSet: - description: | - Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. - - See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) - type: "boolean" - example: true - PidsLimit: - description: "Indicates if the host kernel has PID limit support enabled." - type: "boolean" - example: true - OomKillDisable: - description: "Indicates if OOM killer disable is supported on the host." - type: "boolean" - IPv4Forwarding: - description: "Indicates IPv4 forwarding is enabled." - type: "boolean" - example: true - BridgeNfIptables: - description: "Indicates if `bridge-nf-call-iptables` is available on the host." - type: "boolean" - example: true - BridgeNfIp6tables: - description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." - type: "boolean" - example: true - Debug: - description: "Indicates if the daemon is running in debug-mode / with debug-level logging enabled." - type: "boolean" - example: true - NFd: - description: | - The total number of file Descriptors in use by the daemon process. 
- - This information is only returned if debug-mode is enabled. - type: "integer" - example: 64 - NGoroutines: - description: | - The number of goroutines that currently exist. - - This information is only returned if debug-mode is enabled. - type: "integer" - example: 174 - SystemTime: - description: | - Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) - format with nano-seconds. - type: "string" - example: "2017-08-08T20:28:29.06202363Z" - LoggingDriver: - description: | - The logging driver to use as a default for new containers. - type: "string" - CgroupDriver: - description: | - The driver to use for managing cgroups. - type: "string" - enum: ["cgroupfs", "systemd"] - default: "cgroupfs" - example: "cgroupfs" - NEventsListener: - description: "Number of event listeners subscribed." - type: "integer" - example: 30 - KernelVersion: - description: | - Kernel version of the host. - - On Linux, this information is obtained from `uname`. On Windows this - information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ - registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. - type: "string" - example: "4.9.38-moby" - OperatingSystem: - description: | - Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" - or "Windows Server 2016 Datacenter" - type: "string" - example: "Alpine Linux v3.5" - OSType: - description: | - Generic type of the operating system of the host, as returned by the - Go runtime (`GOOS`). - - Currently returned values are "linux" and "windows". A full list of - possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). - type: "string" - example: "linux" - Architecture: - description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). - - A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). - type: "string" - example: "x86_64" - NCPU: - description: | - The number of logical CPUs usable by the daemon. - - The number of available CPUs is checked by querying the operating - system when the daemon starts. Changes to operating system CPU - allocation after the daemon is started are not reflected. - type: "integer" - example: 4 - MemTotal: - description: | - Total amount of physical memory available on the host, in bytes. - type: "integer" - format: "int64" - example: 2095882240 - - IndexServerAddress: - description: | - Address / URL of the index server that is used for image search, - and as a default for user authentication for Docker Hub and Docker Cloud. - default: "https://index.docker.io/v1/" - type: "string" - example: "https://index.docker.io/v1/" - RegistryConfig: - $ref: "#/definitions/RegistryServiceConfig" - GenericResources: - $ref: "#/definitions/GenericResources" - HttpProxy: - description: | - HTTP-proxy configured for the daemon. This value is obtained from the - [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL - are masked in the API response. - - Containers do not automatically inherit this configuration. - type: "string" - example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" - HttpsProxy: - description: | - HTTPS-proxy configured for the daemon.
This value is obtained from the - [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL - are masked in the API response. - - Containers do not automatically inherit this configuration. - type: "string" - example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" - NoProxy: - description: | - Comma-separated list of domain extensions for which no proxy should be - used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) - environment variable. - - Containers do not automatically inherit this configuration. - type: "string" - example: "*.local, 169.254/16" - Name: - description: "Hostname of the host." - type: "string" - example: "node5.corp.example.com" - Labels: - description: | - User-defined labels (key/value metadata) as set on the daemon. - -


- - > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, - > set through the daemon configuration, and _node_ labels, set from a - > manager node in the Swarm. Node labels are not included in this - > field. Node labels can be retrieved using the `/nodes/(id)` endpoint - > on a manager node in the Swarm. - type: "array" - items: - type: "string" - example: ["storage=ssd", "production"] - ExperimentalBuild: - description: | - Indicates if experimental features are enabled on the daemon. - type: "boolean" - example: true - ServerVersion: - description: | - Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. - type: "string" - example: "17.06.0-ce" - ClusterStore: - description: | - URL of the distributed storage backend. - - - The storage backend is used for multihost networking (to store - network and endpoint information) and by the node discovery mechanism. - -


- - > **Note**: This field is only propagated when using standalone Swarm - > mode, and overlay networking using an external k/v store. Overlay - > networks with Swarm mode enabled use the built-in raft store, and - > this field will be empty. - type: "string" - example: "consul://consul.corp.example.com:8600/some/path" - ClusterAdvertise: - description: | - The network endpoint that the Engine advertises for the purpose of - node discovery. ClusterAdvertise is a `host:port` combination on which - the daemon is reachable by other hosts. - -


- - > **Note**: This field is only propagated when using standalone Swarm - > mode, and overlay networking using an external k/v store. Overlay - > networks with Swarm mode enabled use the built-in raft store, and - > this field will be empty. - type: "string" - example: "node5.corp.example.com:8000" - Runtimes: - description: | - List of [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtimes configured on the daemon. Keys hold the "name" used to - reference the runtime. - - The Docker daemon relies on an OCI compliant runtime (invoked via the - `containerd` daemon) as its interface to the Linux kernel namespaces, - cgroups, and SELinux. - - The default runtime is `runc`, and is automatically configured. Additional - runtimes can be configured by the user and will be listed here. - type: "object" - additionalProperties: - $ref: "#/definitions/Runtime" - default: - runc: - path: "runc" - example: - runc: - path: "runc" - runc-master: - path: "/go/bin/runc" - custom: - path: "/usr/local/bin/my-oci-runtime" - runtimeArgs: ["--debug", "--systemd-cgroup=false"] - DefaultRuntime: - description: | - Name of the default OCI runtime that is used when starting containers. - - The default can be overridden per-container at create time. - type: "string" - default: "runc" - example: "runc" - Swarm: - $ref: "#/definitions/SwarmInfo" - LiveRestoreEnabled: - description: | - Indicates if live restore is enabled. - - If enabled, containers are kept running when the daemon is shut down - or upon daemon start if running containers are detected. - type: "boolean" - default: false - example: false - Isolation: - description: | - Represents the isolation technology to use as a default for containers. - The supported values are platform-specific. - - If no isolation value is specified on daemon start, on Windows client, - the default is `hyperv`, and on Windows server, the default is `process`. - - This option is currently not used on other platforms. - default: "default" - type: "string" - enum: - - "default" - - "hyperv" - - "process" - InitBinary: - description: | - Name and, optionally, path of the `docker-init` binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "docker-init" - ContainerdCommit: - $ref: "#/definitions/Commit" - RuncCommit: - $ref: "#/definitions/Commit" - InitCommit: - $ref: "#/definitions/Commit" - SecurityOptions: - description: | - List of security features that are enabled on the daemon, such as - apparmor, seccomp, SELinux, and user-namespaces (userns). - - Additional configuration options for each security feature may - be present, and are included as a comma-separated list of key/value - pairs. - type: "array" - items: - type: "string" - example: - - "name=apparmor" - - "name=seccomp,profile=default" - - "name=selinux" - - "name=userns" - ProductLicense: - description: | - Reports a summary of the product license on the daemon. - - If a commercial license has been applied to the daemon, information - such as the number of nodes and expiration is included. - type: "string" - example: "Community Engine" - Warnings: - description: | - List of warnings / informational messages about missing features, or - issues related to the daemon configuration. - - These messages can be printed by the client as information to the user.
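A hedged sketch of reading a few of these `SystemInfo` fields from a running daemon; it assumes the default unix socket at `/var/run/docker.sock` and that the daemon serves this object at `GET /info` (adjust the path or add a `/vX.Y` version prefix as needed):

```go
// Sketch: fetch SystemInfo over the daemon's unix socket using only the
// standard library, and print two of the fields described above.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			// Dial the unix socket instead of TCP; the URL host is ignored.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	resp, err := client.Get("http://docker/info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the fields we care about; unknown fields are ignored.
	var info struct {
		ServerVersion string
		Warnings      []string
	}
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		panic(err)
	}
	fmt.Println(info.ServerVersion, info.Warnings)
}
```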
- type: "array" - items: - type: "string" - example: - - "WARNING: No memory limit support" - - "WARNING: bridge-nf-call-iptables is disabled" - - "WARNING: bridge-nf-call-ip6tables is disabled" - - - # PluginsInfo is a temp struct holding Plugins name - # registered with docker daemon. It is used by Info struct - PluginsInfo: - description: | - Available plugins per type. - -


- - > **Note**: Only unmanaged (V1) plugins are included in this list. - > V1 plugins are "lazily" loaded, and are not returned in this list - > if there is no resource using the plugin. - type: "object" - properties: - Volume: - description: "Names of available volume-drivers, and volume-driver plugins." - type: "array" - items: - type: "string" - example: ["local"] - Network: - description: "Names of available network-drivers, and network-driver plugins." - type: "array" - items: - type: "string" - example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] - Authorization: - description: "Names of available authorization plugins." - type: "array" - items: - type: "string" - example: ["img-authz-plugin", "hbm"] - Log: - description: "Names of available logging-drivers, and logging-driver plugins." - type: "array" - items: - type: "string" - example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] - - - RegistryServiceConfig: - description: | - RegistryServiceConfig stores daemon registry services configuration. - type: "object" - x-nullable: true - properties: - AllowNondistributableArtifactsCIDRs: - description: | - List of IP ranges to which nondistributable artifacts can be pushed, - using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632). - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration overrides this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. - - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - AllowNondistributableArtifactsHostnames: - description: | - List of registry hostnames to which nondistributable artifacts can be - pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration overrides this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts.
- type: "array" - items: - type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] - InsecureRegistryCIDRs: - description: | - List of IP ranges of insecure registries, using the CIDR syntax - ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries - accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates - from unknown CAs) communication. - - By default, local registries (`127.0.0.0/8`) are configured as - insecure. All other registries are secure. Communicating with an - insecure registry is not possible if the daemon assumes that registry - is secure. - - This configuration override this behavior, insecure communication with - registries whose resolved IP address is within the subnet described by - the CIDR syntax. - - Registries can also be marked insecure by hostname. Those registries - are listed under `IndexConfigs` and have their `Secure` field set to - `false`. - - > **Warning**: Using this option can be useful when running a local - > registry, but introduces security vulnerabilities. This option - > should therefore ONLY be used for testing purposes. For increased - > security, users should add their CA to their system's list of trusted - > CAs instead of enabling this option. - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - IndexConfigs: - type: "object" - additionalProperties: - $ref: "#/definitions/IndexInfo" - example: - "127.0.0.1:5000": - "Name": "127.0.0.1:5000" - "Mirrors": [] - "Secure": false - "Official": false - "[2001:db8:a0b:12f0::1]:80": - "Name": "[2001:db8:a0b:12f0::1]:80" - "Mirrors": [] - "Secure": false - "Official": false - "docker.io": - Name: "docker.io" - Mirrors: ["https://hub-mirror.corp.example.com:5000/"] - Secure: true - Official: true - "registry.internal.corp.example.com:3000": - Name: "registry.internal.corp.example.com:3000" - Mirrors: [] - Secure: false - Official: false - Mirrors: - description: | - List of registry URLs that act as a mirror for the official - (`docker.io`) registry. - - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://[2001:db8:a0b:12f0::1]/" - - IndexInfo: - description: - IndexInfo contains information about a registry. - type: "object" - x-nullable: true - properties: - Name: - description: | - Name of the registry, such as "docker.io". - type: "string" - example: "docker.io" - Mirrors: - description: | - List of mirrors, expressed as URIs. - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://registry-2.docker.io/" - - "https://registry-3.docker.io/" - Secure: - description: | - Indicates if the registry is part of the list of insecure - registries. - - If `false`, the registry is insecure. Insecure registries accept - un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from - unknown CAs) communication. - - > **Warning**: Insecure registries can be useful when running a local - > registry. However, because its use creates security vulnerabilities - > it should ONLY be enabled for testing purposes. For increased - > security, users should add their CA to their system's list of - > trusted CAs instead of enabling this option. 
- type: "boolean" - example: true - Official: - description: | - Indicates whether this is an official registry (i.e., Docker Hub / docker.io) - type: "boolean" - example: true - - Runtime: - description: | - Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtime. - - The runtime is invoked by the daemon via the `containerd` daemon. OCI - runtimes act as an interface to the Linux kernel namespaces, cgroups, - and SELinux. - type: "object" - properties: - path: - description: | - Name and, optional, path, of the OCI executable binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "/usr/local/bin/my-oci-runtime" - runtimeArgs: - description: | - List of command-line arguments to pass to the runtime when invoked. - type: "array" - x-nullable: true - items: - type: "string" - example: ["--debug", "--systemd-cgroup=false"] - - Commit: - description: | - Commit holds the Git-commit (SHA1) that a binary was built from, as - reported in the version-string of external tools, such as `containerd`, - or `runC`. - type: "object" - properties: - ID: - description: "Actual commit ID of external tool." - type: "string" - example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" - Expected: - description: | - Commit ID of external tool expected by dockerd as set at build time. - type: "string" - example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" - - SwarmInfo: - description: | - Represents generic information about swarm. - type: "object" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - default: "" - example: "k67qz4598weg5unwwffg6z1m1" - NodeAddr: - description: | - IP address at which this node can be reached by other nodes in the - swarm. - type: "string" - default: "" - example: "10.0.0.46" - LocalNodeState: - $ref: "#/definitions/LocalNodeState" - ControlAvailable: - type: "boolean" - default: false - example: true - Error: - type: "string" - default: "" - RemoteManagers: - description: | - List of ID's and addresses of other managers in the swarm. - type: "array" - default: null - x-nullable: true - items: - $ref: "#/definitions/PeerNode" - example: - - NodeID: "71izy0goik036k48jg985xnds" - Addr: "10.0.0.158:2377" - - NodeID: "79y6h1o4gv8n120drcprv5nmc" - Addr: "10.0.0.159:2377" - - NodeID: "k67qz4598weg5unwwffg6z1m1" - Addr: "10.0.0.46:2377" - Nodes: - description: "Total number of nodes in the swarm." - type: "integer" - x-nullable: true - example: 4 - Managers: - description: "Total number of managers in the swarm." - type: "integer" - x-nullable: true - example: 3 - Cluster: - $ref: "#/definitions/ClusterInfo" - - LocalNodeState: - description: "Current local status of this node." - type: "string" - default: "" - enum: - - "" - - "inactive" - - "pending" - - "active" - - "error" - - "locked" - example: "active" - - PeerNode: - description: "Represents a peer-node in the swarm" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - Addr: - description: | - IP address and ports at which this node can be reached. - type: "string" - -paths: - /containers/json: - get: - summary: "List containers" - description: | - Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect). - - Note that it uses a different, smaller representation of a container than inspecting a single container. 
For example, - the list of linked containers is not propagated. - operationId: "ContainerList" - produces: - - "application/json" - parameters: - - name: "all" - in: "query" - description: "Return all containers. By default, only running containers are shown" - type: "boolean" - default: false - - name: "limit" - in: "query" - description: "Return this number of most recently created containers, including non-running ones." - type: "integer" - - name: "size" - in: "query" - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. Available filters: - - - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) - - `before`=(`<container id>` or `<container name>`) - - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - - `exited=<int>` containers with exit code of `<int>` - - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - - `id=<ID>` a container's ID - - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - - `is-task=`(`true`|`false`) - - `label=key` or `label="key=value"` of a container label - - `name=<name>` a container's name - - `network`=(`<network id>` or `<network name>`) - - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - - `since`=(`<container id>` or `<container name>`) - - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - - `volume`=(`<volume name>` or `<mount point destination>`) - type: "string" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ContainerSummary" - examples: - application/json: - - Id: "8dfafdbc3a40" - Names: - - "/boring_feynman" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 1" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: - - PrivatePort: 2222 - PublicPort: 3333 - Type: "tcp" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:02" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - - Id: "9cd87474be90" - Names: - - "/coolName" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 222222" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.8" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:08" - Mounts: [] - - Id: "3176a2479c92" - Names: - - "/sleepy_dog" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 3333333333333333" - Created: 1367854154 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 -
SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.6" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:06" - Mounts: [] - - Id: "4cb07b47f9fb" - Names: - - "/running_cat" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 444444444444444444444444444444444" - Created: 1367854152 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.5" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:05" - Mounts: [] - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/create: - post: - summary: "Create a container" - operationId: "ContainerCreate" - consumes: - - "application/json" - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "name" - in: "query" - description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`." - type: "string" - pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - - name: "body" - in: "body" - description: "Container to create" - schema: - allOf: - - $ref: "#/definitions/ContainerConfig" - - type: "object" - properties: - HostConfig: - $ref: "#/definitions/HostConfig" - NetworkingConfig: - description: "This container's networking configuration." - type: "object" - properties: - EndpointsConfig: - description: "A mapping of network name to endpoint configuration for that network." 
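Before the spec's own full example payload below, here is a minimal client-side sketch of the create call just described. The socket path and the unversioned URL are assumptions; production clients normally add a `/vX.Y` API-version prefix:

```go
// Sketch: POST a minimal body to /containers/create over the unix socket.
// Only Image and Cmd are set; the container name goes in the query string.
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	body, _ := json.Marshal(map[string]interface{}{
		"Image": "ubuntu",
		"Cmd":   []string{"date"},
	})
	resp, err := client.Post("http://docker/containers/create?name=example",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	// Expect 201 with {"Id": "...", "Warnings": []} per the response schema.
	fmt.Println(resp.StatusCode, string(out))
}
```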
- type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - example: - Hostname: "" - Domainname: "" - User: "" - AttachStdin: false - AttachStdout: true - AttachStderr: true - Tty: false - OpenStdin: false - StdinOnce: false - Env: - - "FOO=bar" - - "BAZ=quux" - Cmd: - - "date" - Entrypoint: "" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - Volumes: - /volumes/data: {} - WorkingDir: "" - NetworkDisabled: false - MacAddress: "12:34:56:78:9a:bc" - ExposedPorts: - 22/tcp: {} - StopSignal: "SIGTERM" - StopTimeout: 10 - HostConfig: - Binds: - - "/tmp:/tmp" - Links: - - "redis3:redis" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - KernelMemory: 0 - NanoCPUs: 500000 - CpuPercent: 80 - CpuShares: 512 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpuQuota: 50000 - CpusetCpus: "0,1" - CpusetMems: "0,1" - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 300 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceWriteIOps: - - {} - MemorySwappiness: 60 - OomKillDisable: false - OomScoreAdj: 500 - PidMode: "" - PidsLimit: 0 - PortBindings: - 22/tcp: - - HostPort: "11022" - PublishAllPorts: false - Privileged: false - ReadonlyRootfs: false - Dns: - - "8.8.8.8" - DnsOptions: - - "" - DnsSearch: - - "" - VolumesFrom: - - "parent" - - "other:ro" - CapAdd: - - "NET_ADMIN" - CapDrop: - - "MKNOD" - GroupAdd: - - "newgroup" - RestartPolicy: - Name: "" - MaximumRetryCount: 0 - AutoRemove: true - NetworkMode: "bridge" - Devices: [] - Ulimits: - - {} - LogConfig: - Type: "json-file" - Config: {} - SecurityOpt: [] - StorageOpt: {} - CgroupParent: "" - VolumeDriver: "" - ShmSize: 67108864 - NetworkingConfig: - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - - required: true - responses: - 201: - description: "Container created successfully" - schema: - type: "object" - title: "ContainerCreateResponse" - description: "OK response to ContainerCreate operation" - required: [Id, Warnings] - properties: - Id: - description: "The ID of the created container" - type: "string" - x-nullable: false - Warnings: - description: "Warnings encountered when creating the container" - type: "array" - x-nullable: false - items: - type: "string" - examples: - application/json: - Id: "e90e34656806" - Warnings: [] - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/{id}/json: - get: - summary: "Inspect a container" - description: "Return low-level information about a container." 
- operationId: "ContainerInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "ContainerInspectResponse" - properties: - Id: - description: "The ID of the container" - type: "string" - Created: - description: "The time the container was created" - type: "string" - Path: - description: "The path to the command being run" - type: "string" - Args: - description: "The arguments to the command being run" - type: "array" - items: - type: "string" - State: - description: "The state of the container." - type: "object" - properties: - Status: - description: | - The status of the container. For example, `"running"` or `"exited"`. - type: "string" - enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] - Running: - description: | - Whether this container is running. - - Note that a running container can be _paused_. The `Running` and `Paused` - booleans are not mutually exclusive: - - When pausing a container (on Linux), the cgroups freezer is used to suspend - all processes in the container. Freezing the process requires the process to - be running. As a result, paused containers are both `Running` _and_ `Paused`. - - Use the `Status` field instead to determine if a container's state is "running". - type: "boolean" - Paused: - description: "Whether this container is paused." - type: "boolean" - Restarting: - description: "Whether this container is restarting." - type: "boolean" - OOMKilled: - description: "Whether this container has been killed because it ran out of memory." - type: "boolean" - Dead: - type: "boolean" - Pid: - description: "The process ID of this container" - type: "integer" - ExitCode: - description: "The last exit code of this container" - type: "integer" - Error: - type: "string" - StartedAt: - description: "The time when this container was last started." - type: "string" - FinishedAt: - description: "The time when this container last exited." - type: "string" - Image: - description: "The container's image" - type: "string" - ResolvConfPath: - type: "string" - HostnamePath: - type: "string" - HostsPath: - type: "string" - LogPath: - type: "string" - Node: - description: "TODO" - type: "object" - Name: - type: "string" - RestartCount: - type: "integer" - Driver: - type: "string" - MountLabel: - type: "string" - ProcessLabel: - type: "string" - AppArmorProfile: - type: "string" - ExecIDs: - description: "IDs of exec instances that are running in the container." - type: "array" - items: - type: "string" - x-nullable: true - HostConfig: - $ref: "#/definitions/HostConfig" - GraphDriver: - $ref: "#/definitions/GraphDriverData" - SizeRw: - description: "The size of files that have been created or changed by this container." - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container." 
- type: "integer" - format: "int64" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - Config: - $ref: "#/definitions/ContainerConfig" - NetworkSettings: - $ref: "#/definitions/NetworkSettings" - examples: - application/json: - AppArmorProfile: "" - Args: - - "-c" - - "exit 9" - Config: - AttachStderr: true - AttachStdin: false - AttachStdout: true - Cmd: - - "/bin/sh" - - "-c" - - "exit 9" - Domainname: "" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Hostname: "ba033ac44011" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - MacAddress: "" - NetworkDisabled: false - OpenStdin: false - StdinOnce: false - Tty: false - User: "" - Volumes: - /volumes/data: {} - WorkingDir: "" - StopSignal: "SIGTERM" - StopTimeout: 10 - Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" - ExecIDs: - - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" - HostConfig: - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 0 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteIOps: - - {} - ContainerIDFile: "" - CpusetCpus: "" - CpusetMems: "" - CpuPercent: 80 - CpuShares: 0 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - Devices: [] - IpcMode: "" - LxcConf: [] - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - KernelMemory: 0 - OomKillDisable: false - OomScoreAdj: 500 - NetworkMode: "bridge" - PidMode: "" - PortBindings: {} - Privileged: false - ReadonlyRootfs: false - PublishAllPorts: false - RestartPolicy: - MaximumRetryCount: 2 - Name: "on-failure" - LogConfig: - Type: "json-file" - Sysctls: - net.ipv4.ip_forward: "1" - Ulimits: - - {} - VolumeDriver: "" - ShmSize: 67108864 - HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" - HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" - LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" - Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" - Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" - MountLabel: "" - Name: "/boring_euclid" - NetworkSettings: - Bridge: "" - SandboxID: "" - HairpinMode: false - LinkLocalIPv6Address: "" - LinkLocalIPv6PrefixLen: 0 - SandboxKey: "" - EndpointID: "" - Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - IPAddress: "" - IPPrefixLen: 0 - IPv6Gateway: "" - MacAddress: "" - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Path: "/bin/sh" - ProcessLabel: "" - ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" - RestartCount: 1 - State: - Error: "" - ExitCode: 9 - FinishedAt: "2015-01-06T15:47:32.080254511Z" - OOMKilled: false - Dead: false - Paused: false - Pid: 0 - Restarting: false - Running: true - StartedAt: 
"2015-01-06T15:47:32.072697474Z" - Status: "running" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "size" - in: "query" - type: "boolean" - default: false - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" - tags: ["Container"] - /containers/{id}/top: - get: - summary: "List processes running inside a container" - description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." - operationId: "ContainerTop" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "ContainerTopResponse" - description: "OK response to ContainerTop operation" - properties: - Titles: - description: "The ps column titles" - type: "array" - items: - type: "string" - Processes: - description: "Each process running in the container, where each is process is an array of values corresponding to the titles" - type: "array" - items: - type: "array" - items: - type: "string" - examples: - application/json: - Titles: - - "UID" - - "PID" - - "PPID" - - "C" - - "STIME" - - "TTY" - - "TIME" - - "CMD" - Processes: - - - - "root" - - "13642" - - "882" - - "0" - - "17:03" - - "pts/0" - - "00:00:00" - - "/bin/bash" - - - - "root" - - "13735" - - "13642" - - "0" - - "17:06" - - "pts/0" - - "00:00:00" - - "sleep 10" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "ps_args" - in: "query" - description: "The arguments to pass to `ps`. For example, `aux`" - type: "string" - default: "-ef" - tags: ["Container"] - /containers/{id}/logs: - get: - summary: "Get container logs" - description: | - Get `stdout` and `stderr` logs from a container. - - Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. - operationId: "ContainerLogs" - responses: - 200: - description: | - logs returned as a stream in response body. - For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). - Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not - set Content-Type. - schema: - type: "string" - format: "binary" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "follow" - in: "query" - description: "Keep connection after returning logs." 
- type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "until" - in: "query" - description: "Only return logs before this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." - type: "string" - default: "all" - tags: ["Container"] - /containers/{id}/changes: - get: - summary: "Get changes on a container’s filesystem" - description: | - Returns which files in a container's filesystem have been added, deleted, - or modified. The `Kind` of modification can be one of: - - - `0`: Modified - - `1`: Added - - `2`: Deleted - operationId: "ContainerChanges" - produces: ["application/json"] - responses: - 200: - description: "The list of changes" - schema: - type: "array" - items: - type: "object" - x-go-name: "ContainerChangeResponseItem" - title: "ContainerChangeResponseItem" - description: "change item in response to ContainerChanges operation" - required: [Path, Kind] - properties: - Path: - description: "Path to file that has changed" - type: "string" - x-nullable: false - Kind: - description: "Kind of change" - type: "integer" - format: "uint8" - enum: [0, 1, 2] - x-nullable: false - examples: - application/json: - - Path: "/dev" - Kind: 0 - - Path: "/dev/kmsg" - Kind: 1 - - Path: "/test" - Kind: 1 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/export: - get: - summary: "Export a container" - description: "Export the contents of a container as a tarball." - operationId: "ContainerExport" - produces: - - "application/octet-stream" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/stats: - get: - summary: "Get container stats based on resource usage" - description: | - This endpoint returns a live stream of a container’s resource usage - statistics. - - The `precpu_stats` is the CPU statistic of the *previous* read, and is - used to calculate the CPU usage percentage. It is not an exact copy - of the `cpu_stats` field. - - If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is - nil then for compatibility with older daemons the length of the - corresponding `cpu_usage.percpu_usage` array should be used. 
- operationId: "ContainerStats" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - type: "object" - examples: - application/json: - read: "2015-01-08T22:57:31.547920715Z" - pids_stats: - current: 3 - networks: - eth0: - rx_bytes: 5338 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 36 - tx_bytes: 648 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 8 - eth5: - rx_bytes: 4641 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 26 - tx_bytes: 690 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 9 - memory_stats: - stats: - total_pgmajfault: 0 - cache: 0 - mapped_file: 0 - total_inactive_file: 0 - pgpgout: 414 - rss: 6537216 - total_mapped_file: 0 - writeback: 0 - unevictable: 0 - pgpgin: 477 - total_unevictable: 0 - pgmajfault: 0 - total_rss: 6537216 - total_rss_huge: 6291456 - total_writeback: 0 - total_inactive_anon: 0 - rss_huge: 6291456 - hierarchical_memory_limit: 67108864 - total_pgfault: 964 - total_active_file: 0 - active_anon: 6537216 - total_active_anon: 6537216 - total_pgpgout: 414 - total_cache: 0 - inactive_anon: 0 - active_file: 0 - pgfault: 964 - inactive_file: 0 - total_pgpgin: 477 - max_usage: 6651904 - usage: 6537216 - failcnt: 0 - limit: 67108864 - blkio_stats: {} - cpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24472255 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100215355 - usage_in_kernelmode: 30000000 - system_cpu_usage: 739306590000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - precpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24350896 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100093996 - usage_in_kernelmode: 30000000 - system_cpu_usage: 9492140000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "stream" - in: "query" - description: "Stream the output. If false, the stats will be output once and then it will disconnect." - type: "boolean" - default: true - tags: ["Container"] - /containers/{id}/resize: - post: - summary: "Resize a container TTY" - description: "Resize the TTY for a container. You must restart the container for the resize to take effect." 
- operationId: "ContainerResize" - consumes: - - "application/octet-stream" - produces: - - "text/plain" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "cannot resize container" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "h" - in: "query" - description: "Height of the tty session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the tty session in characters" - type: "integer" - tags: ["Container"] - /containers/{id}/start: - post: - summary: "Start a container" - operationId: "ContainerStart" - responses: - 204: - description: "no error" - 304: - description: "container already started" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." - type: "string" - tags: ["Container"] - /containers/{id}/stop: - post: - summary: "Stop a container" - operationId: "ContainerStop" - responses: - 204: - description: "no error" - 304: - description: "container already stopped" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/restart: - post: - summary: "Restart a container" - operationId: "ContainerRestart" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/kill: - post: - summary: "Kill a container" - description: "Send a POSIX signal to a container, defaulting to killing to the container." 
- operationId: "ContainerKill" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is not running" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" - type: "string" - default: "SIGKILL" - tags: ["Container"] - /containers/{id}/update: - post: - summary: "Update a container" - description: "Change various configuration options of a container without having to recreate it." - operationId: "ContainerUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "The container has been updated." - schema: - type: "object" - title: "ContainerUpdateResponse" - description: "OK response to ContainerUpdate operation" - properties: - Warnings: - type: "array" - items: - type: "string" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "update" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - example: - BlkioWeight: 300 - CpuShares: 512 - CpuPeriod: 100000 - CpuQuota: 50000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpusetCpus: "0,1" - CpusetMems: "0" - Memory: 314572800 - MemorySwap: 514288000 - MemoryReservation: 209715200 - KernelMemory: 52428800 - RestartPolicy: - MaximumRetryCount: 4 - Name: "on-failure" - tags: ["Container"] - /containers/{id}/rename: - post: - summary: "Rename a container" - operationId: "ContainerRename" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "name already in use" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "name" - in: "query" - required: true - description: "New name for the container" - type: "string" - tags: ["Container"] - /containers/{id}/pause: - post: - summary: "Pause a container" - description: | - Use the cgroups freezer to suspend all processes in a container. - - Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
- operationId: "ContainerPause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/unpause: - post: - summary: "Unpause a container" - description: "Resume a container which has been paused." - operationId: "ContainerUnpause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/attach: - post: - summary: "Attach to a container" - description: | - Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. - - Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. - - See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. - - ### Hijacking - - This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. - - This is the response from the daemon for an attach request: - - ``` - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - [STREAM] - ``` - - After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. - - To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. - - For example, the client sends this request to upgrade the connection: - - ``` - POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 - Upgrade: tcp - Connection: Upgrade - ``` - - The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Content-Type: application/vnd.docker.raw-stream - Connection: Upgrade - Upgrade: tcp - - [STREAM] - ``` - - ### Stream format - - When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. - - The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). - - It is encoded on the first eight bytes like this: - - ```go - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - ``` - - `STREAM_TYPE` can be: - - - 0: `stdin` (is written on `stdout`) - - 1: `stdout` - - 2: `stderr` - - `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. - - Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. 
- - The simplest way to implement this protocol is the following: - - 1. Read 8 bytes. - 2. Choose `stdout` or `stderr` depending on the first byte. - 3. Extract the frame size from the last four bytes. - 4. Read the extracted size and output it on the correct output. - 5. Goto 1. - - ### Stream format when using a TTY - - When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. - - operationId: "ContainerAttach" - produces: - - "application/vnd.docker.raw-stream" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." - type: "string" - - name: "logs" - in: "query" - description: | - Replay previous logs from the container. - - This is useful for attaching to a container that has already started, when you want to output everything since the container started. - - If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. - type: "boolean" - default: false - - name: "stream" - in: "query" - description: "Stream attached streams from the time the request was made onwards" - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/attach/ws: - get: - summary: "Attach to a container via a websocket" - operationId: "ContainerAttachWebsocket" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`."
- type: "string" - - name: "logs" - in: "query" - description: "Return logs" - type: "boolean" - default: false - - name: "stream" - in: "query" - description: "Return stream" - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/wait: - post: - summary: "Wait for a container" - description: "Block until a container stops, then returns the exit code." - operationId: "ContainerWait" - produces: ["application/json"] - responses: - 200: - description: "The container has exit." - schema: - type: "object" - title: "ContainerWaitResponse" - description: "OK response to ContainerWait operation" - required: [StatusCode] - properties: - StatusCode: - description: "Exit code of the container" - type: "integer" - x-nullable: false - Error: - description: "container waiting error, if any" - type: "object" - properties: - Message: - description: "Details of an error" - type: "string" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "condition" - in: "query" - description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'." - type: "string" - default: "not-running" - tags: ["Container"] - /containers/{id}: - delete: - summary: "Remove a container" - operationId: "ContainerDelete" - responses: - 204: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "v" - in: "query" - description: "Remove the volumes associated with the container." - type: "boolean" - default: false - - name: "force" - in: "query" - description: "If the container is running, kill it before removing it." - type: "boolean" - default: false - - name: "link" - in: "query" - description: "Remove the specified link associated with the container." - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/archive: - head: - summary: "Get information about files in a container" - description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." 
- operationId: "ContainerArchiveInfo" - responses: - 200: - description: "no error" - headers: - X-Docker-Container-Path-Stat: - type: "string" - description: "A base64 - encoded JSON object with some filesystem header information about the path" - 400: - description: "Bad parameter" - schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." - type: "string" - x-nullable: false - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - get: - summary: "Get an archive of a filesystem resource in a container" - description: "Get a tar archive of a resource in the filesystem of container id." - operationId: "ContainerArchive" - produces: ["application/x-tar"] - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." - type: "string" - x-nullable: false - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - put: - summary: "Extract an archive of files or folders to a directory in a container" - description: "Upload a tar archive to be extracted to a path in the filesystem of container id." - operationId: "PutContainerArchive" - consumes: ["application/x-tar", "application/octet-stream"] - responses: - 200: - description: "The content was extracted successfully" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "Permission denied, the volume or container rootfs is marked as read-only." - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such container or path does not exist inside the container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Path to a directory in the container to extract the archive’s contents into. 
" - type: "string" - - name: "noOverwriteDirNonDir" - in: "query" - description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." - type: "string" - - name: "inputStream" - in: "body" - required: true - description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." - schema: - type: "string" - tags: ["Container"] - /containers/prune: - post: - summary: "Delete stopped containers" - produces: - - "application/json" - operationId: "ContainerPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ContainerPruneResponse" - properties: - ContainersDeleted: - description: "Container IDs that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /images/json: - get: - summary: "List Images" - description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." - operationId: "ImageList" - produces: - - "application/json" - responses: - 200: - description: "Summary image data for the images matching the query" - schema: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - examples: - application/json: - - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - ParentId: "" - RepoTags: - - "ubuntu:12.04" - - "ubuntu:precise" - RepoDigests: - - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" - Created: 1474925151 - Size: 103579269 - VirtualSize: 103579269 - SharedSize: 0 - Labels: {} - Containers: 2 - - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" - ParentId: "" - RepoTags: - - "ubuntu:12.10" - - "ubuntu:quantal" - RepoDigests: - - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" - Created: 1403128455 - Size: 172064416 - VirtualSize: 172064416 - SharedSize: 0 - Labels: {} - Containers: 5 - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "all" - in: "query" - description: "Show all images. Only images from a final layer (no children) are shown by default." - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. 
Available filters: - - - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - - `dangling=true` - - `label=key` or `label="key=value"` of an image label - - `reference`=(`<image-name>[:<tag>]`) - - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - type: "string" - - name: "digests" - in: "query" - description: "Show digest information as a `RepoDigests` field on each image." - type: "boolean" - default: false - tags: ["Image"] - /build: - post: - summary: "Build an image" - description: | - Build an image from a tar archive with a `Dockerfile` in it. - - The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). - - The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. - - The build is canceled if the client drops the connection by quitting or being killed. - operationId: "ImageBuild" - consumes: - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "inputStream" - in: "body" - description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." - schema: - type: "string" - format: "binary" - - name: "dockerfile" - in: "query" - description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." - type: "string" - default: "Dockerfile" - - name: "t" - in: "query" - description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." - type: "string" - - name: "extrahosts" - in: "query" - description: "Extra hosts to add to /etc/hosts" - type: "string" - - name: "remote" - in: "query" - description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." - type: "string" - - name: "q" - in: "query" - description: "Suppress verbose build output." - type: "boolean" - default: false - - name: "nocache" - in: "query" - description: "Do not use the cache when building the image." - type: "boolean" - default: false - - name: "cachefrom" - in: "query" - description: "JSON array of images used for build cache resolution." - type: "string" - - name: "pull" - in: "query" - description: "Attempt to pull the image even if an older image exists locally." - type: "string" - - name: "rm" - in: "query" - description: "Remove intermediate containers after a successful build." - type: "boolean" - default: true - - name: "forcerm" - in: "query" - description: "Always remove intermediate containers, even upon failure." - type: "boolean" - default: false - - name: "memory" - in: "query" - description: "Set memory limit for build." - type: "integer" - - name: "memswap" - in: "query" - description: "Total memory (memory + swap). Set as `-1` to disable swap." 
- type: "integer" - - name: "cpushares" - in: "query" - description: "CPU shares (relative weight)." - type: "integer" - - name: "cpusetcpus" - in: "query" - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." - type: "string" - - name: "cpuperiod" - in: "query" - description: "The length of a CPU period in microseconds." - type: "integer" - - name: "cpuquota" - in: "query" - description: "Microseconds of CPU time that the container can get in a CPU period." - type: "integer" - - name: "buildargs" - in: "query" - description: > - JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker - uses the buildargs as the environment context for commands run via the `Dockerfile` RUN - instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for - passing secret values. - - - For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the - the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. - - - [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) - type: "string" - - name: "shmsize" - in: "query" - description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." - type: "integer" - - name: "squash" - in: "query" - description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" - type: "boolean" - - name: "labels" - in: "query" - description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." - type: "string" - - name: "networkmode" - in: "query" - description: "Sets the networking mode for the run commands during - build. Supported standard values are: `bridge`, `host`, `none`, and - `container:`. Any other value is taken as a custom network's - name to which this container should connect to." - type: "string" - - name: "Content-type" - in: "header" - type: "string" - enum: - - "application/x-tar" - default: "application/x-tar" - - name: "X-Registry-Config" - in: "header" - description: | - This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. - - The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: - - ``` - { - "docker.example.com": { - "username": "janedoe", - "password": "hunter2" - }, - "https://index.docker.io/v1/": { - "username": "mobydock", - "password": "conta1n3rize14" - } - } - ``` - - Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. 
- type: "string" - - name: "platform" - in: "query" - description: "Platform in the format os[/arch[/variant]]" - type: "string" - default: "" - - name: "target" - in: "query" - description: "Target build stage" - type: "string" - default: "" - - name: "outputs" - in: "query" - description: "BuildKit output configuration" - type: "string" - default: "" - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /build/prune: - post: - summary: "Delete builder cache" - produces: - - "application/json" - operationId: "BuildPrune" - parameters: - - name: "keep-storage" - in: "query" - description: "Amount of disk space in bytes to keep for cache" - type: "integer" - format: "int64" - - name: "all" - in: "query" - type: "boolean" - description: "Remove all types of build cache" - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: - - `until=`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') - - `id=` - - `parent=` - - `type=` - - `description=` - - `inuse` - - `shared` - - `private` - responses: - 200: - description: "No error" - schema: - type: "object" - title: "BuildPruneResponse" - properties: - CachesDeleted: - type: "array" - items: - description: "ID of build cache object" - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /images/create: - post: - summary: "Create an image" - description: "Create an image by either pulling it from a registry or importing it." - operationId: "ImageCreate" - consumes: - - "text/plain" - - "application/octet-stream" - produces: - - "application/json" - responses: - 200: - description: "no error" - 404: - description: "repository does not exist or no read access" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "fromImage" - in: "query" - description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." - type: "string" - - name: "fromSrc" - in: "query" - description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." - type: "string" - - name: "repo" - in: "query" - description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." - type: "string" - - name: "tag" - in: "query" - description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." - type: "string" - - name: "inputImage" - in: "body" - description: "Image content if the value `-` has been specified in fromSrc query parameter" - schema: - type: "string" - required: false - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration. 
[See the authentication section for details.](#section/Authentication)" - type: "string" - - name: "platform" - in: "query" - description: "Platform in the format os[/arch[/variant]]" - type: "string" - default: "" - tags: ["Image"] - /images/{name}/json: - get: - summary: "Inspect an image" - description: "Return low-level information about an image." - operationId: "ImageInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Image" - examples: - application/json: - Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" - Comment: "" - Os: "linux" - Architecture: "amd64" - Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - ContainerConfig: - Tty: false - Hostname: "e611e15f9c9d" - Domainname: "" - AttachStdout: false - PublishService: "" - AttachStdin: false - OpenStdin: false - StdinOnce: false - NetworkDisabled: false - OnBuild: [] - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - User: "" - WorkingDir: "" - MacAddress: "" - AttachStderr: false - Labels: - com.example.license: "GPL" - com.example.version: "1.0" - com.example.vendor: "Acme" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - - "/bin/sh" - - "-c" - - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" - DockerVersion: "1.9.0-dev" - VirtualSize: 188359297 - Size: 0 - Author: "" - Created: "2015-09-10T08:30:53.26995814Z" - GraphDriver: - Name: "aufs" - Data: {} - RepoDigests: - - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" - RepoTags: - - "example:1.0" - - "example:latest" - - "example:stable" - Config: - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - NetworkDisabled: false - OnBuild: [] - StdinOnce: false - PublishService: "" - AttachStdin: false - OpenStdin: false - Domainname: "" - AttachStdout: false - Tty: false - Hostname: "e611e15f9c9d" - Cmd: - - "/bin/bash" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Labels: - com.example.vendor: "Acme" - com.example.version: "1.0" - com.example.license: "GPL" - MacAddress: "" - AttachStderr: false - WorkingDir: "" - User: "" - RootFS: - Type: "layers" - Layers: - - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Image"] - /images/{name}/history: - get: - summary: "Get the history of an image" - description: "Return parent layers of an image." 
- operationId: "ImageHistory" - produces: ["application/json"] - responses: - 200: - description: "List of image layers" - schema: - type: "array" - items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false - examples: - application/json: - - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" - Created: 1398108230 - CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" - Tags: - - "ubuntu:lucid" - - "ubuntu:10.04" - Size: 182964289 - Comment: "" - - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" - Created: 1398108222 - CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" - Tags: [] - Size: 0 - Comment: "" - - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" - Created: 1371157430 - CreatedBy: "" - Tags: - - "scratch12:latest" - - "scratch:latest" - Size: 0 - Comment: "Imported from -" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/{name}/push: - post: - summary: "Push an image" - description: | - Push an image to a registry. - - If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. - - The push is cancelled if the HTTP connection is closed. - operationId: "ImagePush" - consumes: - - "application/octet-stream" - responses: - 200: - description: "No error" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID." - type: "string" - required: true - - name: "tag" - in: "query" - description: "The tag to associate with the image on the registry." - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" - type: "string" - required: true - tags: ["Image"] - /images/{name}/tag: - post: - summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." 
- operationId: "ImageTag" - responses: - 201: - description: "No error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID to tag." - type: "string" - required: true - - name: "repo" - in: "query" - description: "The repository to tag in. For example, `someuser/someimage`." - type: "string" - - name: "tag" - in: "query" - description: "The name of the new tag." - type: "string" - tags: ["Image"] - /images/{name}: - delete: - summary: "Remove an image" - description: | - Remove an image, along with any untagged parent images that were - referenced by that image. - - Images can't be removed if they have descendant images, are being - used by a running container or are being used by a build. - operationId: "ImageDelete" - produces: ["application/json"] - responses: - 200: - description: "The image was deleted successfully" - schema: - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - examples: - application/json: - - Untagged: "3e2f21a89f" - - Deleted: "3e2f21a89f" - - Deleted: "53b4f83ac9" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - - name: "force" - in: "query" - description: "Remove the image even if it is being used by stopped containers or has other tags" - type: "boolean" - default: false - - name: "noprune" - in: "query" - description: "Do not delete untagged parent images" - type: "boolean" - default: false - tags: ["Image"] - /images/search: - get: - summary: "Search images" - description: "Search for an image on Docker Hub." - operationId: "ImageSearch" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - type: "object" - title: "ImageSearchResponseItem" - properties: - description: - type: "string" - is_official: - type: "boolean" - is_automated: - type: "boolean" - name: - type: "string" - star_count: - type: "integer" - examples: - application/json: - - description: "" - is_official: false - is_automated: false - name: "wma55/u1210sshd" - star_count: 0 - - description: "" - is_official: false - is_automated: false - name: "jdswinbank/sshd" - star_count: 0 - - description: "" - is_official: false - is_automated: false - name: "vgauthier/sshd" - star_count: 0 - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "term" - in: "query" - description: "Term to search" - type: "string" - required: true - - name: "limit" - in: "query" - description: "Maximum number of results to return" - type: "integer" - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - - `is-automated=(true|false)` - - `is-official=(true|false)` - - `stars=` Matches images that has at least 'number' stars. 
- type: "string" - tags: ["Image"] - /images/prune: - post: - summary: "Delete unused images" - produces: - - "application/json" - operationId: "ImagePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - - - `dangling=` When set to `true` (or `1`), prune only - unused *and* untagged images. When set to `false` - (or `0`), all unused images are pruned. - - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ImagePruneResponse" - properties: - ImagesDeleted: - description: "Images that were deleted" - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /auth: - post: - summary: "Check auth configuration" - description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." - operationId: "SystemAuth" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "An identity token was generated successfully." - schema: - type: "object" - title: "SystemAuthResponse" - required: [Status] - properties: - Status: - description: "The status of the authentication" - type: "string" - x-nullable: false - IdentityToken: - description: "An opaque token used to authenticate a user after a successful login" - type: "string" - x-nullable: false - examples: - application/json: - Status: "Login Succeeded" - IdentityToken: "9cbaf023786cd7..." - 204: - description: "No error" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "authConfig" - in: "body" - description: "Authentication to check" - schema: - $ref: "#/definitions/AuthConfig" - tags: ["System"] - /info: - get: - summary: "Get system information" - operationId: "SystemInfo" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/SystemInfo" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /version: - get: - summary: "Get version" - description: "Returns the version of Docker that is running and various information about the system that Docker is running on." 
- operationId: "SystemVersion" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - type: "object" - title: "SystemVersionResponse" - properties: - Platform: - type: "object" - required: [Name] - properties: - Name: - type: "string" - Components: - type: "array" - items: - type: "object" - x-go-name: ComponentVersion - required: [Name, Version] - properties: - Name: - type: "string" - Version: - type: "string" - x-nullable: false - Details: - type: "object" - x-nullable: true - - Version: - type: "string" - ApiVersion: - type: "string" - MinAPIVersion: - type: "string" - GitCommit: - type: "string" - GoVersion: - type: "string" - Os: - type: "string" - Arch: - type: "string" - KernelVersion: - type: "string" - Experimental: - type: "boolean" - BuildTime: - type: "string" - examples: - application/json: - Version: "17.04.0" - Os: "linux" - KernelVersion: "3.19.0-23-generic" - GoVersion: "go1.7.5" - GitCommit: "deadbee" - Arch: "amd64" - ApiVersion: "1.27" - MinAPIVersion: "1.12" - BuildTime: "2016-06-14T07:09:13.444803460+00:00" - Experimental: true - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /_ping: - get: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." - operationId: "SystemPing" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "OK" - headers: - API-Version: - type: "string" - description: "Max API Version the server supports" - BuildKit-Version: - type: "string" - description: "Default version of docker image builder" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - headers: - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - tags: ["System"] - head: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." 
- operationId: "SystemPingHead" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "(empty)" - headers: - API-Version: - type: "string" - description: "Max API Version the server supports" - BuildKit-Version: - type: "string" - description: "Default version of docker image builder" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /commit: - post: - summary: "Create a new image from a container" - operationId: "ImageCommit" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "containerConfig" - in: "body" - description: "The container configuration" - schema: - $ref: "#/definitions/ContainerConfig" - - name: "container" - in: "query" - description: "The ID or name of the container to commit" - type: "string" - - name: "repo" - in: "query" - description: "Repository name for the created image" - type: "string" - - name: "tag" - in: "query" - description: "Tag name for the create image" - type: "string" - - name: "comment" - in: "query" - description: "Commit message" - type: "string" - - name: "author" - in: "query" - description: "Author of the image (e.g., `John Hannibal Smith `)" - type: "string" - - name: "pause" - in: "query" - description: "Whether to pause the container before committing" - type: "boolean" - default: true - - name: "changes" - in: "query" - description: "`Dockerfile` instructions to apply while committing" - type: "string" - tags: ["Image"] - /events: - get: - summary: "Monitor events" - description: | - Stream real-time events from the server. - - Various objects within Docker report events when something happens to them. 
- - Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update` - - Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag` - - Volumes report these events: `create`, `mount`, `unmount`, and `destroy` - - Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove` - - The Docker daemon reports these events: `reload` - - Services report these events: `create`, `update`, and `remove` - - Nodes report these events: `create`, `update`, and `remove` - - Secrets report these events: `create`, `update`, and `remove` - - Configs report these events: `create`, `update`, and `remove` - - operationId: "SystemEvents" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "SystemEventsResponse" - properties: - Type: - description: "The type of object emitting the event" - type: "string" - Action: - description: "The type of event" - type: "string" - Actor: - type: "object" - properties: - ID: - description: "The ID of the object emitting the event" - type: "string" - Attributes: - description: "Various key/value attributes of the object, depending on its type" - type: "object" - additionalProperties: - type: "string" - time: - description: "Timestamp of event" - type: "integer" - timeNano: - description: "Timestamp of event, with nanosecond accuracy" - type: "integer" - format: "int64" - examples: - application/json: - Type: "container" - Action: "create" - Actor: - ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Attributes: - com.example.some-label: "some-label-value" - image: "alpine" - name: "my-container" - time: 1461943101 - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "since" - in: "query" - description: "Show events created since this timestamp then stream new events." - type: "string" - - name: "until" - in: "query" - description: "Show events created until this timestamp then stop streaming." - type: "string" - - name: "filters" - in: "query" - description: | - A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: - - `config=` config name or ID - `container=` container name or ID - `daemon=` daemon name or ID - `event=` event type - `image=` image name or ID - `label=` image or container label - `network=` network name or ID - `node=` node ID - `plugin=` plugin name or ID - `scope=` local or swarm - `secret=` secret name or ID - `service=` service name or ID - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - `volume=` volume name - type: "string" - tags: ["System"] - /system/df: - get: - summary: "Get data usage information" - operationId: "SystemDataUsage" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "SystemDataUsageResponse" - properties: - LayersSize: - type: "integer" - format: "int64" - Images: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - Containers: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - Volumes: - type: "array" - items: - $ref: "#/definitions/Volume" - BuildCache: - type: "array" - items: - $ref: "#/definitions/BuildCache" - example: - LayersSize: 1092588 - Images: - - - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - ParentId: "" - RepoTags: - - "busybox:latest" - RepoDigests: - - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" - Created: 1466724217 - Size: 1092588 - SharedSize: 0 - VirtualSize: 1092588 - Labels: {} - Containers: 1 - Containers: - - - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" - Names: - - "/top" - Image: "busybox" - ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - Command: "top" - Created: 1472592424 - Ports: [] - SizeRootFs: 1092588 - Labels: {} - State: "exited" - Status: "Exited (0) 56 minutes ago" - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - IPAMConfig: null - Links: null - Aliases: null - NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" - EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" - Gateway: "172.18.0.1" - IPAddress: "172.18.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Mounts: [] - Volumes: - - - Name: "my-volume" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/my-volume/_data" - Labels: null - Scope: "local" - Options: null - UsageData: - Size: 10920104 - RefCount: 2 - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /images/{name}/get: - get: - summary: "Export an image" - description: | - Get a tarball containing all images and metadata for a repository. - - If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced.
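Because `/events` keeps the connection open and emits one JSON object per event, a `json.Decoder` loop over the response body is the natural consumer. A sketch under the same placeholder-daemon assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// event mirrors just the fields of SystemEventsResponse used below.
type event struct {
	Type   string `json:"Type"`
	Action string `json:"Action"`
	Actor  struct {
		ID         string            `json:"ID"`
		Attributes map[string]string `json:"Attributes"`
	} `json:"Actor"`
}

func main() {
	resp, err := http.Get("http://localhost:2375/events")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The daemon never closes the stream on its own; Decode blocks until
	// the next event arrives or the connection is torn down.
	dec := json.NewDecoder(resp.Body)
	for {
		var ev event
		if err := dec.Decode(&ev); err != nil {
			break
		}
		fmt.Printf("%s %s %s\n", ev.Type, ev.Action, ev.Actor.ID)
	}
}
```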
- - ### Image tarball format - - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer - - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. - - If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. - - ```json - { - "hello-world": { - "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" - } - } - ``` - operationId: "ImageGet" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/get: - get: - summary: "Export several images" - description: | - Get a tarball containing all images and metadata for several image repositories. - - For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. - - For details on the format, see [the export image endpoint](#operation/ImageGet). - operationId: "ImageGetAll" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "names" - in: "query" - description: "Image names to filter by" - type: "array" - items: - type: "string" - tags: ["Image"] - /images/load: - post: - summary: "Import images" - description: | - Load a set of images and tags into a repository. - - For details on the format, see [the export image endpoint](#operation/ImageGet). - operationId: "ImageLoad" - consumes: - - "application/x-tar" - produces: - - "application/json" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "imagesTarball" - in: "body" - description: "Tar archive containing images" - schema: - type: "string" - format: "binary" - - name: "quiet" - in: "query" - description: "Suppress progress details during load." - type: "boolean" - default: false - tags: ["Image"] - /containers/{id}/exec: - post: - summary: "Create an exec instance" - description: "Run a command inside a running container." 
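Since the export endpoints return a raw `application/x-tar` body, streaming it straight to disk avoids buffering entire images in memory. A sketch of `GET /images/{name}/get`; the image reference and output path are placeholders:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	resp, err := http.Get("http://localhost:2375/images/hello-world:latest/get")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("hello-world.tar") // placeholder output path
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Stream the tarball directly to the file.
	n, err := io.Copy(out, resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println("wrote", n, "bytes")
}
```

The resulting tarball is what `POST /images/load` expects back.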
- operationId: "ContainerExec" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is paused" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execConfig" - in: "body" - description: "Exec configuration" - schema: - type: "object" - properties: - AttachStdin: - type: "boolean" - description: "Attach to `stdin` of the exec command." - AttachStdout: - type: "boolean" - description: "Attach to `stdout` of the exec command." - AttachStderr: - type: "boolean" - description: "Attach to `stderr` of the exec command." - DetachKeys: - type: "string" - description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - Env: - description: "A list of environment variables in the form `[\"VAR=value\", ...]`." - type: "array" - items: - type: "string" - Cmd: - type: "array" - description: "Command to run, as a string or array of strings." - items: - type: "string" - Privileged: - type: "boolean" - description: "Runs the exec process with extended privileges." - default: false - User: - type: "string" - description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." - WorkingDir: - type: "string" - description: "The working directory for the exec process inside the container." - example: - AttachStdin: false - AttachStdout: true - AttachStderr: true - DetachKeys: "ctrl-p,ctrl-q" - Tty: false - Cmd: - - "date" - Env: - - "FOO=bar" - - "BAZ=quux" - required: true - - name: "id" - in: "path" - description: "ID or name of container" - type: "string" - required: true - tags: ["Exec"] - /exec/{id}/start: - post: - summary: "Start an exec instance" - description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." - operationId: "ExecStart" - consumes: - - "application/json" - produces: - - "application/vnd.docker.raw-stream" - responses: - 200: - description: "No error" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Container is stopped or paused" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execStartConfig" - in: "body" - schema: - type: "object" - properties: - Detach: - type: "boolean" - description: "Detach from the command." - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - example: - Detach: false - Tty: false - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - /exec/{id}/resize: - post: - summary: "Resize an exec instance" - description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." 
- operationId: "ExecResize" - responses: - 201: - description: "No error" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - - name: "h" - in: "query" - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Exec"] - /exec/{id}/json: - get: - summary: "Inspect an exec instance" - description: "Return low-level information about an exec instance." - operationId: "ExecInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ExecInspectResponse" - properties: - CanRemove: - type: "boolean" - DetachKeys: - type: "string" - ID: - type: "string" - Running: - type: "boolean" - ExitCode: - type: "integer" - ProcessConfig: - $ref: "#/definitions/ProcessConfig" - OpenStdin: - type: "boolean" - OpenStderr: - type: "boolean" - OpenStdout: - type: "boolean" - ContainerID: - type: "string" - Pid: - type: "integer" - description: "The system process ID for the exec process." - examples: - application/json: - CanRemove: false - ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" - DetachKeys: "" - ExitCode: 2 - ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" - OpenStderr: true - OpenStdin: true - OpenStdout: true - ProcessConfig: - arguments: - - "-c" - - "exit 2" - entrypoint: "sh" - privileged: false - tty: true - user: "1000" - Running: false - Pid: 42000 - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - - /volumes: - get: - summary: "List volumes" - operationId: "VolumeList" - produces: ["application/json"] - responses: - 200: - description: "Summary volume data that matches the query" - schema: - type: "object" - title: "VolumeListResponse" - description: "Volume list response" - required: [Volumes, Warnings] - properties: - Volumes: - type: "array" - x-nullable: false - description: "List of volumes" - items: - $ref: "#/definitions/Volume" - Warnings: - type: "array" - x-nullable: false - description: "Warnings that occurred when fetching the list of volumes" - items: - type: "string" - - examples: - application/json: - Volumes: - - CreatedAt: "2017-07-19T12:00:26Z" - Name: "tardis" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - Options: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - Warnings: [] - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to - process on the volumes list. Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - volumes that are not in use by a container. When set to `false` - (or `0`), only volumes that are in use by one or more - containers are returned. - - `driver=` Matches volumes based on their driver. 
- - `label=` or `label=:` Matches volumes based on - the presence of a `label` alone or a `label` and a value. - - `name=` Matches all or part of a volume name. - type: "string" - format: "json" - tags: ["Volume"] - - /volumes/create: - post: - summary: "Create a volume" - operationId: "VolumeCreate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 201: - description: "The volume was created successfully" - schema: - $ref: "#/definitions/Volume" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "volumeConfig" - in: "body" - required: true - description: "Volume configuration" - schema: - type: "object" - description: "Volume configuration" - title: "VolumeConfig" - properties: - Name: - description: "The new volume's name. If not specified, Docker generates a name." - type: "string" - x-nullable: false - Driver: - description: "Name of the volume driver to use." - type: "string" - default: "local" - x-nullable: false - DriverOpts: - description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." - type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - Name: "tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Driver: "custom" - tags: ["Volume"] - - /volumes/{name}: - get: - summary: "Inspect a volume" - operationId: "VolumeInspect" - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Volume" - 404: - description: "No such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - tags: ["Volume"] - - delete: - summary: "Remove a volume" - description: "Instruct the driver to remove the volume." - operationId: "VolumeDelete" - responses: - 204: - description: "The volume was removed" - 404: - description: "No such volume or volume driver" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Volume is in use and cannot be removed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - - name: "force" - in: "query" - description: "Force the removal of the volume" - type: "boolean" - default: false - tags: ["Volume"] - /volumes/prune: - post: - summary: "Delete unused volumes" - produces: - - "application/json" - operationId: "VolumePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. 
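Volume creation takes a single JSON body; everything but `Name` is optional, and a 201 echoes back the full Volume. A sketch with placeholder names, under the same daemon-address assumption:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	cfg, _ := json.Marshal(map[string]interface{}{
		"Name":   "tardis", // placeholder volume name
		"Driver": "local",
		"Labels": map[string]string{"com.example.purpose": "demo"},
	})

	resp, err := http.Post("http://localhost:2375/volumes/create",
		"application/json", bytes.NewReader(cfg))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The 201 response is the created Volume, including its Mountpoint.
	var vol struct{ Name, Driver, Mountpoint string }
	json.NewDecoder(resp.Body).Decode(&vol)
	fmt.Printf("%+v\n", vol)
}
```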
- type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "VolumePruneResponse" - properties: - VolumesDeleted: - description: "Volumes that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Volume"] - /networks: - get: - summary: "List networks" - description: | - Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect). - - Note that it uses a different, smaller representation of a network than inspecting a single network. For example, - the list of containers attached to the network is not propagated in API versions 1.28 and up. - operationId: "NetworkList" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Network" - examples: - application/json: - - Name: "bridge" - Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" - Created: "2016-10-19T06:21:00.416543526Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: - - - Subnet: "172.17.0.0/16" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - - Name: "none" - Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "null" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - - Name: "host" - Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "host" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - networks that are not in use by a container. When set to `false` - (or `0`), only networks that are in use by one or more - containers are returned. - - `driver=` Matches a network's driver. - - `id=` Matches all or part of a network ID. - - `label=` or `label==` of a network label. - - `name=` Matches all or part of a network name. - - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
- type: "string" - tags: ["Network"] - - /networks/{id}: - get: - summary: "Inspect a network" - operationId: "NetworkInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Network" - 404: - description: "Network not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "verbose" - in: "query" - description: "Detailed inspect output for troubleshooting" - type: "boolean" - default: false - - name: "scope" - in: "query" - description: "Filter the network by scope (swarm, global, or local)" - type: "string" - tags: ["Network"] - - delete: - summary: "Remove a network" - operationId: "NetworkDelete" - responses: - 204: - description: "No error" - 403: - description: "operation not supported for pre-defined networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such network" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - tags: ["Network"] - - /networks/create: - post: - summary: "Create a network" - operationId: "NetworkCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "No error" - schema: - type: "object" - title: "NetworkCreateResponse" - properties: - Id: - description: "The ID of the created network." - type: "string" - Warning: - type: "string" - example: - Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" - Warning: "" - 403: - description: "operation not supported for pre-defined networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "plugin not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "networkConfig" - in: "body" - description: "Network configuration" - required: true - schema: - type: "object" - required: ["Name"] - properties: - Name: - description: "The network's name." - type: "string" - CheckDuplicate: - description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions." - type: "boolean" - Driver: - description: "Name of the network driver plugin to use." - type: "string" - default: "bridge" - Internal: - description: "Restrict external access to the network." - type: "boolean" - Attachable: - description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode." - type: "boolean" - Ingress: - description: "Ingress network is the network which provides the routing-mesh in swarm mode." - type: "boolean" - IPAM: - description: "Optional custom IP scheme for the network." - $ref: "#/definitions/IPAM" - EnableIPv6: - description: "Enable IPv6 on the network." - type: "boolean" - Options: - description: "Network specific options to be used by the drivers." 
- type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - Name: "isolated_nw" - CheckDuplicate: false - Driver: "bridge" - EnableIPv6: true - IPAM: - Driver: "default" - Config: - - Subnet: "172.20.0.0/16" - IPRange: "172.20.10.0/24" - Gateway: "172.20.10.11" - - Subnet: "2001:db8:abcd::/64" - Gateway: "2001:db8:abcd::1011" - Options: - foo: "bar" - Internal: true - Attachable: false - Ingress: false - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - tags: ["Network"] - - /networks/{id}/connect: - post: - summary: "Connect a container to a network" - operationId: "NetworkConnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - properties: - Container: - type: "string" - description: "The ID or name of the container to connect to the network." - EndpointConfig: - $ref: "#/definitions/EndpointSettings" - example: - Container: "3613f73ba0e4" - EndpointConfig: - IPAMConfig: - IPv4Address: "172.24.56.89" - IPv6Address: "2001:db8::5689" - tags: ["Network"] - - /networks/{id}/disconnect: - post: - summary: "Disconnect a container from a network" - operationId: "NetworkDisconnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - properties: - Container: - type: "string" - description: "The ID or name of the container to disconnect from the network." - Force: - type: "boolean" - description: "Force the container to disconnect from the network." - tags: ["Network"] - /networks/prune: - post: - summary: "Delete unused networks" - produces: - - "application/json" - operationId: "NetworkPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
- - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "NetworkPruneResponse" - properties: - NetworksDeleted: - description: "Networks that were deleted" - type: "array" - items: - type: "string" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Network"] - /plugins: - get: - summary: "List plugins" - operationId: "PluginList" - description: "Returns information about installed plugins." - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Plugin" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: - - - `capability=` - - `enable=|` - tags: ["Plugin"] - - /plugins/privileges: - get: - summary: "Get plugin privileges" - operationId: "GetPluginPrivileges" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - description: "Describes a permission the user has to accept upon installing the plugin." - type: "object" - title: "PluginPrivilegeItem" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - tags: - - "Plugin" - - /plugins/pull: - post: - summary: "Install a plugin" - operationId: "PluginPull" - description: | - Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). - produces: - - "application/json" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - Remote reference for plugin to install. - - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "name" - in: "query" - description: | - Local name for the pulled plugin. - - The `:latest` tag is optional, and is used as the default if omitted. - required: false - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - description: "Describes a permission accepted by the user upon installing the plugin." 
- type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/{name}/json: - get: - summary: "Inspect a plugin" - operationId: "PluginInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}: - delete: - summary: "Remove a plugin" - operationId: "PluginDelete" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "force" - in: "query" - description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." - type: "boolean" - default: false - tags: ["Plugin"] - /plugins/{name}/enable: - post: - summary: "Enable a plugin" - operationId: "PluginEnable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "timeout" - in: "query" - description: "Set the HTTP client timeout (in seconds)" - type: "integer" - default: 0 - tags: ["Plugin"] - /plugins/{name}/disable: - post: - summary: "Disable a plugin" - operationId: "PluginDisable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}/upgrade: - post: - summary: "Upgrade a plugin" - operationId: "PluginUpgrade" - responses: - 204: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "remote" - in: "query" - description: | - Remote reference to upgrade to. - - The `:latest` tag is optional, and is used as the default if omitted. 
- required: true - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - description: "Describes a permission accepted by the user upon installing the plugin." - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/create: - post: - summary: "Create a plugin" - operationId: "PluginCreate" - consumes: - - "application/x-tar" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "query" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "tarContext" - in: "body" - description: "Path to tar containing plugin rootfs and manifest" - schema: - type: "string" - format: "binary" - tags: ["Plugin"] - /plugins/{name}/push: - post: - summary: "Push a plugin" - operationId: "PluginPush" - description: | - Push a plugin to the registry. - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - responses: - 200: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /plugins/{name}/set: - post: - summary: "Configure a plugin" - operationId: "PluginSet" - consumes: - - "application/json" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - type: "string" - example: ["DEBUG=1"] - responses: - 204: - description: "No error" - 404: - description: "Plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /nodes: - get: - summary: "List nodes" - operationId: "NodeList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Node" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
- - Available filters: - - `id=` - `label=` - `membership=(accepted|pending)` - `name=` - `node.label=` - `role=(manager|worker)` - type: "string" - tags: ["Node"] - /nodes/{id}: - get: - summary: "Inspect a node" - operationId: "NodeInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Node" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - tags: ["Node"] - delete: - summary: "Delete a node" - operationId: "NodeDelete" - responses: - 200: - description: "no error" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - - name: "force" - in: "query" - description: "Force remove a node from the swarm" - default: false - type: "boolean" - tags: ["Node"] - /nodes/{id}/update: - post: - summary: "Update a node" - operationId: "NodeUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID of the node" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/NodeSpec" - - name: "version" - in: "query" - description: "The version number of the node object being updated. This is required to avoid conflicting writes."
- type: "integer" - format: "int64" - required: true - tags: ["Node"] - /swarm: - get: - summary: "Inspect swarm" - operationId: "SwarmInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Swarm" - 404: - description: "no such swarm" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/init: - post: - summary: "Initialize a new swarm" - operationId: "SwarmInit" - produces: - - "application/json" - - "text/plain" - responses: - 200: - description: "no error" - schema: - description: "The node ID" - type: "string" - example: "7v2t30z9blmxuhnyo6s4cpenp" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - properties: - ListenAddr: - description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." - type: "string" - AdvertiseAddr: - description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, - or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` - is used. - - The `DataPathAddr` specifies the address that global scope network drivers will publish towards other - nodes in order to reach the containers running on this node. Using this parameter it is possible to - separate the container data traffic from the management traffic of the cluster. - type: "string" - DataPathPort: - description: | - DataPathPort specifies the data path port number for data traffic. - Acceptable port range is 1024 to 49151. - if no port is set or is set to 0, default port 4789 will be used. - type: "integer" - format: "uint32" - DefaultAddrPool: - description: | - Default Address Pool specifies default subnet pools for global scope networks. - type: "array" - items: - type: "string" - example: ["10.10.0.0/16", "20.20.0.0/16"] - ForceNewCluster: - description: "Force creation of a new swarm." 
- type: "boolean" - SubnetSize: - description: | - SubnetSize specifies the subnet size of the networks created from the default subnet pool - type: "integer" - format: "uint32" - Spec: - $ref: "#/definitions/SwarmSpec" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - DataPathPort: 4789 - DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] - SubnetSize: 24 - ForceNewCluster: false - Spec: - Orchestration: {} - Raft: {} - Dispatcher: {} - CAConfig: {} - EncryptionConfig: - AutoLockManagers: false - tags: ["Swarm"] - /swarm/join: - post: - summary: "Join an existing swarm" - operationId: "SwarmJoin" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - properties: - ListenAddr: - description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." - type: "string" - AdvertiseAddr: - description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, - or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` - is used. - - The `DataPathAddr` specifies the address that global scope network drivers will publish towards other - nodes in order to reach the containers running on this node. Using this parameter it is possible to - separate the container data traffic from the management traffic of the cluster. - - type: "string" - RemoteAddrs: - description: "Addresses of manager nodes already participating in the swarm." - type: "string" - JoinToken: - description: "Secret token for joining this swarm." - type: "string" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - RemoteAddrs: - - "node1:2377" - JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - tags: ["Swarm"] - /swarm/leave: - post: - summary: "Leave a swarm" - operationId: "SwarmLeave" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "force" - description: "Force leave swarm, even if this is the last manager or that it will break the cluster." 
- in: "query" - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/update: - post: - summary: "Update a swarm" - operationId: "SwarmUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - $ref: "#/definitions/SwarmSpec" - - name: "version" - in: "query" - description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." - type: "integer" - format: "int64" - required: true - - name: "rotateWorkerToken" - in: "query" - description: "Rotate the worker join token." - type: "boolean" - default: false - - name: "rotateManagerToken" - in: "query" - description: "Rotate the manager join token." - type: "boolean" - default: false - - name: "rotateManagerUnlockKey" - in: "query" - description: "Rotate the manager unlock key." - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/unlockkey: - get: - summary: "Get the unlock key" - operationId: "SwarmUnlockkey" - consumes: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "UnlockKeyResponse" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/unlock: - post: - summary: "Unlock a locked manager" - operationId: "SwarmUnlock" - consumes: - - "application/json" - produces: - - "application/json" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /services: - get: - summary: "List services" - operationId: "ServiceList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Service" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: - - - `id=` - - `label=` - - `mode=["replicated"|"global"]` - - `name=` - tags: ["Service"] - /services/create: - post: - summary: "Create a service" - operationId: "ServiceCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - type: "object" - title: "ServiceCreateResponse" - properties: - ID: - description: "The ID of the created service." 
- type: "string" - Warning: - description: "Optional warning message" - type: "string" - example: - ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "network is not eligible for services" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "name conflicts with an existing service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "web" - TaskTemplate: - ContainerSpec: - Image: "nginx:alpine" - Mounts: - - - ReadOnly: true - Source: "web-data" - Target: "/usr/share/nginx/html" - Type: "volume" - VolumeOptions: - DriverConfig: {} - Labels: - com.example.something: "something-value" - Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] - User: "33" - DNSConfig: - Nameservers: ["8.8.8.8"] - Search: ["example.org"] - Options: ["timeout:3"] - Secrets: - - - File: - Name: "www.example.org.key" - UID: "33" - GID: "33" - Mode: 384 - SecretID: "fpjqlhnwb19zds35k8wn80lq9" - SecretName: "example_org_domain_key" - LogDriver: - Name: "json-file" - Options: - max-file: "3" - max-size: "10M" - Placement: {} - Resources: - Limits: - MemoryBytes: 104857600 - Reservations: {} - RestartPolicy: - Condition: "on-failure" - Delay: 10000000000 - MaxAttempts: 10 - Mode: - Replicated: - Replicas: 4 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Ports: - - - Protocol: "tcp" - PublishedPort: 8080 - TargetPort: 80 - Labels: - foo: "bar" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" - type: "string" - tags: ["Service"] - /services/{id}: - get: - summary: "Inspect a service" - operationId: "ServiceInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Service" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "insertDefaults" - in: "query" - description: "Fill empty fields with default values." 
- type: "boolean" - default: false - tags: ["Service"] - delete: - summary: "Delete a service" - operationId: "ServiceDelete" - responses: - 200: - description: "no error" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - tags: ["Service"] - /services/{id}/update: - post: - summary: "Update a service" - operationId: "ServiceUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ServiceUpdateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "top" - TaskTemplate: - ContainerSpec: - Image: "busybox" - Args: - - "top" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - - - name: "version" - in: "query" - description: "The version number of the service object being updated. - This is required to avoid conflicting writes. - This version number should be the value as currently set on the service *before* the update. - You can find the current version by calling `GET /services/{id}`" - required: true - type: "integer" - - name: "registryAuthFrom" - in: "query" - type: "string" - description: "If the X-Registry-Auth header is not specified, this - parameter indicates where to find registry authorization credentials. The - valid values are `spec` and `previous-spec`." - default: "spec" - - name: "rollback" - in: "query" - type: "string" - description: "Set to this parameter to `previous` to cause a - server-side rollback to the previous service spec. The supplied spec will be - ignored in this case." - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" - type: "string" - - tags: ["Service"] - /services/{id}/logs: - get: - summary: "Get service logs" - description: | - Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). - - **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. 
- operationId: "ServiceLogs" - responses: - 200: - description: "logs returned as a stream in response body" - schema: - type: "string" - format: "binary" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such service: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the service" - type: "string" - - name: "details" - in: "query" - description: "Show service context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: "Keep connection after returning logs." - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." - type: "string" - default: "all" - tags: ["Service"] - /tasks: - get: - summary: "List tasks" - operationId: "TaskList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Task" - example: - - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - - ID: "1yljwbmlr8er2waf8orvqpwms" - Version: - Index: 30 - CreatedAt: "2016-06-07T21:07:30.019104782Z" - UpdatedAt: "2016-06-07T21:07:30.231958098Z" - Name: "hopeful_cori" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:30.202183143Z" - State: "shutdown" - 
Message: "shutdown" - ContainerStatus: - ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" - DesiredState: "shutdown" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.5/16" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters: - - - `desired-state=(running | shutdown | accepted)` - - `id=` - - `label=key` or `label="key=value"` - - `name=` - - `node=` - - `service=` - tags: ["Task"] - /tasks/{id}: - get: - summary: "Inspect a task" - operationId: "TaskInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Task" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID of the task" - required: true - type: "string" - tags: ["Task"] - /tasks/{id}/logs: - get: - summary: "Get task logs" - description: | - Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). - - **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. - operationId: "TaskLogs" - responses: - 200: - description: "logs returned as a stream in response body" - schema: - type: "string" - format: "binary" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such task: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID of the task" - type: "string" - - name: "details" - in: "query" - description: "Show task context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: "Keep connection after returning logs." 
- type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." - type: "string" - default: "all" - tags: ["Task"] - /secrets: - get: - summary: "List secrets" - operationId: "SecretList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Secret" - example: - - ID: "blt1owaxmitz71s9v5zh81zun" - Version: - Index: 85 - CreatedAt: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: "2017-07-20T13:55:28.678958722Z" - Spec: - Name: "mysql-passwd" - Labels: - some.label: "some.value" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. 
Available filters: - - - `id=<secret id>` - - `label=<key> or label=<key>=value` - - `name=<secret name>` - - `names=<secret name>` - tags: ["Secret"] - /secrets/create: - post: - summary: "Create a secret" - operationId: "SecretCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/SecretSpec" - - type: "object" - example: - Name: "app-key.crt" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - tags: ["Secret"] - /secrets/{id}: - get: - summary: "Inspect a secret" - operationId: "SecretInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Secret" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - delete: - summary: "Delete a secret" - operationId: "SecretDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - /secrets/{id}/update: - post: - summary: "Update a Secret" - operationId: "SecretUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such secret" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the secret" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/SecretSpec" - description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." - - name: "version" - in: "query" - description: "The version number of the secret object being updated.
This is required to avoid conflicting writes." - type: "integer" - format: "int64" - required: true - tags: ["Secret"] - /configs: - get: - summary: "List configs" - operationId: "ConfigList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Config" - example: - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "server.conf" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: - - - `id=<config id>` - - `label=<key> or label=<key>=value` - - `name=<config name>` - - `names=<config name>` - tags: ["Config"] - /configs/create: - post: - summary: "Create a config" - operationId: "ConfigCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/ConfigSpec" - - type: "object" - example: - Name: "server.conf" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - tags: ["Config"] - /configs/{id}: - get: - summary: "Inspect a config" - operationId: "ConfigInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Config" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - delete: - summary: "Delete a config" - operationId: "ConfigDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - /configs/{id}/update: - post: - summary: "Update a Config" - operationId: "ConfigUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such config" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref:
"#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the config" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/ConfigSpec" - description: "The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values." - - name: "version" - in: "query" - description: "The version number of the config object being updated. This is required to avoid conflicting writes." - type: "integer" - format: "int64" - required: true - tags: ["Config"] - /distribution/{name}/json: - get: - summary: "Get image information from the registry" - description: "Return image digest and platform information by contacting the registry." - operationId: "DistributionInspect" - produces: - - "application/json" - responses: - 200: - description: "descriptor and platform information" - schema: - type: "object" - x-go-name: DistributionInspect - title: "DistributionInspectResponse" - required: [Descriptor, Platforms] - properties: - Descriptor: - type: "object" - description: "A descriptor struct containing digest, media type, and size" - properties: - MediaType: - type: "string" - Size: - type: "integer" - format: "int64" - Digest: - type: "string" - URLs: - type: "array" - items: - type: "string" - Platforms: - type: "array" - description: "An array containing all platforms supported by the image" - items: - type: "object" - properties: - Architecture: - type: "string" - OS: - type: "string" - OSVersion: - type: "string" - OSFeatures: - type: "array" - items: - type: "string" - Variant: - type: "string" - Features: - type: "array" - items: - type: "string" - examples: - application/json: - Descriptor: - MediaType: "application/vnd.docker.distribution.manifest.v2+json" - Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" - Size: 3987495 - URLs: - - "" - Platforms: - - Architecture: "amd64" - OS: "linux" - OSVersion: "" - OSFeatures: - - "" - Variant: "" - Features: - - "" - 401: - description: "Failed authentication or no image found" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Distribution"] - /session: - post: - summary: "Initialize interactive session" - description: | - Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. - - > **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental - > features enabled. The specifications for this endpoint may still change in a future version of the API. - - ### Hijacking - - This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection. 
- - For example, the client sends this request to upgrade the connection: - - ``` - POST /session HTTP/1.1 - Upgrade: h2c - Connection: Upgrade - ``` - - The Docker daemon will respond with a `101 UPGRADED` response followed by the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Connection: Upgrade - Upgrade: h2c - ``` - operationId: "Session" - produces: - - "application/vnd.docker.raw-stream" - responses: - 101: - description: "no error, hijacking successful" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Session (experimental)"] diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go deleted file mode 100644 index 027c6edb..00000000 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ /dev/null @@ -1,52 +0,0 @@ -package events // import "github.com/docker/docker/api/types/events" - -const ( - // ContainerEventType is the event type that containers generate - ContainerEventType = "container" - // DaemonEventType is the event type that daemons generate - DaemonEventType = "daemon" - // ImageEventType is the event type that images generate - ImageEventType = "image" - // NetworkEventType is the event type that networks generate - NetworkEventType = "network" - // PluginEventType is the event type that plugins generate - PluginEventType = "plugin" - // VolumeEventType is the event type that volumes generate - VolumeEventType = "volume" - // ServiceEventType is the event type that services generate - ServiceEventType = "service" - // NodeEventType is the event type that nodes generate - NodeEventType = "node" - // SecretEventType is the event type that secrets generate - SecretEventType = "secret" - // ConfigEventType is the event type that configs generate - ConfigEventType = "config" -) - -// Actor describes something that generates events, -// like a container, or a network, or a volume. -// It has a defined name and a set of attributes. -// The container attributes are its labels, other actors -// can generate these attributes from other properties. -type Actor struct { - ID string - Attributes map[string]string -} - -// Message represents the information an event contains -type Message struct { - // Deprecated information from JSONMessage. - // With data only in container events. - Status string `json:"status,omitempty"` - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - - Type string - Action string - Actor Actor - // Engine events are local scope. Cluster events are swarm scope.
- Scope string `json:"scope,omitempty"` - - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go deleted file mode 100644 index b5a7a0c4..00000000 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ /dev/null @@ -1,37 +0,0 @@ -package image // import "github.com/docker/docker/api/types/image" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// HistoryResponseItem individual image layer information in response to ImageHistory operation -// swagger:model HistoryResponseItem -type HistoryResponseItem struct { - - // comment - // Required: true - Comment string `json:"Comment"` - - // created - // Required: true - Created int64 `json:"Created"` - - // created by - // Required: true - CreatedBy string `json:"CreatedBy"` - - // Id - // Required: true - ID string `json:"Id"` - - // size - // Required: true - Size int64 `json:"Size"` - - // tags - // Required: true - Tags []string `json:"Tags"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go deleted file mode 100644 index ef020f45..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Version represents the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// Meta is a base object inherited by most of the other ones. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:"Labels"` -} - -// Driver represents a driver (network, logging, secrets backend). -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TLSInfo represents the TLS information about what CA certificate is trusted, -// and who the issuer for a TLS certificate is -type TLSInfo struct { - // TrustRoot is the trusted CA root certificate in PEM format - TrustRoot string `json:",omitempty"` - - // CertIssuer is the raw subject bytes of the issuer - CertIssuerSubject []byte `json:",omitempty"` - - // CertIssuerPublicKey is the raw public key bytes of the issuer - CertIssuerPublicKey []byte `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go deleted file mode 100644 index 16202ccc..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Config represents a config.
-type Config struct { - ID string - Meta - Spec ConfigSpec -} - -// ConfigSpec represents a config specification from a config in swarm -type ConfigSpec struct { - Annotations - Data []byte `json:",omitempty"` - - // Templating controls whether and how to evaluate the config payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// ConfigReferenceFileTarget is a file target in a config reference -type ConfigReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// ConfigReferenceRuntimeTarget is a target for a config specifying that it -// isn't mounted into the container but instead has some other purpose. -type ConfigReferenceRuntimeTarget struct{} - -// ConfigReference is a reference to a config in swarm -type ConfigReference struct { - File *ConfigReferenceFileTarget `json:",omitempty"` - Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` - ConfigID string - ConfigName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go deleted file mode 100644 index 48190c17..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ /dev/null @@ -1,76 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" -) - -// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) -// Detailed documentation is available in: -// http://man7.org/linux/man-pages/man5/resolv.conf.5.html -// `nameserver`, `search`, `options` have been supported. -// TODO: `domain` is not supported yet. -type DNSConfig struct { - // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` - // Search specifies the search list for host-name lookup - Search []string `json:",omitempty"` - // Options allows certain internal resolver variables to be modified - Options []string `json:",omitempty"` -} - -// SELinuxContext contains the SELinux labels of the container. -type SELinuxContext struct { - Disable bool - - User string - Role string - Type string - Level string -} - -// CredentialSpec for managed service account (Windows only) -type CredentialSpec struct { - Config string - File string - Registry string -} - -// Privileges defines the security options for the container. -type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext -} - -// ContainerSpec represents the spec of a container. 
-type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Hostname string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - Privileges *Privileges `json:",omitempty"` - Init *bool `json:",omitempty"` - StopSignal string `json:",omitempty"` - TTY bool `json:",omitempty"` - OpenStdin bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` - Healthcheck *container.HealthConfig `json:",omitempty"` - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...] - Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go deleted file mode 100644 index 98ef3284..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ /dev/null @@ -1,121 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "github.com/docker/docker/api/types/network" -) - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. -type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - // TargetPort is the port inside the container - TargetPort uint32 `json:",omitempty"` - // PublishedPort is the port on the swarm hosts - PublishedPort uint32 `json:",omitempty"` - // PublishMode is the mode in which port is published - PublishMode PortConfigPublishMode `json:",omitempty"` -} - -// PortConfigPublishMode represents the mode in which the port is to -// be published. -type PortConfigPublishMode string - -const ( - // PortConfigPublishModeIngress is used for ports published - // for ingress load balancing using routing mesh. - PortConfigPublishModeIngress PortConfigPublishMode = "ingress" - // PortConfigPublishModeHost is used for ports published - // for direct host level access on the host where the task is running. - PortConfigPublishModeHost PortConfigPublishMode = "host" -) - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. 
- - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" - // PortConfigProtocolSCTP SCTP - PortConfigProtocolSCTP PortConfigProtocol = "sctp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. -type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` - ConfigFrom *network.ConfigReference `json:",omitempty"` - Scope string `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` - DriverOpts map[string]string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go deleted file mode 100644 index 1e30f5fa..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ /dev/null @@ -1,115 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// Node represents a node. -type Node struct { - ID string - Meta - // Spec defines the desired state of the node as specified by the user. - // The system will honor this and will *never* modify it. - Spec NodeSpec `json:",omitempty"` - // Description encapsulates the properties of the Node as reported by the - // agent. - Description NodeDescription `json:",omitempty"` - // Status provides the current status of the node, as seen by the manager. - Status NodeStatus `json:",omitempty"` - // ManagerStatus provides the current status of the node's manager - // component, if the node is a manager. - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. 
-type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` - TLSInfo TLSInfo `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. -type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. 
-type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go deleted file mode 100644 index 0c77403c..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ /dev/null @@ -1,27 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// RuntimeType is the type of runtime used for the TaskSpec -type RuntimeType string - -// RuntimeURL is the proto type url -type RuntimeURL string - -const ( - // RuntimeContainer is the container based runtime - RuntimeContainer RuntimeType = "container" - // RuntimePlugin is the plugin based runtime - RuntimePlugin RuntimeType = "plugin" - // RuntimeNetworkAttachment is the network attachment runtime - RuntimeNetworkAttachment RuntimeType = "attachment" - - // RuntimeURLContainer is the proto url for the container type - RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" - // RuntimeURLPlugin is the proto url for the plugin type - RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" -) - -// NetworkAttachmentSpec represents the runtime spec type for network -// attachment tasks -type NetworkAttachmentSpec struct { - ContainerID string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 98c2806c..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto - -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index 1fdc9b04..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,712 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: plugin.proto -// DO NOT EDIT! - -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ -package runtime - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. 
-type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Disabled { - dAtA[i] = 0x20 - i++ - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += 
copy(dAtA[i:], m.Name) - } - if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PluginSpec) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozPlugin(x uint64) (n int) { - return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 196 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, - 0x04, 
0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, - 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, - 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, - 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, - 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, - 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, - 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, - 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, - 0x0c, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index 6d63b778..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go deleted file mode 100644 index d5213ec9..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ /dev/null @@ -1,36 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Secret represents a secret. -type Secret struct { - ID string - Meta - Spec SecretSpec -} - -// SecretSpec represents a secret specification from a secret in swarm -type SecretSpec struct { - Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store - - // Templating controls whether and how to evaluate the secret payload as - // a template. If it is not set, no templating is used. 
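The generated `Unmarshal` and `skipPlugin` code above decodes protobuf base-128 varints with the same shift-accumulate loop throughout: each byte contributes its low seven bits, and a clear high bit ends the sequence (hence the `b < 0x80` exit test). As a standalone reference sketch — not part of this patch — the same decoding can be written and cross-checked against the standard library's `encoding/binary.Uvarint`:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeUvarint mirrors the loop used by the generated code above:
// each byte contributes its low 7 bits; the high bit marks continuation.
func decodeUvarint(data []byte) (uint64, int, error) {
	var value uint64
	for i, shift := 0, uint(0); ; i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("integer overflow")
		}
		if i >= len(data) {
			return 0, 0, fmt.Errorf("unexpected EOF")
		}
		b := data[i]
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, i + 1, nil
		}
	}
}

func main() {
	raw := []byte{0xAC, 0x02} // 300 encoded as a varint
	v, n, _ := decodeUvarint(raw)
	fmt.Println(v, n) // 300 2

	// The standard library agrees:
	sv, sn := binary.Uvarint(raw)
	fmt.Println(sv, sn) // 300 2
}
```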
- Templating *Driver `json:",omitempty"` -} - -// SecretReferenceFileTarget is a file target in a secret reference -type SecretReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// SecretReference is a reference to a secret in swarm -type SecretReference struct { - File *SecretReferenceFileTarget - SecretID string - SecretName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go deleted file mode 100644 index abf192e7..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ /dev/null @@ -1,124 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus *UpdateStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - RollbackConfig *UpdateConfig `json:",omitempty"` - - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" - // UpdateStateRollbackStarted is the state with a rollback in progress. - UpdateStateRollbackStarted UpdateState = "rollback_started" - // UpdateStateRollbackPaused is the state with a rollback paused. - UpdateStateRollbackPaused UpdateState = "rollback_paused" - // UpdateStateRollbackCompleted is the state with a rollback completed. - UpdateStateRollbackCompleted UpdateState = "rollback_completed" -) - -// UpdateStatus reports the status of a service update. -type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt *time.Time `json:",omitempty"` - CompletedAt *time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode.
-type GlobalService struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" - // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" - - // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" - // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - // Maximum number of tasks to be updated in one iteration. - // 0 means unlimited parallelism. - Parallelism uint64 - - // Amount of time between updates. - Delay time.Duration `json:",omitempty"` - - // FailureAction is the action to take when an update fails. - FailureAction string `json:",omitempty"` - - // Monitor indicates how long to monitor a task for failure after it is - // created. If the task fails by ending up in one of the states - // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, - // this counts as a failure. If it fails after Monitor, it does not - // count as a failure. If Monitor is unspecified, a default value will - // be used. - Monitor time.Duration `json:",omitempty"` - - // MaxFailureRatio is the fraction of tasks that may fail during - // an update before the failure action is invoked. Any task created by - // the current update which ends up in one of the states REJECTED, - // COMPLETED or FAILED within Monitor from its creation counts as a - // failure. The number of failures is divided by the number of tasks - // being updated, and if this fraction is greater than - // MaxFailureRatio, the failure action is invoked. - // - // If the failure action is CONTINUE, there is no effect. - // If the failure action is PAUSE, no more tasks will be updated until - // another update is started. - MaxFailureRatio float32 - - // Order indicates the order of operations when rolling out an updated - // task. Either the old task is shut down before the new task is - // started, or the new task is started before the old task is shut down. - Order string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go deleted file mode 100644 index b25f9996..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ /dev/null @@ -1,227 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" -) - -// ClusterInfo represents info about the cluster for outputting in "info" -// it contains the same information as "Swarm", but without the JoinTokens -type ClusterInfo struct { - ID string - Meta - Spec Spec - TLSInfo TLSInfo - RootRotationInProgress bool - DefaultAddrPool []string - SubnetSize uint32 - DataPathPort uint32 -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - // Worker is the join token workers may use to join the swarm. - Worker string - // Manager is the join token managers may use to join the swarm. - Manager string -} - -// Spec represents the spec of a swarm.
-type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` - EncryptionConfig EncryptionConfig `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or - // node. If negative, never remove completed or failed tasks. - TaskHistoryRetentionLimit *int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an effect on new tasks. Old tasks - // will continue to use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// EncryptionConfig controls at-rest encryption of data and keys. -type EncryptionConfig struct { - // AutoLockManagers specifies whether or not managers' TLS keys and raft data - // should be encrypted at rest in such a way that they must be unlocked - // before the manager node starts up again. - AutoLockManagers bool -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - // SnapshotInterval is the number of log entries between snapshots. - SnapshotInterval uint64 `json:",omitempty"` - - // KeepOldSnapshots is the number of snapshots to keep beyond the - // current snapshot. - KeepOldSnapshots *uint64 `json:",omitempty"` - - // LogEntriesForSlowFollowers is the number of log entries to keep - // around to sync up slow followers after a snapshot is created. - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - - // ElectionTick is the number of ticks that a follower will wait for a message - // from the leader before becoming a candidate and starting an election. - // ElectionTick must be greater than HeartbeatTick. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - ElectionTick int - - // HeartbeatTick is the number of ticks between heartbeats. Every - // HeartbeatTick ticks, the leader will send a heartbeat to the - // followers. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - HeartbeatTick int -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - // HeartbeatPeriod defines how often agent should send heartbeats to - // dispatcher. - HeartbeatPeriod time.Duration `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - // NodeCertExpiry is the duration certificates should be issued for - NodeCertExpiry time.Duration `json:",omitempty"` - - // ExternalCAs is a list of CAs to which a manager node will make - // certificate signing requests for node certificates. - ExternalCAs []*ExternalCA `json:",omitempty"` - - // SigningCACert and SigningCAKey specify the desired signing root CA and - // root CA key for the swarm. When inspecting the cluster, the key will - // be redacted.
- SigningCACert string `json:",omitempty"` - SigningCAKey string `json:",omitempty"` - - // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate and key. - ForceRotate uint64 `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - // Protocol is the protocol used by this external CA. - Protocol ExternalCAProtocol - - // URL is the URL where the external CA can be reached. - URL string - - // Options is a set of additional key/value pairs whose interpretation - // depends on the specified CA type. - Options map[string]string `json:",omitempty"` - - // CACert specifies which root CA is used by this external CA. This certificate must - // be in PEM format. - CACert string -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - DataPathPort uint32 - ForceNewCluster bool - Spec Spec - AutoLockManagers bool - Availability NodeAvailability - DefaultAddrPool []string - SubnetSize uint32 -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - RemoteAddrs []string - JoinToken string // accept by secret - Availability NodeAvailability -} - -// UnlockRequest is the request used to unlock a swarm. -type UnlockRequest struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" - // LocalNodeStateLocked LOCKED - LocalNodeStateLocked LocalNodeState = "locked" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int `json:",omitempty"` - Managers int `json:",omitempty"` - - Cluster *ClusterInfo `json:",omitempty"` - - Warnings []string `json:",omitempty"` -} - -// Peer represents a peer. -type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. -type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go deleted file mode 100644 index d5a57df5..00000000 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ /dev/null @@ -1,192 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/swarm/runtime" -) - -// TaskState represents the state of a task.
-type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" - // TaskStateRemove REMOVE - TaskStateRemove TaskState = "remove" - // TaskStateOrphaned ORPHANED - TaskStateOrphaned TaskState = "orphaned" -) - -// Task represents a task. -type Task struct { - ID string - Meta - Annotations - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// TaskSpec represents the spec of a task. -type TaskSpec struct { - // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. - // PluginSpec is only used when the `Runtime` field is set to `plugin` - // NetworkAttachmentSpec is used if the `Runtime` field is set to - // `attachment`. - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` - NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` - - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` - - // ForceUpdate is a counter that triggers an update even if no relevant - // parameters have been changed. - ForceUpdate uint64 - - Runtime RuntimeType `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory). -type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) -type GenericResource struct { - NamedResourceSpec *NamedGenericResource `json:",omitempty"` - DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` -} - -// NamedGenericResource represents a "user defined" resource which is defined -// as a string. -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) 
-type NamedGenericResource struct { - Kind string `json:",omitempty"` - Value string `json:",omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) -type DiscreteGenericResource struct { - Kind string `json:",omitempty"` - Value int64 `json:",omitempty"` -} - -// ResourceRequirements represents resource requirements. -type ResourceRequirements struct { - Limits *Resources `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` - Preferences []PlacementPreference `json:",omitempty"` - MaxReplicas uint64 `json:",omitempty"` - - // Platforms stores all the platforms that the image can run on. - // This field is used in the platform filter for scheduling. If empty, - // then the platform filter is off, meaning there are no scheduling restrictions. - Platforms []Platform `json:",omitempty"` -} - -// PlacementPreference provides a way to make the scheduler aware of factors -// such as topology. -type PlacementPreference struct { - Spread *SpreadOver -} - -// SpreadOver is a scheduling preference that instructs the scheduler to spread -// tasks evenly over groups of nodes identified by labels. -type SpreadOver struct { - // label descriptor, such as engine.labels.az - SpreadDescriptor string -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. -type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus *ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. -type ContainerStatus struct { - ContainerID string - PID int - ExitCode int -} - -// PortStatus represents the port status of a task's host ports whose -// service has published host ports -type PortStatus struct { - Ports []PortConfig `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go deleted file mode 100644 index 84b6f073..00000000 --- a/vendor/github.com/docker/docker/api/types/time/duration_convert.go +++ /dev/null @@ -1,12 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "strconv" - "time" -) - -// DurationToSecondsString converts the specified duration to the number of -// seconds it represents, formatted as a string.
-func DurationToSecondsString(duration time.Duration) string { - return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) -} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go deleted file mode 100644 index ea3495ef..00000000 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ /dev/null @@ -1,129 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// These are additional predefined layouts for use in Time.Format and Time.Parse -// with --since and --until parameters for `docker logs` and `docker events` -const ( - rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone - rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone - dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 - dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 -) - -// GetTimestamp tries to parse given string as golang duration, -// then RFC3339 time and finally as a Unix timestamp. If -// any of these were successful, it returns a Unix timestamp -// as string otherwise returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration. -func GetTimestamp(value string, reference time.Time) (string, error) { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil - } - - var format string - // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - - if strings.Contains(value, ".") { - if parseInLocation { - format = rFC3339NanoLocal - } else { - format = time.RFC3339Nano - } - } else if strings.Contains(value, "T") { - // we want the number of colons in the T portion of the timestamp - tcolons := strings.Count(value, ":") - // if parseInLocation is off and we have a +/- zone offset (not Z) then - // there will be an extra colon in the input for the tz offset subtract that - // colon from the tcolons count - if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { - tcolons-- - } - if parseInLocation { - switch tcolons { - case 0: - format = "2006-01-02T15" - case 1: - format = "2006-01-02T15:04" - default: - format = rFC3339Local - } - } else { - switch tcolons { - case 0: - format = "2006-01-02T15Z07:00" - case 1: - format = "2006-01-02T15:04Z07:00" - default: - format = time.RFC3339 - } - } - } else if parseInLocation { - format = dateLocal - } else { - format = dateWithZone - } - - var t time.Time - var err error - - if parseInLocation { - t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) - } else { - t, err = time.Parse(format, value) - } - - if err != nil { - // if there is a `-` then it's an RFC3339 like timestamp - if strings.Contains(value, "-") { - return "", err // was probably an RFC3339 like timestamp but the parser failed with an error - } - if _, _, err := parseTimestamp(value); err != nil { - return "", fmt.Errorf("failed to parse value as time or duration: %q", value) - } - return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) - } - - return fmt.Sprintf("%d.%09d", t.Unix(), 
int64(t.Nanosecond())), nil -} - -// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the -// format "%d.%09d" (seconds.nanoseconds), as produced by GetTimestamp above. -// If the incoming nanosecond portion is longer or shorter than 9 digits it is -// converted to nanoseconds. The expectation is that the seconds and -// nanoseconds will be used to create a time variable. For example: -// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0) -// if err == nil { since := time.Unix(seconds, nanoseconds) } -// It returns def as the seconds value if value == "". -func ParseTimestamps(value string, def int64) (int64, int64, error) { - if value == "" { - return def, 0, nil - } - return parseTimestamp(value) -} - -func parseTimestamp(value string) (int64, int64, error) { - sa := strings.SplitN(value, ".", 2) - s, err := strconv.ParseInt(sa[0], 10, 64) - if err != nil { - return s, 0, err - } - if len(sa) != 2 { - return s, 0, nil - } - n, err := strconv.ParseInt(sa[1], 10, 64) - if err != nil { - return s, n, err - } - // should already be in nanoseconds but just in case convert n to nanoseconds - n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) - return s, n, nil -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go deleted file mode 100644 index 0c3772d3..00000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go +++ /dev/null @@ -1,29 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// VolumeCreateBody Volume configuration -// swagger:model VolumeCreateBody -type VolumeCreateBody struct { - - // Name of the volume driver to use. - // Required: true - Driver string `json:"Driver"` - - // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. - // Required: true - DriverOpts map[string]string `json:"DriverOpts"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // The new volume's name. If not specified, Docker generates a name.
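The `math.Pow` line in `parseTimestamp` above scales a fraction of arbitrary length to nanoseconds by multiplying by 10^(9 - len(frac)). A minimal standalone sketch of the same normalization, using string padding instead of floating point and assuming only the standard library:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// splitTimestamp converts "seconds[.fraction]" to (sec, nsec),
// padding or truncating the fraction to exactly nine digits —
// the same effect as the math.Pow expression above.
func splitTimestamp(value string) (int64, int64, error) {
	parts := strings.SplitN(value, ".", 2)
	sec, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil || len(parts) == 1 {
		return sec, 0, err
	}
	frac := parts[1]
	if len(frac) > 9 {
		frac = frac[:9] // drop precision below nanoseconds
	}
	frac += strings.Repeat("0", 9-len(frac)) // right-pad to nanoseconds
	nsec, err := strconv.ParseInt(frac, 10, 64)
	return sec, nsec, err
}

func main() {
	sec, nsec, _ := splitTimestamp("1136073600.000000001")
	fmt.Println(time.Unix(sec, nsec).UTC()) // 2006-01-01 00:00:00.000000001 +0000 UTC
}
```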
- // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go deleted file mode 100644 index 45c3c1c9..00000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go +++ /dev/null @@ -1,23 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -import "github.com/docker/docker/api/types" - -// VolumeListOKBody Volume list response -// swagger:model VolumeListOKBody -type VolumeListOKBody struct { - - // List of volumes - // Required: true - Volumes []*types.Volume `json:"Volumes"` - - // Warnings that occurred when fetching the list of volumes - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md deleted file mode 100644 index 992f1811..00000000 --- a/vendor/github.com/docker/docker/client/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# Go client for the Docker Engine API - -The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. - -For example, to list running containers (the equivalent of `docker ps`): - -```go -package main - -import ( - "context" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" -) - -func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - panic(err) - } - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - if err != nil { - panic(err) - } - - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) - } -} -``` - -[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go deleted file mode 100644 index 3aae43e3..00000000 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ /dev/null @@ -1,16 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// BuildCancel requests the daemon to cancel ongoing build request -func (cli *Client) BuildCancel(ctx context.Context, id string) error { - query := url.Values{} - query.Set("id", id) - - serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) - ensureReaderClosed(serverResp) - return err -} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go deleted file mode 100644 index 397d67cd..00000000 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ /dev/null @@ -1,45 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/pkg/errors" -) - -// 
BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { - if err := cli.NewVersionError("1.31", "build prune"); err != nil { - return nil, err - } - - report := types.BuildCachePruneReport{} - - query := url.Values{} - if opts.All { - query.Set("all", "1") - } - query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) - filters, err := filters.ToJSON(opts.Filters) - if err != nil { - return nil, errors.Wrap(err, "prune could not marshal filters option") - } - query.Set("filters", filters) - - serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - - if err != nil { - return nil, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return nil, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return &report, nil -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go deleted file mode 100644 index 921024fe..00000000 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types" -) - -// CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { - resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go deleted file mode 100644 index 54f55fa7..00000000 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go deleted file mode 100644 index 66d46dd1..00000000 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" -) - -// CheckpointList returns the checkpoints of the given container in the docker host -func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { - var checkpoints []types.Checkpoint - - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.get(ctx, 
"/containers/"+container+"/checkpoints", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return checkpoints, wrapResponseError(err, resp, "container", container) - } - - err = json.NewDecoder(resp.body).Decode(&checkpoints) - return checkpoints, err -} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go deleted file mode 100644 index b63d4d6d..00000000 --- a/vendor/github.com/docker/docker/client/client.go +++ /dev/null @@ -1,309 +0,0 @@ -/* -Package client is a Go client for the Docker Engine API. - -For more information about the Engine API, see the documentation: -https://docs.docker.com/engine/reference/api/ - -Usage - -You use the library by creating a client object and calling methods on it. The -client can be created either from environment variables with NewEnvClient, or -configured manually with NewClient. - -For example, to list running containers (the equivalent of "docker ps"): - - package main - - import ( - "context" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - ) - - func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - panic(err) - } - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - if err != nil { - panic(err) - } - - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) - } - } - -*/ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "fmt" - "net" - "net/http" - "net/url" - "path" - "strings" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/sockets" - "github.com/pkg/errors" -) - -// ErrRedirect is the error returned by checkRedirect when the request is non-GET. -var ErrRedirect = errors.New("unexpected redirect in response") - -// Client is the API client that performs all operations -// against a docker server. -type Client struct { - // scheme sets the scheme for the client - scheme string - // host holds the server address to connect to - host string - // proto holds the client protocol i.e. unix. - proto string - // addr holds the client address. - addr string - // basePath holds the path to prepend to the requests. - basePath string - // client used to send and receive http requests. - client *http.Client - // version of the server to talk to. - version string - // custom http headers configured by users. - customHTTPHeaders map[string]string - // manualOverride is set to true when the version was set by users. - manualOverride bool - - // negotiateVersion indicates if the client should automatically negotiate - // the API version to use when making requests. API version negotiation is - // performed on the first request, after which negotiated is set to "true" - // so that subsequent requests do not re-negotiate. - negotiateVersion bool - - // negotiated indicates that API version negotiation took place - negotiated bool -} - -// CheckRedirect specifies the policy for dealing with redirect responses: -// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. -// -// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . 
-// The Docker client (and by extension docker API client) can be made to send a request -// like POST /containers//start where what would normally be in the name section of the URL is empty. -// This triggers an HTTP 301 from the daemon. -// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. -// This behavior change manifests in the client in that before the 301 was not followed and -// the client did not generate an error, but now results in a message like Error response from daemon: page not found. -func CheckRedirect(req *http.Request, via []*http.Request) error { - if via[0].Method == http.MethodGet { - return http.ErrUseLastResponse - } - return ErrRedirect -} - -// NewClientWithOpts initializes a new API client with default values. It takes functors -// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` -// It also initializes the custom http headers to add to each request. -// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -func NewClientWithOpts(ops ...Opt) (*Client, error) { - client, err := defaultHTTPClient(DefaultDockerHost) - if err != nil { - return nil, err - } - c := &Client{ - host: DefaultDockerHost, - version: api.DefaultVersion, - client: client, - proto: defaultProto, - addr: defaultAddr, - } - - for _, op := range ops { - if err := op(c); err != nil { - return nil, err - } - } - - if _, ok := c.client.Transport.(http.RoundTripper); !ok { - return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport) - } - if c.scheme == "" { - c.scheme = "http" - - tlsConfig := resolveTLSConfig(c.client.Transport) - if tlsConfig != nil { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. - c.scheme = "https" - } - } - - return c, nil -} - -func defaultHTTPClient(host string) (*http.Client, error) { - url, err := ParseHostURL(host) - if err != nil { - return nil, err - } - transport := new(http.Transport) - sockets.ConfigureTransport(transport, url.Scheme, url.Host) - return &http.Client{ - Transport: transport, - CheckRedirect: CheckRedirect, - }, nil -} - -// Close the transport used by the client -func (cli *Client) Close() error { - if t, ok := cli.client.Transport.(*http.Transport); ok { - t.CloseIdleConnections() - } - return nil -} - -// getAPIPath returns the versioned request path to call the api. -// It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { - var apiPath string - if cli.negotiateVersion && !cli.negotiated { - cli.NegotiateAPIVersion(ctx) - } - if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = path.Join(cli.basePath, "/v"+v, p) - } else { - apiPath = path.Join(cli.basePath, p) - } - return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() -} - -// ClientVersion returns the API version used by this client. 
-func (cli *Client) ClientVersion() string { - return cli.version -} - -// NegotiateAPIVersion queries the API and updates the version to match the -// API version. Any errors are silently ignored. If a manual override is in place, -// either through the `DOCKER_API_VERSION` environment variable, or if the client -// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation -// will be performed. -func (cli *Client) NegotiateAPIVersion(ctx context.Context) { - if !cli.manualOverride { - ping, _ := cli.Ping(ctx) - cli.negotiateAPIVersionPing(ping) - } -} - -// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion -// if the ping version is less than the default version. If a manual override is -// in place, either through the `DOCKER_API_VERSION` environment variable, or if -// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no -// negotiation is performed. -func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { - if !cli.manualOverride { - cli.negotiateAPIVersionPing(p) - } -} - -// negotiateAPIVersionPing queries the API and updates the version to match the -// API version. Any errors are silently ignored. -func (cli *Client) negotiateAPIVersionPing(p types.Ping) { - // try the latest version before versioning headers existed - if p.APIVersion == "" { - p.APIVersion = "1.24" - } - - // if the client is not initialized with a version, start with the latest supported version - if cli.version == "" { - cli.version = api.DefaultVersion - } - - // if server version is lower than the client version, downgrade - if versions.LessThan(p.APIVersion, cli.version) { - cli.version = p.APIVersion - } - - // Store the results, so that automatic API version negotiation (if enabled) - // won't be performed on the next request. - if cli.negotiateVersion { - cli.negotiated = true - } -} - -// DaemonHost returns the host address used by the client -func (cli *Client) DaemonHost() string { - return cli.host -} - -// HTTPClient returns a copy of the HTTP client bound to the server -func (cli *Client) HTTPClient() *http.Client { - return &*cli.client -} - -// ParseHostURL parses a url string, validates the string is a host url, and -// returns the parsed URL -func ParseHostURL(host string) (*url.URL, error) { - protoAddrParts := strings.SplitN(host, "://", 2) - if len(protoAddrParts) == 1 { - return nil, fmt.Errorf("unable to parse docker host `%s`", host) - } - - var basePath string - proto, addr := protoAddrParts[0], protoAddrParts[1] - if proto == "tcp" { - parsed, err := url.Parse("tcp://" + addr) - if err != nil { - return nil, err - } - addr = parsed.Host - basePath = parsed.Path - } - return &url.URL{ - Scheme: proto, - Host: addr, - Path: basePath, - }, nil -} - -// CustomHTTPHeaders returns the custom http headers stored by the client. -func (cli *Client) CustomHTTPHeaders() map[string]string { - m := make(map[string]string) - for k, v := range cli.customHTTPHeaders { - m[k] = v - } - return m -} - -// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. -// Deprecated: use WithHTTPHeaders when creating the client. -func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { - cli.customHTTPHeaders = headers -} - -// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. -// Used by `docker dial-stdio` (docker/cli#889). 
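For consumers of this (now removed) vendored client, the negotiation implemented above is typically triggered explicitly after constructing the client. A minimal usage sketch, assuming a reachable daemon and using only the APIs shown in the source above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// FromEnv honours DOCKER_HOST and DOCKER_API_VERSION.
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Ping the daemon and downgrade the client version if the server reports
	// an older API, mirroring negotiateAPIVersionPing above. Ping errors are
	// silently ignored by design.
	cli.NegotiateAPIVersion(context.Background())
	fmt.Println("using API version:", cli.ClientVersion())
}
```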
-func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { - return func(ctx context.Context) (net.Conn, error) { - if transport, ok := cli.client.Transport.(*http.Transport); ok { - if transport.DialContext != nil && transport.TLSClientConfig == nil { - return transport.DialContext(ctx, cli.proto, cli.addr) - } - } - return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) - } -} diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go deleted file mode 100644 index 54cdfc29..00000000 --- a/vendor/github.com/docker/docker/client/client_deprecated.go +++ /dev/null @@ -1,23 +0,0 @@ -package client - -import "net/http" - -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. -// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -// Deprecated: use NewClientWithOpts -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) -} - -// NewEnvClient initializes a new API client based on environment variables. -// See FromEnv for a list of supported environment variables. -// -// Deprecated: use NewClientWithOpts(FromEnv) -func NewEnvClient() (*Client, error) { - return NewClientWithOpts(FromEnv) -} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go deleted file mode 100644 index 3d24470b..00000000 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build linux freebsd openbsd darwin - -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "unix:///var/run/docker.sock" - -const defaultProto = "unix" -const defaultAddr = "/var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go deleted file mode 100644 index c649e544..00000000 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "npipe:////./pipe/docker_engine" - -const defaultProto = "npipe" -const defaultAddr = "//./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go deleted file mode 100644 index ee7d411d..00000000 --- a/vendor/github.com/docker/docker/client/config_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigCreate creates a new Config.
-func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { - var response types.ConfigCreateResponse - if err := cli.NewVersionError("1.30", "config create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/configs/create", nil, config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go deleted file mode 100644 index 7d0ce3e1..00000000 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigInspectWithRaw returns the config information with raw data -func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { - if id == "" { - return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} - } - if err := cli.NewVersionError("1.30", "config inspect"); err != nil { - return swarm.Config{}, nil, err - } - resp, err := cli.get(ctx, "/configs/"+id, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) - } - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return swarm.Config{}, nil, err - } - - var config swarm.Config - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&config) - - return config, body, err -} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go deleted file mode 100644 index 565acc6e..00000000 --- a/vendor/github.com/docker/docker/client/config_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigList returns the list of configs. -func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { - if err := cli.NewVersionError("1.30", "config list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/configs", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var configs []swarm.Config - err = json.NewDecoder(resp.body).Decode(&configs) - return configs, err -} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go deleted file mode 100644 index a708fcae..00000000 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ConfigRemove removes a Config. 
-func (cli *Client) ConfigRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.30", "config remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) - defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "config", id) -} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go deleted file mode 100644 index 39e59cf8..00000000 --- a/vendor/github.com/docker/docker/client/config_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigUpdate attempts to update a Config -func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { - if err := cli.NewVersionError("1.30", "config update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go deleted file mode 100644 index 88ba1ef6..00000000 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ /dev/null @@ -1,57 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerAttach attaches a connection to a container in the server. -// It returns a types.HijackedResponse with the hijacked connection -// and a reader to get output. It's up to the caller to close -// the hijacked connection by calling types.HijackedResponse.Close. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream.
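Since the multiplexed format described above comes up repeatedly, here is a short sketch of consuming it with `stdcopy.StdCopy`, as the comment suggests; the container name `"demo"` is a placeholder, and error handling is kept minimal:

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Attach to a non-TTY container; "demo" is a placeholder name.
	resp, err := cli.ContainerAttach(context.Background(), "demo", types.ContainerAttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Close()

	// StdCopy reads the 8-byte [STREAM_TYPE, 0, 0, 0, SIZE...] frames described
	// above and routes each payload to the matching writer.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
		panic(err)
	}
}
```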
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - if options.Logs { - query.Set("logs", "1") - } - - headers := map[string][]string{"Content-Type": {"text/plain"}} - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go deleted file mode 100644 index 2966e88c..00000000 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ /dev/null @@ -1,55 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "errors" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ContainerCommit applies changes into a container and creates a new tagged image. -func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { - var repository, tag string - if options.Reference != "" { - ref, err := reference.ParseNormalizedNamed(options.Reference) - if err != nil { - return types.IDResponse{}, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") - } - ref = reference.TagNameOnly(ref) - - if tagged, ok := ref.(reference.Tagged); ok { - tag = tagged.Tag() - } - repository = reference.FamiliarName(ref) - } - - query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) - query.Set("comment", options.Comment) - query.Set("author", options.Author) - for _, change := range options.Changes { - query.Add("changes", change) - } - if !options.Pause { - query.Set("pause", "0") - } - - var response types.IDResponse - resp, err := cli.post(ctx, "/commit", query, options.Config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go deleted file mode 100644 index bb278bf7..00000000 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ /dev/null @@ -1,103 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types" -) - -// ContainerStatPath returns Stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
- - urlStr := "/containers/" + containerID + "/archive" - response, err := cli.head(ctx, urlStr, query, nil) - defer ensureReaderClosed(response) - if err != nil { - return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) - } - return getContainerPathStatFromHeader(response.header) -} - -// CopyToContainer copies content into the container filesystem. -// Note that `content` must be a Reader for a TAR archive -func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { - query := url.Values{} - query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. - // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. - if !options.AllowOverwriteDirWithFile { - query.Set("noOverwriteDirNonDir", "true") - } - - if options.CopyUIDGID { - query.Set("copyUIDGID", "true") - } - - apiPath := "/containers/" + containerID + "/archive" - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) - defer ensureReaderClosed(response) - if err != nil { - return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) - } - - // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior - if response.statusCode != http.StatusOK { - return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - return nil -} - -// CopyFromContainer gets the content from the container and returns it as a Reader -// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { - query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - - apiPath := "/containers/" + containerID + "/archive" - response, err := cli.get(ctx, apiPath, query, nil) - if err != nil { - return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) - } - - // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior - if response.statusCode != http.StatusOK { - return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - // In order to get the copy behavior right, we need to know information - // about both the source and the destination. The response headers include - // stat info about the source that we can use in deciding exactly how to - // copy it locally. Along with the stat info about the local destination, - // we have everything we need to handle the multiple possibilities there - // can be when copying a file/dir from one location to another file/dir. 
- stat, err := getContainerPathStatFromHeader(response.header) - if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) - } - return response.body, stat, err -} - -func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { - var stat types.ContainerPathStat - - encodedStat := header.Get("X-Docker-Container-Path-Stat") - statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) - - err := json.NewDecoder(statDecoder).Decode(&stat) - if err != nil { - err = fmt.Errorf("unable to decode container path stat header: %s", err) - } - - return stat, err -} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go deleted file mode 100644 index 5b795e0c..00000000 --- a/vendor/github.com/docker/docker/client/container_create.go +++ /dev/null @@ -1,52 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" -) - -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// ContainerCreate creates a new container based on the given configuration. -// It can be associated with a name, but it's not mandatory. -func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { - var response container.ContainerCreateCreatedBody - - if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { - return response, err - } - - // When using API 1.24 and under, the client is responsible for removing the container - if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { - hostConfig.AutoRemove = false - } - - query := url.Values{} - if containerName != "" { - query.Set("name", containerName) - } - - body := configWrapper{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go deleted file mode 100644 index 29dac849..00000000 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerDiff shows differences in a container filesystem since it was started.
-func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { - var changes []container.ContainerChangeResponseItem - - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return changes, err - } - - err = json.NewDecoder(serverResp.body).Decode(&changes) - return changes, err -} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go deleted file mode 100644 index e3ee755b..00000000 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ /dev/null @@ -1,54 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { - var response types.IDResponse - - if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { - return response, err - } - - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} - -// ContainerExecStart starts an exec process already created in the docker host. -func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { - resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) - ensureReaderClosed(resp) - return err -} - -// ContainerExecAttach attaches a connection to an exec process in the server. -// It returns a types.HijackedResponse with the hijacked connection -// and a reader to get output. It's up to the caller to close -// the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { - headers := map[string][]string{"Content-Type": {"application/json"}} - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) -} - -// ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { - var response types.ContainerExecInspect - resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go deleted file mode 100644 index d0c0a5cb..00000000 --- a/vendor/github.com/docker/docker/client/container_export.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" -) - -// ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller -// to close the stream.
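A short sketch of how a caller might consume that export stream, assuming an environment-configured client and a placeholder container name, writing the filesystem out as a tar file:

package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// "mycontainer" is a placeholder; the result is a tar stream of the
	// container's filesystem that the caller must close.
	rc, err := cli.ContainerExport(ctx, "mycontainer")
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	out, err := os.Create("rootfs.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, rc); err != nil {
		panic(err)
	}
}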
-func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return serverResp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go deleted file mode 100644 index c496bcff..00000000 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ /dev/null @@ -1,53 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - if containerID == "" { - return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} - -// ContainerInspectWithRaw returns the container information and its raw representation. -func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - if containerID == "" { - return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} - } - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) - } - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - var response types.ContainerJSON - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go deleted file mode 100644 index 4d6f1d23..00000000 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ /dev/null @@ -1,16 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. 
-func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - query := url.Values{} - query.Set("signal", signal) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go deleted file mode 100644 index 1e7a63a9..00000000 --- a/vendor/github.com/docker/docker/client/container_list.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ContainerList returns the list of containers in the docker host. -func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { - query := url.Values{} - - if options.All { - query.Set("all", "1") - } - - if options.Limit != -1 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Since != "" { - query.Set("since", options.Since) - } - - if options.Before != "" { - query.Set("before", options.Before) - } - - if options.Size { - query.Set("size", "1") - } - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/containers/json", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) - return containers, err -} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go deleted file mode 100644 index 5b6541f0..00000000 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ /dev/null @@ -1,80 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. 
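As with attach, a non-TTY log stream needs demultiplexing; a minimal sketch (placeholder container name, client from the environment) could be:

package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	rc, err := cli.ContainerLogs(ctx, "mycontainer", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Timestamps: true,
		Tail:       "100", // only the last 100 lines
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// Containers started without a TTY return the multiplexed format
	// described above; StdCopy splits it into the two writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
		panic(err)
	}
}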
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "until"`) - } - query.Set("until", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) - if err != nil { - return nil, wrapResponseError(err, resp, "container", container) - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go deleted file mode 100644 index 5e7271a3..00000000 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerPause pauses the main process of a given container without terminating it. -func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go deleted file mode 100644 index 04383dea..00000000 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ContainersPrune requests the daemon to delete unused data -func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { - var report types.ContainersPruneReport - - if err := cli.NewVersionError("1.25", "container prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go deleted file mode 100644 index df81461b..00000000 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ /dev/null @@ -1,27 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerRemove kills and removes a container from the docker host. 
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { - query := url.Values{} - if options.RemoveVolumes { - query.Set("v", "1") - } - if options.RemoveLinks { - query.Set("link", "1") - } - - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "container", containerID) -} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go deleted file mode 100644 index 240fdf55..00000000 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerRename changes the name of a given container. -func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go deleted file mode 100644 index a9d4c0c7..00000000 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { - query := url.Values{} - query.Set("h", strconv.Itoa(int(height))) - query.Set("w", strconv.Itoa(int(width))) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go deleted file mode 100644 index 41e42196..00000000 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "time" - - timetypes "github.com/docker/docker/api/types/time" -) - -// ContainerRestart stops and starts a container again. -// It makes the daemon wait for the container to be up again for -// a specific amount of time, given the timeout.
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { - query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go deleted file mode 100644 index c2e0b15d..00000000 --- a/vendor/github.com/docker/docker/client/container_start.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { - query := url.Values{} - if len(options.CheckpointID) != 0 { - query.Set("checkpoint", options.CheckpointID) - } - if len(options.CheckpointDir) != 0 { - query.Set("checkpoint-dir", options.CheckpointDir) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go deleted file mode 100644 index 6ef44c77..00000000 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerStats returns near realtime stats for a given container. -// It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return types.ContainerStats{}, err - } - - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err -} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go deleted file mode 100644 index 629d7ab6..00000000 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "time" - - timetypes "github.com/docker/docker/api/types/time" -) - -// ContainerStop stops a container. In case the container fails to stop -// gracefully within a time frame specified by the timeout argument, -// it is forcefully terminated (killed). -// -// If the timeout is nil, the container's StopTimeout value is used, if set, -// otherwise the engine default. A negative timeout value can be specified, -// meaning no timeout, i.e. no forceful termination is performed. 
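A brief usage sketch of the timeout semantics described above, with an explicit ten-second grace period (the container name is a placeholder, and the client comes from the environment):

package main

import (
	"context"
	"time"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Give the container ten seconds to stop gracefully before it is killed;
	// passing nil instead falls back to the container's StopTimeout, if set,
	// or the engine default, as described above.
	timeout := 10 * time.Second
	if err := cli.ContainerStop(ctx, "mycontainer", &timeout); err != nil {
		panic(err)
	}
}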
-func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { - query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go deleted file mode 100644 index a5b78999..00000000 --- a/vendor/github.com/docker/docker/client/container_top.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strings" - - "github.com/docker/docker/api/types/container" -) - -// ContainerTop shows process information from within a container. -func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { - var response container.ContainerTopOKBody - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go deleted file mode 100644 index 1d8f8731..00000000 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerUnpause resumes the process execution within a container -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go deleted file mode 100644 index 6917cf9f..00000000 --- a/vendor/github.com/docker/docker/client/container_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/container" -) - -// ContainerUpdate updates resources of a container -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { - var response container.ContainerUpdateOKBody - serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go deleted file mode 100644 index 6ab8c1da..00000000 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ /dev/null @@ -1,83 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/container" - 
"github.com/docker/docker/api/types/versions" -) - -// ContainerWait waits until the specified container is in a certain state -// indicated by the given condition, either "not-running" (default), -// "next-exit", or "removed". -// -// If this client's API version is before 1.30, condition is ignored and -// ContainerWait will return immediately with the two channels, as the server -// will wait as if the condition were "not-running". -// -// If this client's API version is at least 1.30, ContainerWait blocks until -// the request has been acknowledged by the server (with a response header), -// then returns two channels on which the caller can wait for the exit status -// of the container or an error if there was a problem either beginning the -// wait request or in getting the response. This allows the caller to -// synchronize ContainerWait with other calls, such as specifying a -// "next-exit" condition before issuing a ContainerStart request. -func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { - if versions.LessThan(cli.ClientVersion(), "1.30") { - return cli.legacyContainerWait(ctx, containerID) - } - - resultC := make(chan container.ContainerWaitOKBody) - errC := make(chan error, 1) - - query := url.Values{} - query.Set("condition", string(condition)) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) - if err != nil { - defer ensureReaderClosed(resp) - errC <- err - return resultC, errC - } - - go func() { - defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err - return - } - - resultC <- res - }() - - return resultC, errC -} - -// legacyContainerWait returns immediately and doesn't have an option to wait -// until the container is removed. 
-func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { - resultC := make(chan container.ContainerWaitOKBody) - errC := make(chan error) - - go func() { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - errC <- err - return - } - defer ensureReaderClosed(resp) - - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err - return - } - - resultC <- res - }() - - return resultC, errC -} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go deleted file mode 100644 index 354cd369..00000000 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" -) - -// DiskUsage requests the current data usage from the daemon -func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { - var du types.DiskUsage - - serverResp, err := cli.get(ctx, "/system/df", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return du, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { - return du, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return du, nil -} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go deleted file mode 100644 index f4e3794c..00000000 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - registrytypes "github.com/docker/docker/api/types/registry" -) - -// DistributionInspect returns the image digest with full Manifest -func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { - // Contact the registry to retrieve digest and platform information - var distributionInspect registrytypes.DistributionInspect - if image == "" { - return distributionInspect, objectNotFoundError{object: "distribution", id: image} - } - - if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { - return distributionInspect, err - } - var headers map[string][]string - - if encodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": {encodedRegistryAuth}, - } - } - - resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) - defer ensureReaderClosed(resp) - if err != nil { - return distributionInspect, err - } - - err = json.NewDecoder(resp.body).Decode(&distributionInspect) - return distributionInspect, err -} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go deleted file mode 100644 index 001c1028..00000000 --- a/vendor/github.com/docker/docker/client/errors.go +++ /dev/null @@ -1,138 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "fmt" - "net/http" - - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// errConnectionFailed implements an error returned when connection failed. 
-type errConnectionFailed struct { - host string -} - -// Error returns a string representation of an errConnectionFailed -func (err errConnectionFailed) Error() string { - if err.host == "" { - return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" - } - return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) -} - -// IsErrConnectionFailed returns true if the error is caused by connection failed. -func IsErrConnectionFailed(err error) bool { - _, ok := errors.Cause(err).(errConnectionFailed) - return ok -} - -// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. -func ErrorConnectionFailed(host string) error { - return errConnectionFailed{host: host} -} - -// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility -type notFound interface { - error - NotFound() bool -} - -// IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. -func IsErrNotFound(err error) bool { - if _, ok := err.(notFound); ok { - return ok - } - return errdefs.IsNotFound(err) -} - -type objectNotFoundError struct { - object string - id string -} - -func (e objectNotFoundError) NotFound() {} - -func (e objectNotFoundError) Error() string { - return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) -} - -func wrapResponseError(err error, resp serverResponse, object, id string) error { - switch { - case err == nil: - return nil - case resp.statusCode == http.StatusNotFound: - return objectNotFoundError{object: object, id: id} - case resp.statusCode == http.StatusNotImplemented: - return errdefs.NotImplemented(err) - default: - return err - } -} - -// unauthorizedError represents an authorization error in a remote registry. -type unauthorizedError struct { - cause error -} - -// Error returns a string representation of an unauthorizedError -func (u unauthorizedError) Error() string { - return u.cause.Error() -} - -// IsErrUnauthorized returns true if the error is caused -// when a remote registry authentication fails -func IsErrUnauthorized(err error) bool { - if _, ok := err.(unauthorizedError); ok { - return ok - } - return errdefs.IsUnauthorized(err) -} - -type pluginPermissionDenied struct { - name string -} - -func (e pluginPermissionDenied) Error() string { - return "Permission denied while installing plugin " + e.name -} - -// IsErrPluginPermissionDenied returns true if the error is caused -// when a user denies a plugin's permissions -func IsErrPluginPermissionDenied(err error) bool { - _, ok := err.(pluginPermissionDenied) - return ok -} - -type notImplementedError struct { - message string -} - -func (e notImplementedError) Error() string { - return e.message -} - -func (e notImplementedError) NotImplemented() bool { - return true -} - -// IsErrNotImplemented returns true if the error is a NotImplemented error. -// This is returned by the API when a requested feature has not been -// implemented. 
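These predicates let callers branch on failure modes without matching error strings; a small sketch, assuming an environment-configured client and a deliberately missing container:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	_, err = cli.ContainerInspect(context.Background(), "no-such-container")
	switch {
	case client.IsErrNotFound(err):
		fmt.Println("container does not exist")
	case client.IsErrConnectionFailed(err):
		fmt.Println("daemon is not reachable")
	case err != nil:
		panic(err)
	}
}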
-func IsErrNotImplemented(err error) bool { - if _, ok := err.(notImplementedError); ok { - return ok - } - return errdefs.IsNotImplemented(err) -} - -// NewVersionError returns an error if the APIVersion required -// is less than the current supported version -func (cli *Client) NewVersionError(APIrequired, feature string) error { - if cli.version != "" && versions.LessThan(cli.version, APIrequired) { - return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go deleted file mode 100644 index 6e565389..00000000 --- a/vendor/github.com/docker/docker/client/events.go +++ /dev/null @@ -1,101 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - timetypes "github.com/docker/docker/api/types/time" -) - -// Events returns a stream of events in the daemon. It's up to the caller to close the stream -// by cancelling the context. Once the stream has been completely read an io.EOF error will -// be sent over the error channel. If an error is sent all processing will be stopped. It's up -// to the caller to reopen the stream in the event of an error by reinvoking this method. -func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { - - messages := make(chan events.Message) - errs := make(chan error, 1) - - started := make(chan struct{}) - go func() { - defer close(errs) - - query, err := buildEventsQueryParams(cli.version, options) - if err != nil { - close(started) - errs <- err - return - } - - resp, err := cli.get(ctx, "/events", query, nil) - if err != nil { - close(started) - errs <- err - return - } - defer resp.body.Close() - - decoder := json.NewDecoder(resp.body) - - close(started) - for { - select { - case <-ctx.Done(): - errs <- ctx.Err() - return - default: - var event events.Message - if err := decoder.Decode(&event); err != nil { - errs <- err - return - } - - select { - case messages <- event: - case <-ctx.Done(): - errs <- ctx.Err() - return - } - } - } - }() - <-started - - return messages, errs -} - -func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go deleted file mode 100644 index e9c9a752..00000000 --- a/vendor/github.com/docker/docker/client/hijack.go +++ /dev/null @@ -1,143 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bufio" - "context" - "crypto/tls" - "fmt" - "net" - "net/http" - "net/http/httputil" -
"net/url" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/go-connections/sockets" - "github.com/pkg/errors" -) - -// postHijacked sends a POST request and hijacks the connection. -func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) - if err != nil { - return types.HijackedResponse{}, err - } - - apiPath := cli.getAPIPath(ctx, path, query) - req, err := http.NewRequest("POST", apiPath, bodyEncoded) - if err != nil { - return types.HijackedResponse{}, err - } - req = cli.addHeaders(req, headers) - - conn, err := cli.setupHijackConn(ctx, req, "tcp") - if err != nil { - return types.HijackedResponse{}, err - } - - return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err -} - -// DialHijack returns a hijacked connection with negotiated protocol proto. -func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequest("POST", url, nil) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, meta) - - return cli.setupHijackConn(ctx, req, proto) -} - -// fallbackDial is used when WithDialer() was not called. -// See cli.Dialer(). -func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - return tls.Dial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} - -func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) { - req.Host = cli.addr - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", proto) - - dialer := cli.Dialer() - conn, err := dialer(ctx) - if err != nil { - return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - resp, err := clientconn.Do(req) - if err != httputil.ErrPersistEOF { - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusSwitchingProtocols { - resp.Body.Close() - return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) - } - } - - c, br := clientconn.Hijack() - if br.Buffered() > 0 { - // If there is buffered content, wrap the connection. We return an - // object that implements CloseWrite iff the underlying connection - // implements it. 
- if _, ok := c.(types.CloseWriter); ok { - c = &hijackedConnCloseWriter{&hijackedConn{c, br}} - } else { - c = &hijackedConn{c, br} - } - } else { - br.Reset(nil) - } - - return c, nil -} - -// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case -// that a) there was already buffered data in the http layer when Hijack() was -// called, and b) the underlying net.Conn does *not* implement CloseWrite(). -// hijackedConn does not implement CloseWrite() either. -type hijackedConn struct { - net.Conn - r *bufio.Reader -} - -func (c *hijackedConn) Read(b []byte) (int, error) { - return c.r.Read(b) -} - -// hijackedConnCloseWriter is a hijackedConn which additionally implements -// CloseWrite(). It is returned by setupHijackConn in the case that a) there -// was already buffered data in the http layer when Hijack() was called, and b) -// the underlying net.Conn *does* implement CloseWrite(). -type hijackedConnCloseWriter struct { - *hijackedConn -} - -var _ types.CloseWriter = &hijackedConnCloseWriter{} - -func (c *hijackedConnCloseWriter) CloseWrite() error { - conn := c.Conn.(types.CloseWriter) - return conn.CloseWrite() -} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go deleted file mode 100644 index 8fcf9950..00000000 --- a/vendor/github.com/docker/docker/client/image_build.go +++ /dev/null @@ -1,146 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/base64" - "encoding/json" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" -) - -// ImageBuild sends a request to the daemon to build images. -// The Body in the response implements an io.ReadCloser and it's up to the caller to -// close it.
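A minimal build sketch that satisfies the tar-stream contract by packing a single Dockerfile in memory (the image tag and Dockerfile contents are illustrative, and the client comes from the environment):

package main

import (
	"archive/tar"
	"bytes"
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Build context: a tar stream containing just a Dockerfile.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	dockerfile := []byte("FROM alpine:3.12\nCMD [\"echo\", \"hello\"]\n")
	if err := tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0600, Size: int64(len(dockerfile))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(dockerfile); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	resp, err := cli.ImageBuild(ctx, &buf, types.ImageBuildOptions{
		Tags:       []string{"example:latest"},
		Dockerfile: "Dockerfile",
		Remove:     true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The body is a JSON progress stream; just echo it here.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		panic(err)
	}
}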
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := cli.imageBuildOptionsToQuery(options) - if err != nil { - return types.ImageBuildResponse{}, err - } - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(options.AuthConfigs) - if err != nil { - return types.ImageBuildResponse{}, err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - - headers.Set("Content-Type", "application/x-tar") - - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) - if err != nil { - return types.ImageBuildResponse{}, err - } - - osType := getDockerOS(serverResp.header.Get("Server")) - - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: osType, - }, nil -} - -func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - "securityopt": options.SecurityOpt, - "extrahosts": options.ExtraHosts, - } - if options.SuppressOutput { - query.Set("q", "1") - } - if options.RemoteContext != "" { - query.Set("remote", options.RemoteContext) - } - if options.NoCache { - query.Set("nocache", "1") - } - if options.Remove { - query.Set("rm", "1") - } else { - query.Set("rm", "0") - } - - if options.ForceRemove { - query.Set("forcerm", "1") - } - - if options.PullParent { - query.Set("pull", "1") - } - - if options.Squash { - if err := cli.NewVersionError("1.25", "squash"); err != nil { - return query, err - } - query.Set("squash", "1") - } - - if !container.Isolation.IsDefault(options.Isolation) { - query.Set("isolation", string(options.Isolation)) - } - - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("networkmode", options.NetworkMode) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - query.Set("target", options.Target) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err - } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err - } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := json.Marshal(options.Labels) - if err != nil { - return query, err - } - query.Set("labels", string(labelsJSON)) - - cacheFromJSON, err := json.Marshal(options.CacheFrom) - if err != nil { - return query, err - } - query.Set("cachefrom", string(cacheFromJSON)) - if options.SessionID != "" { - query.Set("session", options.SessionID) - } - if options.Platform != "" { - if err := cli.NewVersionError("1.32", "platform"); err != nil { - return query, err - } - query.Set("platform", strings.ToLower(options.Platform)) - } - if options.BuildID != "" { - query.Set("buildid", options.BuildID) - } - query.Set("version", string(options.Version)) - - if options.Outputs != nil { - outputsJSON, err := json.Marshal(options.Outputs) - if err != nil { - return query, err - } - query.Set("outputs", string(outputsJSON)) - } - return query, nil -} diff --git 
a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go deleted file mode 100644 index 23938047..00000000 --- a/vendor/github.com/docker/docker/client/image_create.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImageCreate creates a new image based on the parent options. -// It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - query.Set("tag", getAPITagFromNamedRef(ref)) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go deleted file mode 100644 index b5bea10d..00000000 --- a/vendor/github.com/docker/docker/client/image_history.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/image" -) - -// ImageHistory returns the changes in an image in history format. -func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { - var history []image.HistoryResponseItem - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return history, err - } - - err = json.NewDecoder(serverResp.body).Decode(&history) - return history, err -} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go deleted file mode 100644 index c2972ea9..00000000 --- a/vendor/github.com/docker/docker/client/image_import.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImageImport creates a new image based on the source options. -// It returns the JSON content in the response body.
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { - if ref != "" { - //Check if the given image name can be resolved - if _, err := reference.ParseNormalizedNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go deleted file mode 100644 index 1eb8dce0..00000000 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types" -) - -// ImageInspectWithRaw returns the image information and its raw representation. -func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { - if imageID == "" { - return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} - } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) - } - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ImageInspect{}, nil, err - } - - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go deleted file mode 100644 index 4fa8c006..00000000 --- a/vendor/github.com/docker/docker/client/image_list.go +++ /dev/null @@ -1,45 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/versions" -) - -// ImageList returns a list of images in the docker host. 
-func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { - var images []types.ImageSummary - query := url.Values{} - - optionFilters := options.Filters - referenceFilters := optionFilters.Get("reference") - if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { - query.Set("filter", referenceFilters[0]) - for _, filterValue := range referenceFilters { - optionFilters.Del("reference", filterValue) - } - } - if optionFilters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.All { - query.Set("all", "1") - } - - serverResp, err := cli.get(ctx, "/images/json", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return images, err - } - - err = json.NewDecoder(serverResp.body).Decode(&images) - return images, err -} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go deleted file mode 100644 index 91016e49..00000000 --- a/vendor/github.com/docker/docker/client/image_load.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. -func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") - } - headers := map[string][]string{"Content-Type": {"application/x-tar"}} - resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) - if err != nil { - return types.ImageLoadResponse{}, err - } - return types.ImageLoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go deleted file mode 100644 index 56af6d7f..00000000 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ImagesPrune requests the daemon to delete unused data -func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { - var report types.ImagesPruneReport - - if err := cli.NewVersionError("1.25", "image prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go deleted file mode 100644 index a2397559..00000000 --- a/vendor/github.com/docker/docker/client/image_pull.go 
+++ /dev/null
@@ -1,64 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"io"
-	"net/url"
-	"strings"
-
-	"github.com/docker/distribution/reference"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/errdefs"
-)
-
-// ImagePull requests the docker host to pull an image from a remote registry.
-// It executes the privileged function if the operation is unauthorized
-// and it tries one more time.
-// It's up to the caller to handle the io.ReadCloser and close it properly.
-//
-// FIXME(vdemeester): there is currently used in a few way in docker/docker
-// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
-// - if in trusted content, ref is used to pass the reference name, and tag for the digest
-func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) {
-	ref, err := reference.ParseNormalizedNamed(refStr)
-	if err != nil {
-		return nil, err
-	}
-
-	query := url.Values{}
-	query.Set("fromImage", reference.FamiliarName(ref))
-	if !options.All {
-		query.Set("tag", getAPITagFromNamedRef(ref))
-	}
-	if options.Platform != "" {
-		query.Set("platform", strings.ToLower(options.Platform))
-	}
-
-	resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
-	if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
-		newAuthHeader, privilegeErr := options.PrivilegeFunc()
-		if privilegeErr != nil {
-			return nil, privilegeErr
-		}
-		resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
-	}
-	if err != nil {
-		return nil, err
-	}
-	return resp.body, nil
-}
-
-// getAPITagFromNamedRef returns a tag from the specified reference.
-// This function is necessary as long as the docker "server" api expects
-// digests to be sent as tags and makes a distinction between the name
-// and tag/digest part of a reference.
-func getAPITagFromNamedRef(ref reference.Named) string {
-	if digested, ok := ref.(reference.Digested); ok {
-		return digested.Digest().String()
-	}
-	ref = reference.TagNameOnly(ref)
-	if tagged, ok := ref.(reference.Tagged); ok {
-		return tagged.Tag()
-	}
-	return ""
-}
diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go
deleted file mode 100644
index 49d412ee..00000000
--- a/vendor/github.com/docker/docker/client/image_push.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"errors"
-	"io"
-	"net/url"
-
-	"github.com/docker/distribution/reference"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/errdefs"
-)
-
-// ImagePush requests the docker host to push an image to a remote registry.
-// It executes the privileged function if the operation is unauthorized
-// and it tries one more time.
-// It's up to the caller to handle the io.ReadCloser and close it properly.
-func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) {
-	ref, err := reference.ParseNormalizedNamed(image)
-	if err != nil {
-		return nil, err
-	}
-
-	if _, isCanonical := ref.(reference.Canonical); isCanonical {
-		return nil, errors.New("cannot push a digest reference")
-	}
-
-	tag := ""
-	name := reference.FamiliarName(ref)
-
-	if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged {
-		tag = nameTaggedRef.Tag()
-	}
-
-	query := url.Values{}
-	query.Set("tag", tag)
-
-	resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
-	if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
-		newAuthHeader, privilegeErr := options.PrivilegeFunc()
-		if privilegeErr != nil {
-			return nil, privilegeErr
-		}
-		resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
-	}
-	if err != nil {
-		return nil, err
-	}
-	return resp.body, nil
-}
-
-func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) {
-	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
-	return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go
deleted file mode 100644
index 84a41af0..00000000
--- a/vendor/github.com/docker/docker/client/image_remove.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-)
-
-// ImageRemove removes an image from the docker host.
-func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) {
-	query := url.Values{}
-
-	if options.Force {
-		query.Set("force", "1")
-	}
-	if !options.PruneChildren {
-		query.Set("noprune", "1")
-	}
-
-	var dels []types.ImageDeleteResponseItem
-	resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
-	defer ensureReaderClosed(resp)
-	if err != nil {
-		return dels, wrapResponseError(err, resp, "image", imageID)
-	}
-
-	err = json.NewDecoder(resp.body).Decode(&dels)
-	return dels, err
-}
diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go
deleted file mode 100644
index d1314e4b..00000000
--- a/vendor/github.com/docker/docker/client/image_save.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"io"
-	"net/url"
-)
-
-// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
-// It's up to the caller to store the images and close the stream.
-func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
-	query := url.Values{
-		"names": imageIDs,
-	}
-
-	resp, err := cli.get(ctx, "/images/get", query, nil)
-	if err != nil {
-		return nil, err
-	}
-	return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go
deleted file mode 100644
index 82955a74..00000000
--- a/vendor/github.com/docker/docker/client/image_search.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/api/types/registry"
-	"github.com/docker/docker/errdefs"
-)
-
-// ImageSearch makes the docker host to search by a term in a remote registry.
-// The list of results is not sorted in any fashion.
-func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) {
-	var results []registry.SearchResult
-	query := url.Values{}
-	query.Set("term", term)
-	query.Set("limit", fmt.Sprintf("%d", options.Limit))
-
-	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToJSON(options.Filters)
-		if err != nil {
-			return results, err
-		}
-		query.Set("filters", filterJSON)
-	}
-
-	resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
-	defer ensureReaderClosed(resp)
-	if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
-		newAuthHeader, privilegeErr := options.PrivilegeFunc()
-		if privilegeErr != nil {
-			return results, privilegeErr
-		}
-		resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
-	}
-	if err != nil {
-		return results, err
-	}
-
-	err = json.NewDecoder(resp.body).Decode(&results)
-	return results, err
-}
-
-func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
-	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
-	return cli.get(ctx, "/images/search", query, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go
deleted file mode 100644
index 5652bfc2..00000000
--- a/vendor/github.com/docker/docker/client/image_tag.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net/url"
-
-	"github.com/docker/distribution/reference"
-	"github.com/pkg/errors"
-)
-
-// ImageTag tags an image in the docker host
-func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
-	if _, err := reference.ParseAnyReference(source); err != nil {
-		return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source)
-	}
-
-	ref, err := reference.ParseNormalizedNamed(target)
-	if err != nil {
-		return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target)
-	}
-
-	if _, isCanonical := ref.(reference.Canonical); isCanonical {
-		return errors.New("refusing to create a tag with a digest reference")
-	}
-
-	ref = reference.TagNameOnly(ref)
-
-	query := url.Values{}
-	query.Set("repo", reference.FamiliarName(ref))
-	if tagged, ok := ref.(reference.Tagged); ok {
-		query.Set("tag", tagged.Tag())
-	}
-
-	resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go
deleted file mode 100644
index c856704e..00000000
--- a/vendor/github.com/docker/docker/client/info.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-)
-
-// Info returns information about the docker server.
-func (cli *Client) Info(ctx context.Context) (types.Info, error) {
-	var info types.Info
-	serverResp, err := cli.get(ctx, "/info", url.Values{}, nil)
-	defer ensureReaderClosed(serverResp)
-	if err != nil {
-		return info, err
-	}
-
-	if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
-		return info, fmt.Errorf("Error reading remote info: %v", err)
-	}
-
-	return info, nil
-}
diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go
deleted file mode 100644
index cde64be4..00000000
--- a/vendor/github.com/docker/docker/client/interface.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"io"
-	"net"
-	"net/http"
-	"time"
-
-	"github.com/docker/docker/api/types"
-	containertypes "github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/api/types/events"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/api/types/image"
-	networktypes "github.com/docker/docker/api/types/network"
-	"github.com/docker/docker/api/types/registry"
-	"github.com/docker/docker/api/types/swarm"
-	volumetypes "github.com/docker/docker/api/types/volume"
-)
-
-// CommonAPIClient is the common methods between stable and experimental versions of APIClient.
-type CommonAPIClient interface {
-	ConfigAPIClient
-	ContainerAPIClient
-	DistributionAPIClient
-	ImageAPIClient
-	NodeAPIClient
-	NetworkAPIClient
-	PluginAPIClient
-	ServiceAPIClient
-	SwarmAPIClient
-	SecretAPIClient
-	SystemAPIClient
-	VolumeAPIClient
-	ClientVersion() string
-	DaemonHost() string
-	HTTPClient() *http.Client
-	ServerVersion(ctx context.Context) (types.Version, error)
-	NegotiateAPIVersion(ctx context.Context)
-	NegotiateAPIVersionPing(types.Ping)
-	DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error)
-	Dialer() func(context.Context) (net.Conn, error)
-	Close() error
-}
-
-// ContainerAPIClient defines API client methods for the containers
-type ContainerAPIClient interface {
-	ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
-	ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error)
-	ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error)
-	ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error)
-	ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error)
-	ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error)
-	ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
-	ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error
-	ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error
-	ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
-	ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
-	ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error)
-	ContainerKill(ctx context.Context, container, signal string) error
-	ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
-	ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error)
-	ContainerPause(ctx context.Context, container string) error
-	ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
-	ContainerRename(ctx context.Context, container, newContainerName string) error
-	ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
-	ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
-	ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
-	ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error)
-	ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
-	ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
-	ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error)
-	ContainerUnpause(ctx context.Context, container string) error
-	ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error)
-	ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error)
-	CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
-	CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
-	ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error)
-}
-
-// DistributionAPIClient defines API client methods for the registry
-type DistributionAPIClient interface {
-	DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error)
-}
-
-// ImageAPIClient defines API client methods for the images
-type ImageAPIClient interface {
-	ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
-	BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
-	BuildCancel(ctx context.Context, id string) error
-	ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
-	ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
-	ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
-	ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error)
-	ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error)
-	ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
-	ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
-	ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
-	ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error)
-	ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
-	ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
-	ImageTag(ctx context.Context, image, ref string) error
-	ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error)
-}
-
-// NetworkAPIClient defines API client methods for the networks
-type NetworkAPIClient interface {
-	NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error
-	NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
-	NetworkDisconnect(ctx context.Context, network, container string, force bool) error
-	NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error)
-	NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error)
-	NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
-	NetworkRemove(ctx context.Context, network string) error
-	NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error)
-}
-
-// NodeAPIClient defines API client methods for the nodes
-type NodeAPIClient interface {
-	NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
-	NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
-	NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
-	NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
-}
-
-// PluginAPIClient defines API client methods for the plugins
-type PluginAPIClient interface {
-	PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error)
-	PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
-	PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error
-	PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error
-	PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
-	PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
-	PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error)
-	PluginSet(ctx context.Context, name string, args []string) error
-	PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error)
-	PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error
-}
-
-// ServiceAPIClient defines API client methods for the services
-type ServiceAPIClient interface {
-	ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error)
-	ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error)
-	ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
-	ServiceRemove(ctx context.Context, serviceID string) error
-	ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error)
-	ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
-	TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
-	TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
-	TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
-}
-
-// SwarmAPIClient defines API client methods for the swarm
-type SwarmAPIClient interface {
-	SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
-	SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
-	SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
-	SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
-	SwarmLeave(ctx context.Context, force bool) error
-	SwarmInspect(ctx context.Context) (swarm.Swarm, error)
-	SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error
-}
-
-// SystemAPIClient defines API client methods for the system
-type SystemAPIClient interface {
-	Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
-	Info(ctx context.Context) (types.Info, error)
-	RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error)
-	DiskUsage(ctx context.Context) (types.DiskUsage, error)
-	Ping(ctx context.Context) (types.Ping, error)
-}
-
-// VolumeAPIClient defines API client methods for the volumes
-type VolumeAPIClient interface {
-	VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error)
-	VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error)
-	VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
-	VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error)
-	VolumeRemove(ctx context.Context, volumeID string, force bool) error
-	VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error)
-}
-
-// SecretAPIClient defines API client methods for secrets
-type SecretAPIClient interface {
-	SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error)
-	SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error)
-	SecretRemove(ctx context.Context, id string) error
-	SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error)
-	SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error
-}
-
-// ConfigAPIClient defines API client methods for configs
-type ConfigAPIClient interface {
-	ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error)
-	ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error)
-	ConfigRemove(ctx context.Context, id string) error
-	ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error)
-	ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error
-}
diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go
deleted file mode 100644
index 402ffb51..00000000
--- a/vendor/github.com/docker/docker/client/interface_experimental.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-
-	"github.com/docker/docker/api/types"
-)
-
-type apiClientExperimental interface {
-	CheckpointAPIClient
-}
-
-// CheckpointAPIClient defines API client methods for the checkpoints
-type CheckpointAPIClient interface {
-	CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
-	CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error
-	CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error)
-}
diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go
deleted file mode 100644
index 5502cd74..00000000
--- a/vendor/github.com/docker/docker/client/interface_stable.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-// APIClient is an interface that clients that talk with a docker server must implement.
-type APIClient interface {
-	CommonAPIClient
-	apiClientExperimental
-}
-
-// Ensure that Client always implements APIClient.
-var _ APIClient = &Client{}
diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go
deleted file mode 100644
index f0585206..00000000
--- a/vendor/github.com/docker/docker/client/login.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/registry"
-)
-
-// RegistryLogin authenticates the docker server with a given docker registry.
-// It returns unauthorizedError when the authentication fails.
-func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) {
-	resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
-	defer ensureReaderClosed(resp)
-
-	if err != nil {
-		return registry.AuthenticateOKBody{}, err
-	}
-
-	var response registry.AuthenticateOKBody
-	err = json.NewDecoder(resp.body).Decode(&response)
-	return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go
deleted file mode 100644
index 57189461..00000000
--- a/vendor/github.com/docker/docker/client/network_connect.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/network"
-)
-
-// NetworkConnect connects a container to an existent network in the docker host.
-func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
-	nc := types.NetworkConnect{
-		Container: containerID,
-		EndpointConfig: config,
-	}
-	resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go
deleted file mode 100644
index 278d9383..00000000
--- a/vendor/github.com/docker/docker/client/network_create.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-
-	"github.com/docker/docker/api/types"
-)
-
-// NetworkCreate creates a new network in the docker host.
-func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
-	networkCreateRequest := types.NetworkCreateRequest{
-		NetworkCreate: options,
-		Name: name,
-	}
-	var response types.NetworkCreateResponse
-	serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
-	defer ensureReaderClosed(serverResp)
-	if err != nil {
-		return response, err
-	}
-
-	err = json.NewDecoder(serverResp.body).Decode(&response)
-	return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go
deleted file mode 100644
index dd156766..00000000
--- a/vendor/github.com/docker/docker/client/network_disconnect.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-
-	"github.com/docker/docker/api/types"
-)
-
-// NetworkDisconnect disconnects a container from an existent network in the docker host.
-func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
-	nd := types.NetworkDisconnect{Container: containerID, Force: force}
-	resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go
deleted file mode 100644
index 89a05b30..00000000
--- a/vendor/github.com/docker/docker/client/network_inspect.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"io/ioutil"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-)
-
-// NetworkInspect returns the information for a specific network configured in the docker host.
-func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) {
-	networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options)
-	return networkResource, err
-}
-
-// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
-func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) {
-	if networkID == "" {
-		return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID}
-	}
-	var (
-		networkResource types.NetworkResource
-		resp serverResponse
-		err error
-	)
-	query := url.Values{}
-	if options.Verbose {
-		query.Set("verbose", "true")
-	}
-	if options.Scope != "" {
-		query.Set("scope", options.Scope)
-	}
-	resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
-	defer ensureReaderClosed(resp)
-	if err != nil {
-		return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
-	}
-
-	body, err := ioutil.ReadAll(resp.body)
-	if err != nil {
-		return networkResource, nil, err
-	}
-	rdr := bytes.NewReader(body)
-	err = json.NewDecoder(rdr).Decode(&networkResource)
-	return networkResource, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go
deleted file mode 100644
index 7130c136..00000000
--- a/vendor/github.com/docker/docker/client/network_list.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-)
-
-// NetworkList returns the list of networks configured in the docker host.
-func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
-	query := url.Values{}
-	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
-		if err != nil {
-			return nil, err
-		}
-
-		query.Set("filters", filterJSON)
-	}
-	var networkResources []types.NetworkResource
-	resp, err := cli.get(ctx, "/networks", query, nil)
-	defer ensureReaderClosed(resp)
-	if err != nil {
-		return networkResources, err
-	}
-	err = json.NewDecoder(resp.body).Decode(&networkResources)
-	return networkResources, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go
deleted file mode 100644
index cebb1882..00000000
--- a/vendor/github.com/docker/docker/client/network_prune.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-)
-
-// NetworksPrune requests the daemon to delete unused networks
-func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) {
-	var report types.NetworksPruneReport
-
-	if err := cli.NewVersionError("1.25", "network prune"); err != nil {
-		return report, err
-	}
-
-	query, err := getFiltersQuery(pruneFilters)
-	if err != nil {
-		return report, err
-	}
-
-	serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil)
-	defer ensureReaderClosed(serverResp)
-	if err != nil {
-		return report, err
-	}
-
-	if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
-		return report, fmt.Errorf("Error retrieving network prune report: %v", err)
-	}
-
-	return report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go
deleted file mode 100644
index e71b16d8..00000000
--- a/vendor/github.com/docker/docker/client/network_remove.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// NetworkRemove removes an existent network from the docker host.
-func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
-	resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
-	defer ensureReaderClosed(resp)
-	return wrapResponseError(err, resp, "network", networkID)
-}
diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go
deleted file mode 100644
index d296c9fd..00000000
--- a/vendor/github.com/docker/docker/client/node_inspect.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"io/ioutil"
-
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// NodeInspectWithRaw returns the node information.
-func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
-	if nodeID == "" {
-		return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID}
-	}
-	serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
-	defer ensureReaderClosed(serverResp)
-	if err != nil {
-		return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
-	}
-
-	body, err := ioutil.ReadAll(serverResp.body)
-	if err != nil {
-		return swarm.Node{}, nil, err
-	}
-
-	var response swarm.Node
-	rdr := bytes.NewReader(body)
-	err = json.NewDecoder(rdr).Decode(&response)
-	return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go
deleted file mode 100644
index c212906b..00000000
--- a/vendor/github.com/docker/docker/client/node_list.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// NodeList returns the list of nodes.
-func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
-	query := url.Values{}
-
-	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToJSON(options.Filters)
-
-		if err != nil {
-			return nil, err
-		}
-
-		query.Set("filters", filterJSON)
-	}
-
-	resp, err := cli.get(ctx, "/nodes", query, nil)
-	defer ensureReaderClosed(resp)
-	if err != nil {
-		return nil, err
-	}
-
-	var nodes []swarm.Node
-	err = json.NewDecoder(resp.body).Decode(&nodes)
-	return nodes, err
-}
diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go
deleted file mode 100644
index 03ab8780..00000000
--- a/vendor/github.com/docker/docker/client/node_remove.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-)
-
-// NodeRemove removes a Node.
-func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error {
-	query := url.Values{}
-	if options.Force {
-		query.Set("force", "1")
-	}
-
-	resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
-	defer ensureReaderClosed(resp)
-	return wrapResponseError(err, resp, "node", nodeID)
-}
diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go
deleted file mode 100644
index de32a617..00000000
--- a/vendor/github.com/docker/docker/client/node_update.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net/url"
-	"strconv"
-
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// NodeUpdate updates a Node.
-func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
-	query := url.Values{}
-	query.Set("version", strconv.FormatUint(version.Index, 10))
-	resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go
deleted file mode 100644
index 6f77f095..00000000
--- a/vendor/github.com/docker/docker/client/options.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package client
-
-import (
-	"context"
-	"net"
-	"net/http"
-	"os"
-	"path/filepath"
-	"time"
-
-	"github.com/docker/go-connections/sockets"
-	"github.com/docker/go-connections/tlsconfig"
-	"github.com/pkg/errors"
-)
-
-// Opt is a configuration option to initialize a client
-type Opt func(*Client) error
-
-// FromEnv configures the client with values from environment variables.
-//
-// Supported environment variables:
-// DOCKER_HOST to set the url to the docker server.
-// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
-// DOCKER_CERT_PATH to load the TLS certificates from.
-// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
-func FromEnv(c *Client) error {
-	if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
-		options := tlsconfig.Options{
-			CAFile: filepath.Join(dockerCertPath, "ca.pem"),
-			CertFile: filepath.Join(dockerCertPath, "cert.pem"),
-			KeyFile: filepath.Join(dockerCertPath, "key.pem"),
-			InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
-		}
-		tlsc, err := tlsconfig.Client(options)
-		if err != nil {
-			return err
-		}
-
-		c.client = &http.Client{
-			Transport: &http.Transport{TLSClientConfig: tlsc},
-			CheckRedirect: CheckRedirect,
-		}
-	}
-
-	if host := os.Getenv("DOCKER_HOST"); host != "" {
-		if err := WithHost(host)(c); err != nil {
-			return err
-		}
-	}
-
-	if version := os.Getenv("DOCKER_API_VERSION"); version != "" {
-		if err := WithVersion(version)(c); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// WithDialer applies the dialer.DialContext to the client transport. This can be
-// used to set the Timeout and KeepAlive settings of the client.
-// Deprecated: use WithDialContext
-func WithDialer(dialer *net.Dialer) Opt {
-	return WithDialContext(dialer.DialContext)
-}
-
-// WithDialContext applies the dialer to the client transport. This can be
-// used to set the Timeout and KeepAlive settings of the client.
-func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt {
-	return func(c *Client) error {
-		if transport, ok := c.client.Transport.(*http.Transport); ok {
-			transport.DialContext = dialContext
-			return nil
-		}
-		return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport)
-	}
-}
-
-// WithHost overrides the client host with the specified one.
-func WithHost(host string) Opt {
-	return func(c *Client) error {
-		hostURL, err := ParseHostURL(host)
-		if err != nil {
-			return err
-		}
-		c.host = host
-		c.proto = hostURL.Scheme
-		c.addr = hostURL.Host
-		c.basePath = hostURL.Path
-		if transport, ok := c.client.Transport.(*http.Transport); ok {
-			return sockets.ConfigureTransport(transport, c.proto, c.addr)
-		}
-		return errors.Errorf("cannot apply host to transport: %T", c.client.Transport)
-	}
-}
-
-// WithHTTPClient overrides the client http client with the specified one
-func WithHTTPClient(client *http.Client) Opt {
-	return func(c *Client) error {
-		if client != nil {
-			c.client = client
-		}
-		return nil
-	}
-}
-
-// WithTimeout configures the time limit for requests made by the HTTP client
-func WithTimeout(timeout time.Duration) Opt {
-	return func(c *Client) error {
-		c.client.Timeout = timeout
-		return nil
-	}
-}
-
-// WithHTTPHeaders overrides the client default http headers
-func WithHTTPHeaders(headers map[string]string) Opt {
-	return func(c *Client) error {
-		c.customHTTPHeaders = headers
-		return nil
-	}
-}
-
-// WithScheme overrides the client scheme with the specified one
-func WithScheme(scheme string) Opt {
-	return func(c *Client) error {
-		c.scheme = scheme
-		return nil
-	}
-}
-
-// WithTLSClientConfig applies a tls config to the client transport.
-func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt {
-	return func(c *Client) error {
-		opts := tlsconfig.Options{
-			CAFile: cacertPath,
-			CertFile: certPath,
-			KeyFile: keyPath,
-			ExclusiveRootPools: true,
-		}
-		config, err := tlsconfig.Client(opts)
-		if err != nil {
-			return errors.Wrap(err, "failed to create tls config")
-		}
-		if transport, ok := c.client.Transport.(*http.Transport); ok {
-			transport.TLSClientConfig = config
-			return nil
-		}
-		return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport)
-	}
-}
-
-// WithVersion overrides the client version with the specified one. If an empty
-// version is specified, the value will be ignored to allow version negotiation.
-func WithVersion(version string) Opt {
-	return func(c *Client) error {
-		if version != "" {
-			c.version = version
-			c.manualOverride = true
-		}
-		return nil
-	}
-}
-
-// WithAPIVersionNegotiation enables automatic API version negotiation for the client.
-// With this option enabled, the client automatically negotiates the API version
-// to use when making requests. API version negotiation is performed on the first
-// request; subsequent requests will not re-negotiate.
-func WithAPIVersionNegotiation() Opt {
-	return func(c *Client) error {
-		c.negotiateVersion = true
-		return nil
-	}
-}
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
deleted file mode 100644
index 4553e0fb..00000000
--- a/vendor/github.com/docker/docker/client/ping.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net/http"
-	"path"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/errdefs"
-)
-
-// Ping pings the server and returns the value of the "Docker-Experimental",
-// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use
-// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported
-// by the daemon.
-func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
-	var ping types.Ping
-
-	// Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest()
-	// because ping requests are used during API version negotiation, so we want
-	// to hit the non-versioned /_ping endpoint, not /v1.xx/_ping
-	req, err := cli.buildRequest("HEAD", path.Join(cli.basePath, "/_ping"), nil, nil)
-	if err != nil {
-		return ping, err
-	}
-	serverResp, err := cli.doRequest(ctx, req)
-	if err == nil {
-		defer ensureReaderClosed(serverResp)
-		switch serverResp.statusCode {
-		case http.StatusOK, http.StatusInternalServerError:
-			// Server handled the request, so parse the response
-			return parsePingResponse(cli, serverResp)
-		}
-	}
-
-	req, err = cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil)
-	if err != nil {
-		return ping, err
-	}
-	serverResp, err = cli.doRequest(ctx, req)
-	defer ensureReaderClosed(serverResp)
-	if err != nil {
-		return ping, err
-	}
-	return parsePingResponse(cli, serverResp)
-}
-
-func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) {
-	var ping types.Ping
-	if resp.header == nil {
-		err := cli.checkResponseErr(resp)
-		return ping, errdefs.FromStatusCode(err, resp.statusCode)
-	}
-	ping.APIVersion = resp.header.Get("API-Version")
-	ping.OSType = resp.header.Get("OSType")
-	if resp.header.Get("Docker-Experimental") == "true" {
-		ping.Experimental = true
-	}
-	if bv := resp.header.Get("Builder-Version"); bv != "" {
-		ping.BuilderVersion = types.BuilderVersion(bv)
-	}
-	err := cli.checkResponseErr(resp)
-	return ping, errdefs.FromStatusCode(err, resp.statusCode)
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go
deleted file mode 100644
index b95dbaf6..00000000
--- a/vendor/github.com/docker/docker/client/plugin_create.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"io"
-	"net/http"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-)
-
-// PluginCreate creates a plugin
-func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error {
-	headers := http.Header(make(map[string][]string))
-	headers.Set("Content-Type", "application/x-tar")
-
-	query := url.Values{}
-	query.Set("name", createOptions.RepoName)
-
-	resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go
deleted file mode 100644
index 01f6574f..00000000
--- a/vendor/github.com/docker/docker/client/plugin_disable.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-)
-
-// PluginDisable disables a plugin
-func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error {
-	query := url.Values{}
-	if options.Force {
-		query.Set("force", "1")
-	}
-	resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go
deleted file mode 100644
index 736da48b..00000000
--- a/vendor/github.com/docker/docker/client/plugin_enable.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net/url"
-	"strconv"
-
-	"github.com/docker/docker/api/types"
-)
-
-// PluginEnable enables a plugin
-func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error {
-	query := url.Values{}
-	query.Set("timeout", strconv.Itoa(options.Timeout))
-
-	resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go
deleted file mode 100644
index 81b89732..00000000
--- a/vendor/github.com/docker/docker/client/plugin_inspect.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"io/ioutil"
-
-	"github.com/docker/docker/api/types"
-)
-
-// PluginInspectWithRaw inspects an existing plugin
-func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
-	if name == "" {
-		return nil, nil, objectNotFoundError{object: "plugin", id: name}
-	}
-	resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
-	defer ensureReaderClosed(resp)
-	if err != nil {
-		return nil, nil, wrapResponseError(err, resp, "plugin", name)
-	}
-
-	body, err := ioutil.ReadAll(resp.body)
-	if err != nil {
-		return nil, nil, err
-	}
-	var p types.Plugin
-	rdr := bytes.NewReader(body)
-	err = json.NewDecoder(rdr).Decode(&p)
-	return &p, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go
deleted file mode 100644
index 012afe61..00000000
--- a/vendor/github.com/docker/docker/client/plugin_install.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"io"
-	"net/url"
-
-	"github.com/docker/distribution/reference"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/errdefs"
-	"github.com/pkg/errors"
-)
-
-// PluginInstall installs a plugin
-func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
-	query := url.Values{}
-	if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
-		return nil, errors.Wrap(err, "invalid remote reference")
-	}
-	query.Set("remote", options.RemoteRef)
-
-	privileges, err := cli.checkPluginPermissions(ctx, query, options)
-	if err != nil {
-		return nil, err
-	}
-
-	// set name for plugin pull, if empty should default to remote reference
-	query.Set("name", name)
-
-	resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
-	if err != nil {
-		return nil, err
-	}
-
-	name = resp.header.Get("Docker-Plugin-Name")
-
-	pr, pw := io.Pipe()
-	go func() { // todo: the client should probably be designed more around the actual api
-		_, err := io.Copy(pw, resp.body)
-		if err != nil {
-			pw.CloseWithError(err)
-			return
-		}
-		defer func() {
-			if err != nil {
-				delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
-				ensureReaderClosed(delResp)
-			}
-		}()
-		if len(options.Args) > 0 {
-			if err := cli.PluginSet(ctx, name, options.Args); err != nil {
-				pw.CloseWithError(err)
-				return
-			}
-		}
-
-		if options.Disabled {
-			pw.Close()
-			return
-		}
-
-		enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
-		pw.CloseWithError(enableErr)
-	}()
-	return pr, nil
-}
-
-func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
-	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
-	return cli.get(ctx, "/plugins/privileges", query, headers)
-}
-
-func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) {
-	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
-	return cli.post(ctx, "/plugins/pull", query, privileges, headers)
-}
-
-func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) {
-	resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
-	if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
-		// todo: do inspect before to check existing name before checking privileges
-		newAuthHeader, privilegeErr := options.PrivilegeFunc()
-		if privilegeErr != nil {
-			ensureReaderClosed(resp)
-			return nil, privilegeErr
-		}
-		options.RegistryAuth = newAuthHeader
-		resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
-	}
-	if err != nil {
-		ensureReaderClosed(resp)
-		return nil, err
-	}
-
-	var privileges types.PluginPrivileges
-	if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
-		ensureReaderClosed(resp)
-		return nil, err
-	}
-	ensureReaderClosed(resp)
-
-	if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
-		accept, err := options.AcceptPermissionsFunc(privileges)
-		if err != nil {
-			return nil, err
-		}
-		if !accept {
-			return nil, pluginPermissionDenied{options.RemoteRef}
-		}
-	}
-	return privileges, nil
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go
deleted file mode 100644
index 8285cecd..00000000
--- a/vendor/github.com/docker/docker/client/plugin_list.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-)
-
-// PluginList returns the installed plugins
-func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) {
-	var plugins types.PluginsListResponse
-	query := url.Values{}
-
-	if filter.Len() > 0 {
-		filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
-		if err != nil {
-			return plugins, err
-		}
-		query.Set("filters", filterJSON)
-	}
-	resp, err := cli.get(ctx, "/plugins", query, nil)
-	defer ensureReaderClosed(resp)
-	if err != nil {
-		return plugins, wrapResponseError(err, resp, "plugin", "")
-	}
-
-	err = json.NewDecoder(resp.body).Decode(&plugins)
-	return plugins, err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go
deleted file mode 100644
index d20bfe84..00000000
--- a/vendor/github.com/docker/docker/client/plugin_push.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"io"
-)
-
-// PluginPush pushes a plugin to a registry
-func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) {
-	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
-	resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers)
-	if err != nil {
-		return nil, err
-	}
-	return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go
deleted file mode 100644
index 51ca1040..00000000
--- a/vendor/github.com/docker/docker/client/plugin_remove.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-)
-
-// PluginRemove removes a plugin
-func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error {
-	query := url.Values{}
-	if options.Force {
-		query.Set("force", "1")
-	}
-
-	resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
-	defer ensureReaderClosed(resp)
-	return wrapResponseError(err, resp, "plugin", name)
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go
deleted file mode 100644
index dcf5752c..00000000
--- a/vendor/github.com/docker/docker/client/plugin_set.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-)
-
-// PluginSet modifies settings for an existing plugin
-func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
-	resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go
deleted file mode 100644
index 115cea94..00000000
--- a/vendor/github.com/docker/docker/client/plugin_upgrade.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"io"
-	"net/url"
-
-	"github.com/docker/distribution/reference"
-	"github.com/docker/docker/api/types"
-	"github.com/pkg/errors"
-)
-
-// PluginUpgrade upgrades a plugin
-func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
-	if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil {
-		return nil, err
-	}
-	query := url.Values{}
-	if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
-		return nil, errors.Wrap(err, "invalid remote reference")
-	}
-	query.Set("remote", options.RemoteRef)
-
-	privileges, err := cli.checkPluginPermissions(ctx, query, options)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth)
-	if err != nil {
-		return nil, err
-	}
-	return resp.body, nil
-}
-
-func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) {
-	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
-	return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
deleted file mode 100644
index 3078335e..00000000
--- a/vendor/github.com/docker/docker/client/request.go
+++ /dev/null
@@ -1,273 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// serverResponse is a wrapper for http API responses. -type serverResponse struct { - body io.ReadCloser - header http.Header - statusCode int - reqURL *url.URL -} - -// head sends an http request to the docker API using the method HEAD. -func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) -} - -// get sends an http request to the docker API using the method GET with a specific Go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "GET", path, query, nil, headers) -} - -// post sends an http request to the docker API using the method POST with a specific Go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.sendRequest(ctx, "POST", path, query, body, headers) -} - -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "POST", path, query, body, headers) -} - -// put sends an http request to the docker API using the method PUT. -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.sendRequest(ctx, "PUT", path, query, body, headers) -} - -// putRaw sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "PUT", path, query, body, headers) -} - -// delete sends an http request to the docker API using the method DELETE. 
-func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) -} - -type headers map[string][]string - -func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { - if obj == nil { - return nil, headers, nil - } - - body, err := encodeData(obj) - if err != nil { - return nil, headers, err - } - if headers == nil { - headers = make(map[string][]string) - } - headers["Content-Type"] = []string{"application/json"} - return body, headers, nil -} - -func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { - expectedPayload := (method == "POST" || method == "PUT") - if expectedPayload && body == nil { - body = bytes.NewReader([]byte{}) - } - - req, err := http.NewRequest(method, path, body) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, headers) - - if cli.proto == "unix" || cli.proto == "npipe" { - // For local communications, it doesn't matter what the host is. We just - // need a valid and meaningful host name. (See #189) - req.Host = "docker" - } - - req.URL.Host = cli.addr - req.URL.Scheme = cli.scheme - - if expectedPayload && req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "text/plain") - } - return req, nil -} - -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { - req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) - if err != nil { - return serverResponse{}, err - } - resp, err := cli.doRequest(ctx, req) - if err != nil { - return resp, errdefs.FromStatusCode(err, resp.statusCode) - } - err = cli.checkResponseErr(resp) - return resp, errdefs.FromStatusCode(err, resp.statusCode) -} - -func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { - serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - - req = req.WithContext(ctx) - resp, err := cli.client.Do(req) - if err != nil { - if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) - } - - if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, errors.Wrap(err, "The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings") - } - - // Don't decorate context sentinel errors; users may be comparing to - // them directly. 
- switch err { - case context.Canceled, context.DeadlineExceeded: - return serverResp, err - } - - if nErr, ok := err.(*url.Error); ok { - if nErr, ok := nErr.Err.(*net.OpError); ok { - if os.IsPermission(nErr.Err) { - return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) - } - } - } - - if err, ok := err.(net.Error); ok { - if err.Timeout() { - return serverResp, ErrorConnectionFailed(cli.host) - } - if !err.Temporary() { - if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { - return serverResp, ErrorConnectionFailed(cli.host) - } - } - } - - // Although there's not a strongly typed error for this in go-winio, - // lots of people are using the default configuration for the docker - // daemon on Windows where the daemon is listening on a named pipe - // `//./pipe/docker_engine, and the client must be running elevated. - // Give users a clue rather than the not-overly useful message - // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: - // open //./pipe/docker_engine: The system cannot find the file specified.`. - // Note we can't string compare "The system cannot find the file specified" as - // this is localised - for example in French the error would be - // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` - if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { - err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.") - } - - return serverResp, errors.Wrap(err, "error during connect") - } - - if resp != nil { - serverResp.statusCode = resp.StatusCode - serverResp.body = resp.Body - serverResp.header = resp.Header - } - return serverResp, nil -} - -func (cli *Client) checkResponseErr(serverResp serverResponse) error { - if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { - return nil - } - - var body []byte - var err error - if serverResp.body != nil { - bodyMax := 1 * 1024 * 1024 // 1 MiB - bodyR := &io.LimitedReader{ - R: serverResp.body, - N: int64(bodyMax), - } - body, err = ioutil.ReadAll(bodyR) - if err != nil { - return err - } - if bodyR.N == 0 { - return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) - } - } - if len(body) == 0 { - return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) - } - - var ct string - if serverResp.header != nil { - ct = serverResp.header.Get("Content-Type") - } - - var errorMessage string - if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { - var errorResponse types.ErrorResponse - if err := json.Unmarshal(body, &errorResponse); err != nil { - return errors.Wrap(err, "Error reading JSON") - } - errorMessage = strings.TrimSpace(errorResponse.Message) - } else { - errorMessage = strings.TrimSpace(string(body)) - } - - return errors.Wrap(errors.New(errorMessage), "Error response from daemon") -} - -func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { - // Add CLI Config's HTTP Headers BEFORE we set the Docker headers - // then the user can't 
change OUR headers - for k, v := range cli.customHTTPHeaders { - if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { - continue - } - req.Header.Set(k, v) - } - - if headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - return req -} - -func encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if err := json.NewEncoder(params).Encode(data); err != nil { - return nil, err - } - } - return params, nil -} - -func ensureReaderClosed(response serverResponse) { - if response.body != nil { - // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(ioutil.Discard, response.body, 512) - response.body.Close() - } -} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go deleted file mode 100644 index fd5b9141..00000000 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// SecretCreate creates a new Secret. -func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { - var response types.SecretCreateResponse - if err := cli.NewVersionError("1.25", "secret create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go deleted file mode 100644 index d093916c..00000000 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretInspectWithRaw returns the secret information with raw data -func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { - return swarm.Secret{}, nil, err - } - if id == "" { - return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} - } - resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) - } - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return swarm.Secret{}, nil, err - } - - var secret swarm.Secret - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&secret) - - return secret, body, err -} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go deleted file mode 100644 index a0289c9f..00000000 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - 
"github.com/docker/docker/api/types/swarm" -) - -// SecretList returns the list of secrets. -func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if err := cli.NewVersionError("1.25", "secret list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/secrets", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var secrets []swarm.Secret - err = json.NewDecoder(resp.body).Decode(&secrets) - return secrets, err -} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go deleted file mode 100644 index c16f5558..00000000 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// SecretRemove removes a Secret. -func (cli *Client) SecretRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.25", "secret remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) - defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "secret", id) -} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go deleted file mode 100644 index 164256bb..00000000 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretUpdate attempts to update a Secret -func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { - if err := cli.NewVersionError("1.25", "secret update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go deleted file mode 100644 index 620fc6cf..00000000 --- a/vendor/github.com/docker/docker/client/service_create.go +++ /dev/null @@ -1,166 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// ServiceCreate creates a new Service. 
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var distErr error - - headers := map[string][]string{ - "version": {cli.version}, - } - - if options.EncodedRegistryAuth != "" { - headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} - } - - // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container - if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { - service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} - } - - if err := validateServiceSpec(service); err != nil { - return types.ServiceCreateResponse{}, err - } - - // ensure that the image is tagged - var imgPlatforms []swarm.Platform - if service.TaskTemplate.ContainerSpec != nil { - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - var img string - img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) - if img != "" { - service.TaskTemplate.ContainerSpec.Image = img - } - } - } - - // ensure that the image is tagged - if service.TaskTemplate.PluginSpec != nil { - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - var img string - img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) - if img != "" { - service.TaskTemplate.PluginSpec.Remote = img - } - } - } - - if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { - service.TaskTemplate.Placement = &swarm.Placement{} - } - if len(imgPlatforms) > 0 { - service.TaskTemplate.Placement.Platforms = imgPlatforms - } - - var response types.ServiceCreateResponse - resp, err := cli.post(ctx, "/services/create", nil, service, headers) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - - if distErr != nil { - response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) - } - - return response, err -} - -func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { - distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) - var platforms []swarm.Platform - if err != nil { - return "", nil, err - } - - imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) - - if len(distributionInspect.Platforms) > 0 { - platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) - for _, p := range distributionInspect.Platforms { - // clear architecture field for arm. This is a temporary patch to address - // https://github.com/docker/swarmkit/issues/2294. The issue is that while - // image manifests report "arm" as the architecture, the node reports - // something like "armv7l" (includes the variant), which causes arm images - // to stop working with swarm mode. This patch removes the architecture - // constraint for arm images to ensure tasks get scheduled. 
- arch := p.Architecture - if strings.ToLower(arch) == "arm" { - arch = "" - } - platforms = append(platforms, swarm.Platform{ - Architecture: arch, - OS: p.OS, - }) - } - } - return imageWithDigest, platforms, err -} - -// imageWithDigestString takes an image string and a digest, and updates -// the image string if it didn't originally contain a digest. It returns -// an empty string if there are no updates. -func imageWithDigestString(image string, dgst digest.Digest) string { - namedRef, err := reference.ParseNormalizedNamed(image) - if err == nil { - if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { - // ensure that image gets a default tag if none is provided - img, err := reference.WithDigest(namedRef, dgst) - if err == nil { - return reference.FamiliarString(img) - } - } - } - return "" -} - -// imageWithTagString takes an image string, and returns a tagged image -// string, adding a 'latest' tag if one was not provided. It returns an -// empty string if a canonical reference was provided -func imageWithTagString(image string) string { - namedRef, err := reference.ParseNormalizedNamed(image) - if err == nil { - return reference.FamiliarString(reference.TagNameOnly(namedRef)) - } - return "" -} - -// digestWarning constructs a formatted warning string using the -// image name that could not be pinned by digest. The formatting -// is hardcoded, but could me made smarter in the future -func digestWarning(image string) string { - return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) -} - -func validateServiceSpec(s swarm.ServiceSpec) error { - if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { - return errors.New("must not specify both a container spec and a plugin spec in the task template") - } - if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { - return errors.New("mismatched runtime with plugin spec") - } - if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { - return errors.New("mismatched runtime with container spec") - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go deleted file mode 100644 index 2801483b..00000000 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceInspectWithRaw returns the service information and the raw data. 
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { - if serviceID == "" { - return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} - } - query := url.Values{} - query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) - serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) - } - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return swarm.Service{}, nil, err - } - - var response swarm.Service - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go deleted file mode 100644 index 64d35e71..00000000 --- a/vendor/github.com/docker/docker/client/service_list.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceList returns the list of services. -func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/services", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var services []swarm.Service - err = json.NewDecoder(resp.body).Decode(&services) - return services, err -} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go deleted file mode 100644 index 906fd405..00000000 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ /dev/null @@ -1,52 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ServiceLogs returns the logs generated by a service in an io.ReadCloser. -// It's up to the caller to close the stream. 
-func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go deleted file mode 100644 index 953a2adf..00000000 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ServiceRemove kills and removes a service. -func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { - resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) - defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "service", serviceID) -} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go deleted file mode 100644 index cd0f59e2..00000000 --- a/vendor/github.com/docker/docker/client/service_update.go +++ /dev/null @@ -1,94 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. -// It should be the value as set *before* the update. You can find this value in the Meta field -// of swarm.Service, which can be found using ServiceInspectWithRaw. 
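The ServiceUpdate comment above captures a read-modify-write contract: callers must echo back the version index they observed so the daemon can reject conflicting writes. A minimal caller-side sketch of that cycle, assuming an already-constructed *client.Client; the scale-to-N scenario and the helper name are illustrative, not part of this patch:

package example

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

// scaleService shows the inspect-then-update cycle described in the
// ServiceUpdate doc comment: read the spec and its version, mutate, write back.
func scaleService(ctx context.Context, cli *client.Client, serviceID string, replicas uint64) error {
	// Read the current spec together with its version index (svc.Version).
	svc, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return err
	}
	spec := svc.Spec
	if spec.Mode.Replicated == nil {
		spec.Mode.Replicated = &swarm.ReplicatedService{}
	}
	spec.Mode.Replicated.Replicas = &replicas
	// Echo the pre-update version; a stale index makes the daemon reject the write.
	_, err = cli.ServiceUpdate(ctx, serviceID, svc.Version, spec, types.ServiceUpdateOptions{})
	return err
}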
-func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { - var ( - query = url.Values{} - distErr error - ) - - headers := map[string][]string{ - "version": {cli.version}, - } - - if options.EncodedRegistryAuth != "" { - headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} - } - - if options.RegistryAuthFrom != "" { - query.Set("registryAuthFrom", options.RegistryAuthFrom) - } - - if options.Rollback != "" { - query.Set("rollback", options.Rollback) - } - - query.Set("version", strconv.FormatUint(version.Index, 10)) - - if err := validateServiceSpec(service); err != nil { - return types.ServiceUpdateResponse{}, err - } - - var imgPlatforms []swarm.Platform - // ensure that the image is tagged - if service.TaskTemplate.ContainerSpec != nil { - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - var img string - img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) - if img != "" { - service.TaskTemplate.ContainerSpec.Image = img - } - } - } - - // ensure that the image is tagged - if service.TaskTemplate.PluginSpec != nil { - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - var img string - img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) - if img != "" { - service.TaskTemplate.PluginSpec.Remote = img - } - } - } - - if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { - service.TaskTemplate.Placement = &swarm.Placement{} - } - if len(imgPlatforms) > 0 { - service.TaskTemplate.Placement.Platforms = imgPlatforms - } - - var response types.ServiceUpdateResponse - resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - - if distErr != nil { - response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) - } - - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go deleted file mode 100644 index 19f59dd5..00000000 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// SwarmGetUnlockKey retrieves the swarm's unlock key. 
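SwarmGetUnlockKey pairs with SwarmUnlock (deleted further below): with autolock enabled, the key fetched while a manager is still unlocked is what brings it back after a restart. A hedged sketch of that pairing; keeping the key in a local variable stands in for whatever secret store a real caller would use, and the sequencing comments describe intent rather than a single uninterrupted call chain:

package example

import (
	"context"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func unlockAfterRestart(ctx context.Context, cli *client.Client) error {
	// Fetch the current unlock key while the manager is still unlocked,
	// and persist it somewhere safe before any restart.
	keyResp, err := cli.SwarmGetUnlockKey(ctx)
	if err != nil {
		return err
	}
	// After an autolocked manager restarts, replay the stored key.
	return cli.SwarmUnlock(ctx, swarm.UnlockRequest{UnlockKey: keyResp.UnlockKey})
}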
-func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { - serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.SwarmUnlockKeyResponse{}, err - } - - var response types.SwarmUnlockKeyResponse - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go deleted file mode 100644 index da3c1637..00000000 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmInit initializes the swarm. -func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { - serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return "", err - } - - var response string - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go deleted file mode 100644 index b52b67a8..00000000 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmInspect inspects the swarm. -func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { - serverResp, err := cli.get(ctx, "/swarm", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Swarm{}, err - } - - var response swarm.Swarm - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go deleted file mode 100644 index a1cf0455..00000000 --- a/vendor/github.com/docker/docker/client/swarm_join.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmJoin joins the swarm. -func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { - resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go deleted file mode 100644 index 90ca84b3..00000000 --- a/vendor/github.com/docker/docker/client/swarm_leave.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// SwarmLeave leaves the swarm. 
-func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { - query := url.Values{} - if force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go deleted file mode 100644 index d2412f7d..00000000 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmUnlock unlocks locked swarm. -func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { - serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) - ensureReaderClosed(serverResp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go deleted file mode 100644 index 56a5bea7..00000000 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "fmt" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmUpdate updates the swarm. -func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { - query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) - query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) - query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) - resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go deleted file mode 100644 index 44d40ba5..00000000 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" -) - -// TaskInspectWithRaw returns the task information and its raw representation.. 
-func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { - if taskID == "" { - return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} - } - serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) - } - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return swarm.Task{}, nil, err - } - - var response swarm.Task - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go deleted file mode 100644 index 4869b444..00000000 --- a/vendor/github.com/docker/docker/client/task_list.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// TaskList returns the list of tasks. -func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/tasks", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var tasks []swarm.Task - err = json.NewDecoder(resp.body).Decode(&tasks) - return tasks, err -} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go deleted file mode 100644 index 6222fab5..00000000 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ /dev/null @@ -1,51 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" -) - -// TaskLogs returns the logs generated by a task in an io.ReadCloser. -// It's up to the caller to close the stream. 
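Both log helpers here hand back an io.ReadCloser and, as their comments say, never close it themselves. A minimal sketch of draining and releasing the stream (the task ID and options are placeholders); note that for non-TTY tasks the bytes are stdcopy-multiplexed, which this sketch deliberately does not demultiplex:

package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func dumpTaskLogs(ctx context.Context, cli *client.Client, taskID string) error {
	rc, err := cli.TaskLogs(ctx, taskID, types.ContainerLogsOptions{
		ShowStdout: true,
		Tail:       "100",
	})
	if err != nil {
		return err
	}
	// The client never closes this stream; the caller owns it.
	defer rc.Close()
	_, err = io.Copy(os.Stdout, rc)
	return err
}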
-func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go deleted file mode 100644 index 55413443..00000000 --- a/vendor/github.com/docker/docker/client/transport.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "crypto/tls" - "net/http" -) - -// resolveTLSConfig attempts to resolve the TLS configuration from the -// RoundTripper. -func resolveTLSConfig(transport http.RoundTripper) *tls.Config { - switch tr := transport.(type) { - case *http.Transport: - return tr.TLSClientConfig - default: - return nil - } -} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go deleted file mode 100644 index 7f3ff44e..00000000 --- a/vendor/github.com/docker/docker/client/utils.go +++ /dev/null @@ -1,34 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "regexp" - - "github.com/docker/docker/api/types/filters" -) - -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) - -// getDockerOS returns the operating system based on the server header from the daemon. -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} - -// getFiltersQuery returns a url query with "filters" query term, based on the -// filters provided. -func getFiltersQuery(f filters.Args) (url.Values, error) { - query := url.Values{} - if f.Len() > 0 { - filterJSON, err := filters.ToJSON(f) - if err != nil { - return query, err - } - query.Set("filters", filterJSON) - } - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go deleted file mode 100644 index 8f17ff4e..00000000 --- a/vendor/github.com/docker/docker/client/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// ServerVersion returns information of the docker client and server host. 
-func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { - resp, err := cli.get(ctx, "/version", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return types.Version{}, err - } - - var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) - return server, err -} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go deleted file mode 100644 index 92761b3c..00000000 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - volumetypes "github.com/docker/docker/api/types/volume" -) - -// VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { - var volume types.Volume - resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) - defer ensureReaderClosed(resp) - if err != nil { - return volume, err - } - err = json.NewDecoder(resp.body).Decode(&volume) - return volume, err -} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go deleted file mode 100644 index e20b2c67..00000000 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types" -) - -// VolumeInspect returns the information about a specific volume in the docker host. -func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { - volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return volume, err -} - -// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { - if volumeID == "" { - return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} - } - - var volume types.Volume - resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return volume, nil, wrapResponseError(err, resp, "volume", volumeID) - } - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return volume, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&volume) - return volume, body, err -} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go deleted file mode 100644 index 2380d563..00000000 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - volumetypes "github.com/docker/docker/api/types/volume" -) - -// VolumeList returns the volumes configured in the docker host. 
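The filters plumbing repeated across these list methods looks the same from the caller's side: build a filters.Args and the client serializes it into the "filters" query parameter. A small sketch against the VolumeList signature being deleted here; the "dangling" filter is an illustrative choice, not something this patch prescribes:

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func listDangling(ctx context.Context, cli *client.Client) error {
	f := filters.NewArgs()
	// Serialized by the client via filters.ToParamWithVersion, as above.
	f.Add("dangling", "true")
	body, err := cli.VolumeList(ctx, f)
	if err != nil {
		return err
	}
	for _, v := range body.Volumes {
		fmt.Println(v.Name)
	}
	return nil
}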
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { - var volumes volumetypes.VolumeListOKBody - query := url.Values{} - - if filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return volumes, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/volumes", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return volumes, err - } - - err = json.NewDecoder(resp.body).Decode(&volumes) - return volumes, err -} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go deleted file mode 100644 index 6e324708..00000000 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// VolumesPrune requests the daemon to delete unused data -func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { - var report types.VolumesPruneReport - - if err := cli.NewVersionError("1.25", "volume prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving volume prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go deleted file mode 100644 index 79decdaf..00000000 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/versions" -) - -// VolumeRemove removes a volume from the docker host. -func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { - query := url.Values{} - if versions.GreaterThanOrEqualTo(cli.version, "1.25") { - if force { - query.Set("force", "1") - } - } - resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) - defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "volume", volumeID) -} diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index 61e7456b..00000000 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,69 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. -// A change in state should be able to clear this error. 
-type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. -// When a ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known. -type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. -type ErrDataLoss interface { - DataLoss() -} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index c211f174..00000000 --- a/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
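The errdefs package comment spells out the intended usage: packages implement these interfaces, but consumers test for them through the helpers so the causal chain is walked automatically. A brief caller-side sketch, assuming err came back from any of the client methods removed above:

package example

import (
	"fmt"

	"github.com/docker/docker/errdefs"
)

func classify(err error) {
	switch {
	case err == nil:
		// Nothing to do.
	case errdefs.IsNotFound(err):
		fmt.Println("object is already gone; treat a delete as a no-op")
	case errdefs.IsInvalidParameter(err):
		fmt.Println("the request itself was malformed; do not retry as-is")
	default:
		fmt.Printf("unclassified error: %v\n", err)
	}
}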
-package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index c9916e01..00000000 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,227 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -import "context" - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -// NotFound is a helper to create an error of the class with the same name from any error type -func NotFound(err error) error { - if err == nil || IsNotFound(err) { - return err - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -// InvalidParameter is a helper to create an error of the class with the same name from any error type -func InvalidParameter(err error) error { - if err == nil || IsInvalidParameter(err) { - return err - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -// Conflict is a helper to create an error of the class with the same name from any error type -func Conflict(err error) error { - if err == nil || IsConflict(err) { - return err - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -// Unauthorized is a helper to create an error of the class with the same name from any error type -func Unauthorized(err error) error { - if err == nil || IsUnauthorized(err) { - return err - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -// Unavailable is a helper to create an error of the class with the same name from any error type -func Unavailable(err error) error { - if err == nil || IsUnavailable(err) { - return err - } - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -// Forbidden is a helper to create an error of the class with the same name from any error type -func Forbidden(err error) error { - if err == nil || IsForbidden(err) { - return err - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -// System is a helper to create an error of the class with the same name from any error type -func System(err error) error { - if err == nil || IsSystem(err) { - return err - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error -} - -// NotModified is a helper to create an error of the class with the same name from any error type -func NotModified(err error) error { - if err == nil || IsNotModified(err) { - return err - } - return errNotModified{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -// NotImplemented is a helper to create an error of the 
class with the same name from any error type -func NotImplemented(err error) error { - if err == nil || IsNotImplemented(err) { - return err - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -// Unknown is a helper to create an error of the class with the same name from any error type -func Unknown(err error) error { - if err == nil || IsUnknown(err) { - return err - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -// Cancelled is a helper to create an error of the class with the same name from any error type -func Cancelled(err error) error { - if err == nil || IsCancelled(err) { - return err - } - return errCancelled{err} -} - -type errDeadline struct{ error } - -func (errDeadline) DeadlineExceeded() {} - -func (e errDeadline) Cause() error { - return e.error -} - -// Deadline is a helper to create an error of the class with the same name from any error type -func Deadline(err error) error { - if err == nil || IsDeadline(err) { - return err - } - return errDeadline{err} -} - -type errDataLoss struct{ error } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) Cause() error { - return e.error -} - -// DataLoss is a helper to create an error of the class with the same name from any error type -func DataLoss(err error) error { - if err == nil || IsDataLoss(err) { - return err - } - return errDataLoss{err} -} - -// FromContext returns the error class from the passed in context -func FromContext(ctx context.Context) error { - e := ctx.Err() - if e == nil { - return nil - } - - if e == context.Canceled { - return Cancelled(e) - } - if e == context.DeadlineExceeded { - return Deadline(e) - } - return Unknown(e) -} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go deleted file mode 100644 index ac9bf6d3..00000000 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ /dev/null @@ -1,172 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -import ( - "fmt" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// GetHTTPErrorStatusCode retrieves status code from error message. -func GetHTTPErrorStatusCode(err error) int { - if err == nil { - logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") - return http.StatusInternalServerError - } - - var statusCode int - - // Stop right there - // Are you sure you should be adding a new error class here? Do one of the existing ones work? - - // Note that the below functions are already checking the error causal chain for matches. 
-    switch {
-    case IsNotFound(err):
-        statusCode = http.StatusNotFound
-    case IsInvalidParameter(err):
-        statusCode = http.StatusBadRequest
-    case IsConflict(err):
-        statusCode = http.StatusConflict
-    case IsUnauthorized(err):
-        statusCode = http.StatusUnauthorized
-    case IsUnavailable(err):
-        statusCode = http.StatusServiceUnavailable
-    case IsForbidden(err):
-        statusCode = http.StatusForbidden
-    case IsNotModified(err):
-        statusCode = http.StatusNotModified
-    case IsNotImplemented(err):
-        statusCode = http.StatusNotImplemented
-    case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err):
-        statusCode = http.StatusInternalServerError
-    default:
-        statusCode = statusCodeFromGRPCError(err)
-        if statusCode != http.StatusInternalServerError {
-            return statusCode
-        }
-        statusCode = statusCodeFromDistributionError(err)
-        if statusCode != http.StatusInternalServerError {
-            return statusCode
-        }
-        if e, ok := err.(causer); ok {
-            return GetHTTPErrorStatusCode(e.Cause())
-        }
-
-        logrus.WithFields(logrus.Fields{
-            "module":     "api",
-            "error_type": fmt.Sprintf("%T", err),
-        }).Debugf("FIXME: Got an API error that does not match any expected type!!!: %+v", err)
-    }
-
-    if statusCode == 0 {
-        statusCode = http.StatusInternalServerError
-    }
-
-    return statusCode
-}
-
-// FromStatusCode creates an errdef error, based on the provided HTTP status-code
-func FromStatusCode(err error, statusCode int) error {
-    if err == nil {
-        return err
-    }
-    switch statusCode {
-    case http.StatusNotFound:
-        err = NotFound(err)
-    case http.StatusBadRequest:
-        err = InvalidParameter(err)
-    case http.StatusConflict:
-        err = Conflict(err)
-    case http.StatusUnauthorized:
-        err = Unauthorized(err)
-    case http.StatusServiceUnavailable:
-        err = Unavailable(err)
-    case http.StatusForbidden:
-        err = Forbidden(err)
-    case http.StatusNotModified:
-        err = NotModified(err)
-    case http.StatusNotImplemented:
-        err = NotImplemented(err)
-    case http.StatusInternalServerError:
-        if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) {
-            err = System(err)
-        }
-    default:
-        logrus.WithFields(logrus.Fields{
-            "module":      "api",
-            "status_code": fmt.Sprintf("%d", statusCode),
-        }).Debugf("FIXME: Got a status-code for which the error does not match any expected type!!!: %d", statusCode)
-
-        switch {
-        case statusCode >= 200 && statusCode < 400:
-            // it's a success or a redirect; leave the error as-is
-        case statusCode >= 400 && statusCode < 500:
-            err = InvalidParameter(err)
-        case statusCode >= 500 && statusCode < 600:
-            err = System(err)
-        default:
-            err = Unknown(err)
-        }
-    }
-    return err
-}
-
-// statusCodeFromGRPCError returns status code according to gRPC error
-func statusCodeFromGRPCError(err error) int {
-    switch status.Code(err) {
-    case codes.InvalidArgument: // code 3
-        return http.StatusBadRequest
-    case codes.NotFound: // code 5
-        return http.StatusNotFound
-    case codes.AlreadyExists: // code 6
-        return http.StatusConflict
-    case codes.PermissionDenied: // code 7
-        return http.StatusForbidden
-    case codes.FailedPrecondition: // code 9
-        return http.StatusBadRequest
-    case codes.Unauthenticated: // code 16
-        return http.StatusUnauthorized
-    case codes.OutOfRange: // code 11
-        return http.StatusBadRequest
-    case codes.Unimplemented: // code 12
-        return http.StatusNotImplemented
-    case codes.Unavailable: // code 14
-        return http.StatusServiceUnavailable
-    default:
-        if e, ok := err.(causer); ok {
-            return statusCodeFromGRPCError(e.Cause())
-        }
-        // codes.Canceled(1)
-        // codes.Unknown(2)
-        // codes.DeadlineExceeded(4)
-        // codes.ResourceExhausted(8)
-        // codes.Aborted(10)
-        // codes.Internal(13)
-        // codes.DataLoss(15)
-        return http.StatusInternalServerError
-    }
-}
-
-// statusCodeFromDistributionError returns status code according to registry errcode
-// code is loosely based on errcode.ServeJSON() in docker/distribution
-func statusCodeFromDistributionError(err error) int {
-    switch errs := err.(type) {
-    case errcode.Errors:
-        if len(errs) < 1 {
-            return http.StatusInternalServerError
-        }
-        if _, ok := errs[0].(errcode.ErrorCoder); ok {
-            return statusCodeFromDistributionError(errs[0])
-        }
-    case errcode.ErrorCoder:
-        return errs.ErrorCode().Descriptor().HTTPStatusCode
-    default:
-        if e, ok := err.(causer); ok {
-            return statusCodeFromDistributionError(e.Cause())
-        }
-    }
-    return http.StatusInternalServerError
-}
diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go
deleted file mode 100644
index 3abf07d0..00000000
--- a/vendor/github.com/docker/docker/errdefs/is.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package errdefs // import "github.com/docker/docker/errdefs"
-
-type causer interface {
-    Cause() error
-}
-
-func getImplementer(err error) error {
-    switch e := err.(type) {
-    case
-        ErrNotFound,
-        ErrInvalidParameter,
-        ErrConflict,
-        ErrUnauthorized,
-        ErrUnavailable,
-        ErrForbidden,
-        ErrSystem,
-        ErrNotModified,
-        ErrNotImplemented,
-        ErrCancelled,
-        ErrDeadline,
-        ErrDataLoss,
-        ErrUnknown:
-        return err
-    case causer:
-        return getImplementer(e.Cause())
-    default:
-        return err
-    }
-}
-
-// IsNotFound returns if the passed in error is an ErrNotFound
-func IsNotFound(err error) bool {
-    _, ok := getImplementer(err).(ErrNotFound)
-    return ok
-}
-
-// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter
-func IsInvalidParameter(err error) bool {
-    _, ok := getImplementer(err).(ErrInvalidParameter)
-    return ok
-}
-
-// IsConflict returns if the passed in error is an ErrConflict
-func IsConflict(err error) bool {
-    _, ok := getImplementer(err).(ErrConflict)
-    return ok
-}
-
-// IsUnauthorized returns if the passed in error is an ErrUnauthorized
-func IsUnauthorized(err error) bool {
-    _, ok := getImplementer(err).(ErrUnauthorized)
-    return ok
-}
-
-// IsUnavailable returns if the passed in error is an ErrUnavailable
-func IsUnavailable(err error) bool {
-    _, ok := getImplementer(err).(ErrUnavailable)
-    return ok
-}
-
-// IsForbidden returns if the passed in error is an ErrForbidden
-func IsForbidden(err error) bool {
-    _, ok := getImplementer(err).(ErrForbidden)
-    return ok
-}
-
-// IsSystem returns if the passed in error is an ErrSystem
-func IsSystem(err error) bool {
-    _, ok := getImplementer(err).(ErrSystem)
-    return ok
-}
-
-// IsNotModified returns if the passed in error is a NotModified error
-func IsNotModified(err error) bool {
-    _, ok := getImplementer(err).(ErrNotModified)
-    return ok
-}
-
-// IsNotImplemented returns if the passed in error is an ErrNotImplemented
-func IsNotImplemented(err error) bool {
-    _, ok := getImplementer(err).(ErrNotImplemented)
-    return ok
-}
-
-// IsUnknown returns if the passed in error is an ErrUnknown
-func IsUnknown(err error) bool {
-    _, ok := getImplementer(err).(ErrUnknown)
-    return ok
-}
-
-// IsCancelled returns if the passed in error is an ErrCancelled
-func IsCancelled(err error) bool {
-    _, ok := getImplementer(err).(ErrCancelled)
-    return ok
-}
-
-// IsDeadline returns if the passed in error is an ErrDeadline
-func IsDeadline(err error) bool {
-    _, ok := getImplementer(err).(ErrDeadline)
-    return ok
-}
-
-// IsDataLoss returns if the passed in error is an ErrDataLoss
-func IsDataLoss(err error) bool {
-    _, ok := getImplementer(err).(ErrDataLoss)
-    return ok
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
deleted file mode 100644
index 0601f7b0..00000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
+++ /dev/null
@@ -1,261 +0,0 @@
-package archive // import "github.com/docker/docker/pkg/archive"
-
-import (
-    "archive/tar"
-    "fmt"
-    "io/ioutil"
-    "os"
-    "path/filepath"
-    "strings"
-    "syscall"
-
-    "github.com/containerd/continuity/fs"
-    "github.com/docker/docker/pkg/system"
-    "github.com/pkg/errors"
-    "golang.org/x/sys/unix"
-)
-
-func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter {
-    if format == OverlayWhiteoutFormat {
-        return overlayWhiteoutConverter{inUserNS: inUserNS}
-    }
-    return nil
-}
-
-type overlayWhiteoutConverter struct {
-    inUserNS bool
-}
-
-func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
-    // convert whiteouts to AUFS format
-    if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
-        // we just rename the file and make it normal
-        dir, filename := filepath.Split(hdr.Name)
-        hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
-        hdr.Mode = 0600
-        hdr.Typeflag = tar.TypeReg
-        hdr.Size = 0
-    }
-
-    if fi.Mode()&os.ModeDir != 0 {
-        // convert opaque dirs to AUFS format by writing an empty file with the prefix
-        opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
-        if err != nil {
-            return nil, err
-        }
-        if len(opaque) == 1 && opaque[0] == 'y' {
-            if hdr.Xattrs != nil {
-                delete(hdr.Xattrs, "trusted.overlay.opaque")
-            }
-
-            // create a header for the whiteout file
-            // it should inherit some properties from the parent, but be a regular file
-            wo = &tar.Header{
-                Typeflag:   tar.TypeReg,
-                Mode:       hdr.Mode & int64(os.ModePerm),
-                Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir),
-                Size:       0,
-                Uid:        hdr.Uid,
-                Uname:      hdr.Uname,
-                Gid:        hdr.Gid,
-                Gname:      hdr.Gname,
-                AccessTime: hdr.AccessTime,
-                ChangeTime: hdr.ChangeTime,
-            }
-        }
-    }
-
-    return
-}
-
-func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
-    base := filepath.Base(path)
-    dir := filepath.Dir(path)
-
-    // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
-    if base == WhiteoutOpaqueDir {
-        err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
-        if err != nil {
-            if c.inUserNS {
-                if err = replaceDirWithOverlayOpaque(dir); err != nil {
-                    return false, errors.Wrapf(err, "replaceDirWithOverlayOpaque(%q) failed", dir)
-                }
-            } else {
-                return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir)
-            }
-        }
-        // don't write the file itself
-        return false, err
-    }
-
-    // if a file was deleted and we are using overlay, we need to create a character device
-    if strings.HasPrefix(base, WhiteoutPrefix) {
-        originalBase := base[len(WhiteoutPrefix):]
-        originalPath := filepath.Join(dir, originalBase)
-
-        if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
-            if c.inUserNS {
-                // Ubuntu and a few distros support overlayfs in userns.
-                //
-                // Although we can't call mknod directly in userns (at least on bionic kernel 4.15),
-                // we can still create 0,0 char device using mknodChar0Overlay().
-                //
-                // NOTE: we don't need this hack for the containerd snapshotter+unpack model.
-                if err := mknodChar0Overlay(originalPath); err != nil {
-                    return false, errors.Wrapf(err, "failed to mknodChar0UserNS(%q)", originalPath)
-                }
-            } else {
-                return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
-            }
-        }
-        if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
-            return false, err
-        }
-
-        // don't write the file itself
-        return false, nil
-    }
-
-    return true, nil
-}
-
-// mknodChar0Overlay creates 0,0 char device by mounting overlayfs and unlinking.
-// This function can be used for creating 0,0 char device in userns on Ubuntu.
-//
-// Steps:
-// * Mkdir lower,upper,merged,work
-// * Create lower/dummy
-// * Mount overlayfs
-// * Unlink merged/dummy
-// * Unmount overlayfs
-// * Make sure a 0,0 char device is created as upper/dummy
-// * Rename upper/dummy to cleansedOriginalPath
-func mknodChar0Overlay(cleansedOriginalPath string) error {
-    dir := filepath.Dir(cleansedOriginalPath)
-    tmp, err := ioutil.TempDir(dir, "mc0o")
-    if err != nil {
-        return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
-    }
-    defer os.RemoveAll(tmp)
-    lower := filepath.Join(tmp, "l")
-    upper := filepath.Join(tmp, "u")
-    work := filepath.Join(tmp, "w")
-    merged := filepath.Join(tmp, "m")
-    for _, s := range []string{lower, upper, work, merged} {
-        if err := os.MkdirAll(s, 0700); err != nil {
-            return errors.Wrapf(err, "failed to mkdir %s", s)
-        }
-    }
-    dummyBase := "d"
-    lowerDummy := filepath.Join(lower, dummyBase)
-    if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil {
-        return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy)
-    }
-    mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
-    // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
-    if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
-        return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
-    }
-    mergedDummy := filepath.Join(merged, dummyBase)
-    if err := os.Remove(mergedDummy); err != nil {
-        syscall.Unmount(merged, 0)
-        return errors.Wrapf(err, "failed to unlink %s", mergedDummy)
-    }
-    if err := syscall.Unmount(merged, 0); err != nil {
-        return errors.Wrapf(err, "failed to unmount %s", merged)
-    }
-    upperDummy := filepath.Join(upper, dummyBase)
-    if err := isChar0(upperDummy); err != nil {
-        return err
-    }
-    if err := os.Rename(upperDummy, cleansedOriginalPath); err != nil {
-        return errors.Wrapf(err, "failed to rename %s to %s", upperDummy, cleansedOriginalPath)
-    }
-    return nil
-}
-
-func isChar0(path string) error {
-    osStat, err := os.Stat(path)
-    if err != nil {
-        return errors.Wrapf(err, "failed to stat %s", path)
-    }
-    st, ok := osStat.Sys().(*syscall.Stat_t)
-    if !ok {
-        return errors.Errorf("got unsupported stat for %s", path)
-    }
-    if os.FileMode(st.Mode)&syscall.S_IFMT != syscall.S_IFCHR {
-        return errors.Errorf("%s is not a character device, got mode=%d", path, st.Mode)
-    }
-    if st.Rdev != 0 {
-        return errors.Errorf("%s is not a 0,0 character device, got Rdev=%d", path, st.Rdev)
-    }
-    return nil
-}
-
-// replaceDirWithOverlayOpaque replaces path with a new directory with trusted.overlay.opaque
-// xattr. The contents of the directory are preserved.
-func replaceDirWithOverlayOpaque(path string) error {
-    if path == "/" {
-        return errors.New("replaceDirWithOverlayOpaque: path must not be \"/\"")
-    }
-    dir := filepath.Dir(path)
-    tmp, err := ioutil.TempDir(dir, "rdwoo")
-    if err != nil {
-        return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
-    }
-    defer os.RemoveAll(tmp)
-    // newPath is a new empty directory crafted with trusted.overlay.opaque xattr.
-    // we copy the content of path into newPath, remove path, and rename newPath to path.
-    newPath, err := createDirWithOverlayOpaque(tmp)
-    if err != nil {
-        return errors.Wrapf(err, "createDirWithOverlayOpaque(%q) failed", tmp)
-    }
-    if err := fs.CopyDir(newPath, path); err != nil {
-        return errors.Wrapf(err, "CopyDir(%q, %q) failed", newPath, path)
-    }
-    if err := os.RemoveAll(path); err != nil {
-        return err
-    }
-    return os.Rename(newPath, path)
-}
-
-// createDirWithOverlayOpaque creates a directory with trusted.overlay.opaque xattr,
-// without calling setxattr, so as to allow creating opaque dir in userns on Ubuntu.
-func createDirWithOverlayOpaque(tmp string) (string, error) {
-    lower := filepath.Join(tmp, "l")
-    upper := filepath.Join(tmp, "u")
-    work := filepath.Join(tmp, "w")
-    merged := filepath.Join(tmp, "m")
-    for _, s := range []string{lower, upper, work, merged} {
-        if err := os.MkdirAll(s, 0700); err != nil {
-            return "", errors.Wrapf(err, "failed to mkdir %s", s)
-        }
-    }
-    dummyBase := "d"
-    lowerDummy := filepath.Join(lower, dummyBase)
-    if err := os.MkdirAll(lowerDummy, 0700); err != nil {
-        return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy)
-    }
-    mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
-    // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
-    if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
-        return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
-    }
-    mergedDummy := filepath.Join(merged, dummyBase)
-    if err := os.Remove(mergedDummy); err != nil {
-        syscall.Unmount(merged, 0)
-        return "", errors.Wrapf(err, "failed to rmdir %s", mergedDummy)
-    }
-    // upperDummy becomes a 0,0-char device file here
-    if err := os.Mkdir(mergedDummy, 0700); err != nil {
-        syscall.Unmount(merged, 0)
-        return "", errors.Wrapf(err, "failed to mkdir %s", mergedDummy)
-    }
-    // upperDummy becomes a directory with trusted.overlay.opaque xattr
-    // (but can't be verified in userns)
-    if err := syscall.Unmount(merged, 0); err != nil {
-        return "", errors.Wrapf(err, "failed to unmount %s", merged)
-    }
-    upperDummy := filepath.Join(upper, dummyBase)
-    return upperDummy, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
deleted file mode 100644
index 65a73354..00000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !linux
-
-package archive // import "github.com/docker/docker/pkg/archive"
-
-func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter {
-    return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
deleted file mode 100644
index 9906685e..00000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package archive // import "github.com/docker/docker/pkg/archive"
-
-import (
-    "os"
-
-    "github.com/docker/docker/pkg/system"
-)
-
-func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
-    // Note there is slight difference between the Linux and Windows
-    // implementations here. Due to https://github.com/moby/moby/issues/9874,
-    // and the fix at https://github.com/moby/moby/pull/11422, Linux does not
-    // consider a change to the directory time as a change. Windows on NTFS
-    // does. See https://github.com/moby/moby/pull/37982 for more information.
-
-    if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) ||
-        oldStat.Mode() != newStat.Mode() ||
-        oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
-        return true
-    }
-    return false
-}
-
-func (info *FileInfo) isDir() bool {
-    return info.parent == nil || info.stat.Mode().IsDir()
-}
-
-func getIno(fi os.FileInfo) (inode uint64) {
-    return
-}
-
-func hasHardlinks(fi os.FileInfo) bool {
-    return false
-}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
deleted file mode 100644
index 4ae38a1b..00000000
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package idtools // import "github.com/docker/docker/pkg/idtools"
-
-import (
-    "os"
-
-    "github.com/docker/docker/pkg/system"
-)
-
-// This is currently a wrapper around MkdirAll, however, since currently
-// permissions aren't set through this path, the identity isn't utilized.
-// Ownership is handled elsewhere, but in the future could be supported here
-// too.
-func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
-    if err := system.MkdirAll(path, mode, ""); err != nil {
-        return err
-    }
-    return nil
-}
-
-// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
-// if that uid, gid pair has access (execute bit) to the directory
-// Windows does not require/support this function, so always return true
-func CanAccess(path string, identity Identity) bool {
-    return true
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go
deleted file mode 100644
index 4afd63c4..00000000
--- a/vendor/github.com/docker/docker/pkg/mount/mount.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
-    "sort"
-    "strconv"
-    "strings"
-
-    "github.com/sirupsen/logrus"
-)
-
-// mountError records an error from mount or unmount operation
-type mountError struct {
-    op             string
-    source, target string
-    flags          uintptr
-    data           string
-    err            error
-}
-
-func (e *mountError) Error() string {
-    out := e.op + " "
-
-    if e.source != "" {
-        out += e.source + ":" + e.target
-    } else {
-        out += e.target
-    }
-
-    if e.flags != uintptr(0) {
-        out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16)
-    }
-    if e.data != "" {
-        out += ", data: " + e.data
-    }
-
-    out += ": " + e.err.Error()
-    return out
-}
-
-// Cause returns the underlying cause of the error
-func (e *mountError) Cause() error {
-    return e.err
-}
-
-// FilterFunc is a type defining a callback function
-// to filter out unwanted entries. It takes a pointer
-// to an Info struct (not fully populated, currently
-// only Mountpoint is filled in), and returns two booleans:
-//  - skip: true if the entry should be skipped
-//  - stop: true if parsing should be stopped after the entry
-type FilterFunc func(*Info) (skip, stop bool)
-
-// PrefixFilter discards all entries whose mount points
-// do not start with a prefix specified
-func PrefixFilter(prefix string) FilterFunc {
-    return func(m *Info) (bool, bool) {
-        skip := !strings.HasPrefix(m.Mountpoint, prefix)
-        return skip, false
-    }
-}
-
-// SingleEntryFilter looks for a specific entry
-func SingleEntryFilter(mp string) FilterFunc {
-    return func(m *Info) (bool, bool) {
-        if m.Mountpoint == mp {
-            return false, true // don't skip, stop now
-        }
-        return true, false // skip, keep going
-    }
-}
-
-// ParentsFilter returns all entries whose mount points
-// can be parents of a path specified, discarding others.
-// For example, given `/var/lib/docker/something`, entries
-// like `/var/lib/docker`, `/var` and `/` are returned.
-func ParentsFilter(path string) FilterFunc {
-    return func(m *Info) (bool, bool) {
-        skip := !strings.HasPrefix(path, m.Mountpoint)
-        return skip, false
-    }
-}
-
-// GetMounts retrieves a list of mounts for the current running process,
-// with an optional filter applied (use nil for no filter).
-func GetMounts(f FilterFunc) ([]*Info, error) {
-    return parseMountTable(f)
-}
-
-// Mounted determines if a specified mountpoint has been mounted.
-// On Linux it looks at /proc/self/mountinfo.
-func Mounted(mountpoint string) (bool, error) {
-    entries, err := GetMounts(SingleEntryFilter(mountpoint))
-    if err != nil {
-        return false, err
-    }
-
-    return len(entries) > 0, nil
-}
-
-// Mount will mount filesystem according to the specified configuration, on the
-// condition that the target path is *not* already mounted. Options must be
-// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
-// flags.go for supported option flags.
-func Mount(device, target, mType, options string) error {
-    flag, _ := parseOptions(options)
-    if flag&REMOUNT != REMOUNT {
-        if mounted, err := Mounted(target); err != nil || mounted {
-            return err
-        }
-    }
-    return ForceMount(device, target, mType, options)
-}
-
-// ForceMount will mount a filesystem according to the specified configuration,
-// *regardless* if the target path is not already mounted. Options must be
-// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
-// flags.go for supported option flags.
-func ForceMount(device, target, mType, options string) error {
-    flag, data := parseOptions(options)
-    return mount(device, target, mType, uintptr(flag), data)
-}
-
-// Unmount lazily unmounts a filesystem on supported platforms, otherwise
-// does a normal unmount.
-func Unmount(target string) error {
-    return unmount(target, mntDetach)
-}
-
-// RecursiveUnmount unmounts the target and all mounts underneath, starting with
-// the deepest mount first.
-func RecursiveUnmount(target string) error {
-    mounts, err := parseMountTable(PrefixFilter(target))
-    if err != nil {
-        return err
-    }
-
-    // Make the deepest mount be first
-    sort.Slice(mounts, func(i, j int) bool {
-        return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
-    })
-
-    for i, m := range mounts {
-        logrus.Debugf("Trying to unmount %s", m.Mountpoint)
-        err = unmount(m.Mountpoint, mntDetach)
-        if err != nil {
-            if i == len(mounts)-1 { // last mount
-                if mounted, e := Mounted(m.Mountpoint); e != nil || mounted {
-                    return err
-                }
-            } else {
-                // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem
-                logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
-            }
-        }
-
-        logrus.Debugf("Unmounted %s", m.Mountpoint)
-    }
-    return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
deleted file mode 100644
index c3e5aec2..00000000
--- a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !linux,!freebsd freebsd,!cgo
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-func mount(device, target, mType string, flag uintptr, data string) error {
-    panic("Not implemented")
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
deleted file mode 100644
index fe6e3ddb..00000000
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
-    "bufio"
-    "fmt"
-    "io"
-    "os"
-    "strconv"
-    "strings"
-
-    "github.com/pkg/errors"
-)
-
-func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
-    s := bufio.NewScanner(r)
-    out := []*Info{}
-    var err error
-    for s.Scan() {
-        if err = s.Err(); err != nil {
-            return nil, err
-        }
-        /*
-           See http://man7.org/linux/man-pages/man5/proc.5.html
-
-           36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
-           (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)
-
-           (1) mount ID: unique identifier of the mount (may be reused after umount)
-           (2) parent ID: ID of parent (or of self for the top of the mount tree)
-           (3) major:minor: value of st_dev for files on filesystem
-           (4) root: root of the mount within the filesystem
-           (5) mount point: mount point relative to the process's root
-           (6) mount options: per mount options
-           (7) optional fields: zero or more fields of the form "tag[:value]"
-           (8) separator: marks the end of the optional fields
-           (9) filesystem type: name of filesystem of the form "type[.subtype]"
-           (10) mount source: filesystem specific information or "none"
-           (11) super options: per super block options
-        */
-
-        text := s.Text()
-        fields := strings.Split(text, " ")
-        numFields := len(fields)
-        if numFields < 10 {
-            // should be at least 10 fields
-            return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
-        }
-
-        p := &Info{}
-        // ignore any numbers parsing errors, as there should not be any
-        p.ID, _ = strconv.Atoi(fields[0])
-        p.Parent, _ = strconv.Atoi(fields[1])
-        mm := strings.Split(fields[2], ":")
-        if len(mm) != 2 {
-            return nil, fmt.Errorf("Parsing '%s' failed: unexpected major:minor pair %s", text, mm)
-        }
-        p.Major, _ = strconv.Atoi(mm[0])
-        p.Minor, _ = strconv.Atoi(mm[1])
-
-        p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
-        if err != nil {
-            return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote root field", fields[3])
-        }
-
-        p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
-        if err != nil {
-            return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote mount point field", fields[4])
-        }
-        p.Opts = fields[5]
-
-        var skip, stop bool
-        if filter != nil {
-            // filter out entries we're not interested in
-            skip, stop = filter(p)
-            if skip {
-                continue
-            }
-        }
-
-        // one or more optional fields, until a separator (-)
-        i := 6
-        for ; i < numFields && fields[i] != "-"; i++ {
-            switch i {
-            case 6:
-                p.Optional = fields[6]
-            default:
-                /* NOTE there might be more optional fields before the separator, such as
-                   fields[7]...fields[N] (where N < sepIndex), although
-                   as of Linux kernel 4.15 the only known ones are
-                   mount propagation flags in fields[6]. The correct
-                   behavior is to ignore any unknown optional fields.
-                */
-                break
-            }
-        }
-        if i == numFields {
-            return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text)
-        }
-
-        // There should be 3 fields after the separator...
-        if i+4 > numFields {
-            return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text)
-        }
-        // ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
-        // (like "//serv/My Documents") _may_ end up having a space in the last field
-        // of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs
-        // option unc= is ignored, so a space should not appear. In here we ignore
-        // those "extra" fields caused by extra spaces.
-        p.Fstype = fields[i+1]
-        p.Source = fields[i+2]
-        p.VfsOpts = fields[i+3]
-
-        out = append(out, p)
-        if stop {
-            break
-        }
-    }
-    return out, nil
-}
-
-// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
-// bind mounts
-func parseMountTable(filter FilterFunc) ([]*Info, error) {
-    f, err := os.Open("/proc/self/mountinfo")
-    if err != nil {
-        return nil, err
-    }
-    defer f.Close()
-
-    return parseInfoFile(f, filter)
-}
-
-// PidMountInfo collects the mounts for a specific process ID. If the process
-// ID is unknown, it is better to use `GetMounts` which will inspect
-// "/proc/self/mountinfo" instead.
-func PidMountInfo(pid int) ([]*Info, error) {
-    f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
-    if err != nil {
-        return nil, err
-    }
-    defer f.Close()
-
-    return parseInfoFile(f, nil)
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
deleted file mode 100644
index 27e0f697..00000000
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-func parseMountTable(f FilterFunc) ([]*Info, error) {
-    // Do NOT return an error!
-    return nil, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go b/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go
deleted file mode 100644
index 4be42768..00000000
--- a/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build !windows
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import "golang.org/x/sys/unix"
-
-func unmount(target string, flags int) error {
-    err := unix.Unmount(target, flags)
-    if err == nil || err == unix.EINVAL {
-        // Ignore "not mounted" error here. Note the same error
-        // can be returned if flags are invalid, so this code
-        // assumes that the flags value is always correct.
-        return nil
-    }
-
-    return &mountError{
-        op:     "umount",
-        target: target,
-        flags:  uintptr(flags),
-        err:    err,
-    }
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go
deleted file mode 100644
index a88ad357..00000000
--- a/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build windows
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-func unmount(target string, flag int) error {
-    panic("Not implemented")
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/args_windows.go b/vendor/github.com/docker/docker/pkg/system/args_windows.go
deleted file mode 100644
index b7c9487a..00000000
--- a/vendor/github.com/docker/docker/pkg/system/args_windows.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-import (
-    "strings"
-
-    "golang.org/x/sys/windows"
-)
-
-// EscapeArgs makes a Windows-style escaped command line from a set of arguments
-func EscapeArgs(args []string) string {
-    escapedArgs := make([]string, len(args))
-    for i, a := range args {
-        escapedArgs[i] = windows.EscapeArg(a)
-    }
-    return strings.Join(escapedArgs, " ")
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/init_unix.go b/vendor/github.com/docker/docker/pkg/system/init_unix.go
deleted file mode 100644
index c2bb0f4c..00000000
--- a/vendor/github.com/docker/docker/pkg/system/init_unix.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !windows
-
-package system // import "github.com/docker/docker/pkg/system"
-
-// InitLCOW does nothing since LCOW is a windows only feature
-func InitLCOW(experimental bool) {
-}
-
-// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
-func ContainerdRuntimeSupported(_ bool, _ string) bool {
-    return true
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go
deleted file mode 100644
index f303aa90..00000000
--- a/vendor/github.com/docker/docker/pkg/system/init_windows.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-import (
-    "os"
-
-    "github.com/Microsoft/hcsshim/osversion"
-    "github.com/sirupsen/logrus"
-)
-
-var (
-    // lcowSupported determines if Linux Containers on Windows are supported.
-    lcowSupported = false
-
-    // containerdRuntimeSupported determines if ContainerD should be the runtime.
-    // As of March 2019, this is an experimental feature.
-    containerdRuntimeSupported = false
-)
-
-// InitLCOW sets whether LCOW is supported or not. Requires RS5+
-func InitLCOW(experimental bool) {
-    v := GetOSVersion()
-    if experimental && v.Build >= osversion.RS5 {
-        lcowSupported = true
-    }
-}
-
-// InitContainerdRuntime sets whether to use ContainerD for runtime
-// on Windows. This is an experimental feature still in development, and
-// also requires an environment variable to be set (so as not to turn the
-// feature on simply by enabling experimental mode, which would also enable LCOW).
-func InitContainerdRuntime(experimental bool, cdPath string) {
-    if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 {
-        logrus.Warnf("Using ContainerD runtime. This feature is experimental")
-        containerdRuntimeSupported = true
-    }
-}
-
-// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
-func ContainerdRuntimeSupported() bool {
-    return containerdRuntimeSupported
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go
deleted file mode 100644
index 5be3e218..00000000
--- a/vendor/github.com/docker/docker/pkg/system/lcow.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-import (
-    "runtime"
-    "strings"
-
-    specs "github.com/opencontainers/image-spec/specs-go/v1"
-    "github.com/pkg/errors"
-)
-
-// IsOSSupported determines if an operating system is supported by the host
-func IsOSSupported(os string) bool {
-    if strings.EqualFold(runtime.GOOS, os) {
-        return true
-    }
-    if LCOWSupported() && strings.EqualFold(os, "linux") {
-        return true
-    }
-    return false
-}
-
-// ValidatePlatform determines if a platform structure is valid.
-// TODO This is a temporary windows-only function, should be replaced by
-// comparison of worker capabilities
-func ValidatePlatform(platform specs.Platform) error {
-    if runtime.GOOS == "windows" {
-        if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) {
-            return errors.Errorf("unsupported os %s", platform.OS)
-        }
-    }
-    return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
deleted file mode 100644
index b0b93196..00000000
--- a/vendor/github.com/docker/docker/pkg/system/path_unix.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !windows
-
-package system // import "github.com/docker/docker/pkg/system"
-
-// GetLongPathName converts Windows short pathnames to full pathnames.
-// For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
-// It is a no-op on non-Windows platforms
-func GetLongPathName(path string) (string, error) {
-    return path, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go
deleted file mode 100644
index 188f2c29..00000000
--- a/vendor/github.com/docker/docker/pkg/system/path_windows.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-import "syscall"
-
-// GetLongPathName converts Windows short pathnames to full pathnames.
-// For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
-// It is a no-op on non-Windows platforms
-func GetLongPathName(path string) (string, error) {
-    // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg
-    p := syscall.StringToUTF16(path)
-    b := p // GetLongPathName says we can reuse buffer
-    n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
-    if err != nil {
-        return "", err
-    }
-    if n > uint32(len(b)) {
-        b = make([]uint16, n)
-        _, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
-        if err != nil {
-            return "", err
-        }
-    }
-    return syscall.UTF16ToString(b), nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md
deleted file mode 100644
index e69de29b..00000000
diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
deleted file mode 100644
index 99846ffd..00000000
--- a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package sockets
-
-import (
-    "errors"
-    "net"
-    "sync"
-)
-
-var errClosed = errors.New("use of closed network connection")
-
-// InmemSocket implements net.Listener using in-memory only connections.
-type InmemSocket struct {
-    chConn  chan net.Conn
-    chClose chan struct{}
-    addr    string
-    mu      sync.Mutex
-}
-
-// dummyAddr is used to satisfy net.Addr for the in-mem socket
-// it is just stored as a string and returns the string for all calls
-type dummyAddr string
-
-// NewInmemSocket creates an in-memory only net.Listener
-// The addr argument can be any string, but is used to satisfy the `Addr()` part
-// of the net.Listener interface
-func NewInmemSocket(addr string, bufSize int) *InmemSocket {
-    return &InmemSocket{
-        chConn:  make(chan net.Conn, bufSize),
-        chClose: make(chan struct{}),
-        addr:    addr,
-    }
-}
-
-// Addr returns the socket's addr string to satisfy net.Listener
-func (s *InmemSocket) Addr() net.Addr {
-    return dummyAddr(s.addr)
-}
-
-// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
-func (s *InmemSocket) Accept() (net.Conn, error) {
-    select {
-    case conn := <-s.chConn:
-        return conn, nil
-    case <-s.chClose:
-        return nil, errClosed
-    }
-}
-
-// Close closes the listener. It will be unavailable for use once closed.
-func (s *InmemSocket) Close() error {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-    select {
-    case <-s.chClose:
-    default:
-        close(s.chClose)
-    }
-    return nil
-}
-
-// Dial is used to establish a connection with the in-mem server
-func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
-    srvConn, clientConn := net.Pipe()
-    select {
-    case s.chConn <- srvConn:
-    case <-s.chClose:
-        return nil, errClosed
-    }
-
-    return clientConn, nil
-}
-
-// Network returns the addr string, satisfies net.Addr
-func (a dummyAddr) Network() string {
-    return string(a)
-}
-
-// String returns the string form
-func (a dummyAddr) String() string {
-    return string(a)
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go
deleted file mode 100644
index 98e9a1dc..00000000
--- a/vendor/github.com/docker/go-connections/sockets/proxy.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package sockets
-
-import (
-    "net"
-    "net/url"
-    "os"
-    "strings"
-
-    "golang.org/x/net/proxy"
-)
-
-// GetProxyEnv allows access to the uppercase and the lowercase forms of
-// proxy-related variables. See the Go specification for details on these
-// variables. https://golang.org/pkg/net/http/
-func GetProxyEnv(key string) string {
-    proxyValue := os.Getenv(strings.ToUpper(key))
-    if proxyValue == "" {
-        return os.Getenv(strings.ToLower(key))
-    }
-    return proxyValue
-}
-
-// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
-// proxy.Dialer which will route the connections through the proxy using the
-// given dialer.
-func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
-    allProxy := GetProxyEnv("all_proxy")
-    if len(allProxy) == 0 {
-        return direct, nil
-    }
-
-    proxyURL, err := url.Parse(allProxy)
-    if err != nil {
-        return direct, err
-    }
-
-    proxyFromURL, err := proxy.FromURL(proxyURL, direct)
-    if err != nil {
-        return direct, err
-    }
-
-    noProxy := GetProxyEnv("no_proxy")
-    if len(noProxy) == 0 {
-        return proxyFromURL, nil
-    }
-
-    perHost := proxy.NewPerHost(proxyFromURL, direct)
-    perHost.AddFromString(noProxy)
-
-    return perHost, nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go
deleted file mode 100644
index a1d7beb4..00000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Package sockets provides helper functions to create and configure Unix or TCP sockets.
-package sockets
-
-import (
-    "errors"
-    "net"
-    "net/http"
-    "time"
-)
-
-// Why 32? See https://github.com/docker/docker/pull/8035.
-const defaultTimeout = 32 * time.Second
-
-// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
-var ErrProtocolNotAvailable = errors.New("protocol not available")
-
-// ConfigureTransport configures the specified Transport according to the
-// specified proto and addr.
-// If the proto is unix (using a unix socket to communicate) or npipe the
-// compression is disabled.
-func ConfigureTransport(tr *http.Transport, proto, addr string) error {
-    switch proto {
-    case "unix":
-        return configureUnixTransport(tr, proto, addr)
-    case "npipe":
-        return configureNpipeTransport(tr, proto, addr)
-    default:
-        tr.Proxy = http.ProxyFromEnvironment
-        dialer, err := DialerFromEnvironment(&net.Dialer{
-            Timeout: defaultTimeout,
-        })
-        if err != nil {
-            return err
-        }
-        tr.Dial = dialer.Dial
-    }
-    return nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
deleted file mode 100644
index 386cf0db..00000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build !windows
-
-package sockets
-
-import (
-    "fmt"
-    "net"
-    "net/http"
-    "syscall"
-    "time"
-)
-
-const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
-
-func configureUnixTransport(tr *http.Transport, proto, addr string) error {
-    if len(addr) > maxUnixSocketPathSize {
-        return fmt.Errorf("Unix socket path %q is too long", addr)
-    }
-    // No need for compression in local communications.
-    tr.DisableCompression = true
-    tr.Dial = func(_, _ string) (net.Conn, error) {
-        return net.DialTimeout(proto, addr, defaultTimeout)
-    }
-    return nil
-}
-
-func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
-    return ErrProtocolNotAvailable
-}
-
-// DialPipe connects to a Windows named pipe.
-// This is not supported on other OSes.
-func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
-    return nil, syscall.EAFNOSUPPORT
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
deleted file mode 100644
index 5c21644e..00000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package sockets
-
-import (
-    "net"
-    "net/http"
-    "time"
-
-    "github.com/Microsoft/go-winio"
-)
-
-func configureUnixTransport(tr *http.Transport, proto, addr string) error {
-    return ErrProtocolNotAvailable
-}
-
-func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
-    // No need for compression in local communications.
-    tr.DisableCompression = true
-    tr.Dial = func(_, _ string) (net.Conn, error) {
-        return DialPipe(addr, defaultTimeout)
-    }
-    return nil
-}
-
-// DialPipe connects to a Windows named pipe.
-func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
-    return winio.DialPipe(addr, &timeout)
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
deleted file mode 100644
index 53cbb6c7..00000000
--- a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Package sockets provides helper functions to create and configure Unix or TCP sockets.
-package sockets
-
-import (
-    "crypto/tls"
-    "net"
-)
-
-// NewTCPSocket creates a TCP socket listener with the specified address and
-// the specified tls configuration. If TLSConfig is set, will encapsulate the
-// TCP listener inside a TLS one.
-func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
-    l, err := net.Listen("tcp", addr)
-    if err != nil {
-        return nil, err
-    }
-    if tlsConfig != nil {
-        tlsConfig.NextProtos = []string{"http/1.1"}
-        l = tls.NewListener(l, tlsConfig)
-    }
-    return l, nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
deleted file mode 100644
index a8b5dbb6..00000000
--- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// +build !windows
-
-package sockets
-
-import (
-    "net"
-    "os"
-    "syscall"
-)
-
-// NewUnixSocket creates a unix socket with the specified path and group.
-func NewUnixSocket(path string, gid int) (net.Listener, error) {
-    if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
-        return nil, err
-    }
-    mask := syscall.Umask(0777)
-    defer syscall.Umask(mask)
-
-    l, err := net.Listen("unix", path)
-    if err != nil {
-        return nil, err
-    }
-    if err := os.Chown(path, 0, gid); err != nil {
-        l.Close()
-        return nil, err
-    }
-    if err := os.Chmod(path, 0660); err != nil {
-        l.Close()
-        return nil, err
-    }
-    return l, nil
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
deleted file mode 100644
index 1ca0965e..00000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build go1.7
-
-package tlsconfig
-
-import (
-    "crypto/x509"
-    "runtime"
-)
-
-// SystemCertPool returns a copy of the system cert pool,
-// or an error if it failed to load; on Windows an empty pool is returned instead.
-func SystemCertPool() (*x509.CertPool, error) {
-    certpool, err := x509.SystemCertPool()
-    if err != nil && runtime.GOOS == "windows" {
-        return x509.NewCertPool(), nil
-    }
-    return certpool, err
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
deleted file mode 100644
index 1ff81c33..00000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !go1.7
-
-package tlsconfig
-
-import (
-    "crypto/x509"
-)
-
-// SystemCertPool returns a new empty cert pool;
-// accessing the system cert pool is supported in go 1.7
-func SystemCertPool() (*x509.CertPool, error) {
-    return x509.NewCertPool(), nil
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
deleted file mode 100644
index 0ef3fdcb..00000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/config.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
-//
-// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
-// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
-// A Config may be reused; the tls package will also not modify it.
-package tlsconfig
-
-import (
-    "crypto/tls"
-    "crypto/x509"
-    "encoding/pem"
-    "fmt"
-    "io/ioutil"
-    "os"
-
-    "github.com/pkg/errors"
-)
-
-// Options represents the information needed to create client and server TLS configurations.
-type Options struct {
-    CAFile string
-
-    // If either CertFile or KeyFile is empty, Client() will not load them
-    // preventing the client from authenticating to the server.
-    // However, Server() requires them and will error out if they are empty.
-    CertFile string
-    KeyFile  string
-
-    // client-only option
-    InsecureSkipVerify bool
-    // server-only option
-    ClientAuth tls.ClientAuthType
-    // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
-    // creds will include exclusively the roots in that CA file. If no CA file is provided,
-    // the system pool will be used.
-    ExclusiveRootPools bool
-    MinVersion         uint16
-    // If Passphrase is set, it will be used to decrypt a TLS private key
-    // if the key is encrypted
-    Passphrase string
-}
-
-// Extra (server-side) accepted CBC cipher suites - will phase out in the future
-var acceptedCBCCiphers = []uint16{
-    tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
-    tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
-    tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
-    tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
-}
-
-// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
-// options struct but wants to use a commonly accepted set of TLS cipher suites, with
-// known weak algorithms removed.
-var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
-
-// allTLSVersions lists all the TLS versions and is used by the code that validates
-// a uint16 value as a TLS version.
-var allTLSVersions = map[uint16]struct{}{
-    tls.VersionSSL30: {},
-    tls.VersionTLS10: {},
-    tls.VersionTLS11: {},
-    tls.VersionTLS12: {},
-}
-
-// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
-func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
-    tlsconfig := &tls.Config{
-        // Avoid fallback by default to SSL protocols < TLS1.2
-        MinVersion:               tls.VersionTLS12,
-        PreferServerCipherSuites: true,
-        CipherSuites:             DefaultServerAcceptedCiphers,
-    }
-
-    for _, op := range ops {
-        op(tlsconfig)
-    }
-
-    return tlsconfig
-}
-
-// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
-func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
-    tlsconfig := &tls.Config{
-        // Prefer TLS1.2 as the client minimum
-        MinVersion:   tls.VersionTLS12,
-        CipherSuites: clientCipherSuites,
-    }
-
-    for _, op := range ops {
-        op(tlsconfig)
-    }
-
-    return tlsconfig
-}
-
-// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
-func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
-    // If we should verify the server, we need to load a trusted ca
-    var (
-        certPool *x509.CertPool
-        err      error
-    )
-    if exclusivePool {
-        certPool = x509.NewCertPool()
-    } else {
-        certPool, err = SystemCertPool()
-        if err != nil {
-            return nil, fmt.Errorf("failed to read system certificates: %v", err)
-        }
-    }
-    pem, err := ioutil.ReadFile(caFile)
-    if err != nil {
-        return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
-    }
-    if !certPool.AppendCertsFromPEM(pem) {
-        return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
-    }
-    return certPool, nil
-}
-
-// isValidMinVersion checks that the input value is a valid tls minimum version
-func isValidMinVersion(version uint16) bool {
-    _, ok := allTLSVersions[version]
-    return ok
-}
-
-// adjustMinVersion sets the MinVersion on `config`, the input configuration.
-// It assumes the current MinVersion on the `config` is the lowest allowed.
-func adjustMinVersion(options Options, config *tls.Config) error {
-    if options.MinVersion > 0 {
-        if !isValidMinVersion(options.MinVersion) {
-            return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
-        }
-        if options.MinVersion < config.MinVersion {
-            return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
-        }
-        config.MinVersion = options.MinVersion
-    }
-
-    return nil
-}
-
-// IsErrEncryptedKey returns true if the 'err' is an error of incorrect
-// password when trying to decrypt a TLS private key
-func IsErrEncryptedKey(err error) bool {
-    return errors.Cause(err) == x509.IncorrectPasswordError
-}
-
-// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
-// If the private key is encrypted, 'passphrase' is used to decrypt the
-// private key.
-func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
-    // this section makes some small changes to code from notary/tuf/utils/x509.go
-    pemBlock, _ := pem.Decode(keyBytes)
-    if pemBlock == nil {
-        return nil, fmt.Errorf("no valid private key found")
-    }
-
-    var err error
-    if x509.IsEncryptedPEMBlock(pemBlock) {
-        keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
-        if err != nil {
-            return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
-        }
-        keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
-    }
-
-    return keyBytes, nil
-}
-
-// getCert returns a Certificate from the CertFile and KeyFile in 'options',
-// if the key is encrypted, the Passphrase in 'options' will be used to
-// decrypt it.
-func getCert(options Options) ([]tls.Certificate, error) {
-    if options.CertFile == "" && options.KeyFile == "" {
-        return nil, nil
-    }
-
-    errMessage := "Could not load X509 key pair"
-
-    cert, err := ioutil.ReadFile(options.CertFile)
-    if err != nil {
-        return nil, errors.Wrap(err, errMessage)
-    }
-
-    prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
-    if err != nil {
-        return nil, errors.Wrap(err, errMessage)
-    }
-
-    prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
-    if err != nil {
-        return nil, errors.Wrap(err, errMessage)
-    }
-
-    tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
-    if err != nil {
-        return nil, errors.Wrap(err, errMessage)
-    }
-
-    return []tls.Certificate{tlsCert}, nil
-}
-
-// Client returns a TLS configuration meant to be used by a client.
-func Client(options Options) (*tls.Config, error) {
-    tlsConfig := ClientDefault()
-    tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
-    if !options.InsecureSkipVerify && options.CAFile != "" {
-        CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
-        if err != nil {
-            return nil, err
-        }
-        tlsConfig.RootCAs = CAs
-    }
-
-    tlsCerts, err := getCert(options)
-    if err != nil {
-        return nil, err
-    }
-    tlsConfig.Certificates = tlsCerts
-
-    if err := adjustMinVersion(options, tlsConfig); err != nil {
-        return nil, err
-    }
-
-    return tlsConfig, nil
-}
-
-// Server returns a TLS configuration meant to be used by a server.
-func Server(options Options) (*tls.Config, error) {
-    tlsConfig := ServerDefault()
-    tlsConfig.ClientAuth = options.ClientAuth
-    tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
-    if err != nil {
-        if os.IsNotExist(err) {
-            return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
-        }
-        return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
-    }
-    tlsConfig.Certificates = []tls.Certificate{tlsCert}
-    if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" {
-        CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
-        if err != nil {
-            return nil, err
-        }
-        tlsConfig.ClientCAs = CAs
-    }
-
-    if err := adjustMinVersion(options, tlsConfig); err != nil {
-        return nil, err
-    }
-
-    return tlsConfig, nil
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
deleted file mode 100644
index 6b4c6a7c..00000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build go1.5
-
-// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
-//
-package tlsconfig
-
-import (
-    "crypto/tls"
-)
-
-// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
-var clientCipherSuites = []uint16{
-    tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
-    tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
-    tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-    tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
deleted file mode 100644
index ee22df47..00000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !go1.5
-
-// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
-//
-package tlsconfig
-
-import (
-    "crypto/tls"
-)
-
-// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
-var clientCipherSuites = []uint16{
-    tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-    tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-}
diff --git a/vendor/github.com/fatih/color/.travis.yml b/vendor/github.com/fatih/color/.travis.yml
deleted file mode 100644
index 95f8a1ff..00000000
--- a/vendor/github.com/fatih/color/.travis.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-language: go
-go:
-  - 1.8.x
-  - tip
-
diff --git a/vendor/github.com/fatih/color/Gopkg.lock b/vendor/github.com/fatih/color/Gopkg.lock
deleted file mode 100644
index 7d879e9c..00000000
--- a/vendor/github.com/fatih/color/Gopkg.lock
+++ /dev/null
@@ -1,27 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  name = "github.com/mattn/go-colorable"
-  packages = ["."]
-  revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
-  version = "v0.0.9"
-
-[[projects]]
-  name = "github.com/mattn/go-isatty"
-  packages = ["."]
-  revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
-  version = "v0.0.3"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/sys"
-  packages = ["unix"]
-  revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc"
-  solver-name = "gps-cdcl"
-  solver-version = 1
diff --git a/vendor/github.com/fatih/color/Gopkg.toml b/vendor/github.com/fatih/color/Gopkg.toml
deleted file mode 100644
index ff1617f7..00000000
--- a/vendor/github.com/fatih/color/Gopkg.toml
+++ /dev/null
@@ -1,30 +0,0 @@
-
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-#   name = "github.com/user/project"
-#   version = "1.0.0"
-#
-# [[constraint]]
-#   name = "github.com/user/project2"
-#   branch = "dev"
-#   source = "github.com/myfork/project2"
-#
-# [[override]]
-#   name = "github.com/x/y"
-#   version = "2.4.0"
-
-
-[[constraint]]
-  name = "github.com/mattn/go-colorable"
-  version = "0.0.9"
-
-[[constraint]]
-  name = "github.com/mattn/go-isatty"
-  version = "0.0.3"
diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md
deleted file mode 100644
index 25fdaf63..00000000
--- a/vendor/github.com/fatih/color/LICENSE.md
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Fatih Arslan
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md deleted file mode 100644 index 3fc95446..00000000 --- a/vendor/github.com/fatih/color/README.md +++ /dev/null @@ -1,179 +0,0 @@ -# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color) - - - -Color lets you use colorized outputs in terms of [ANSI Escape -Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It -has support for Windows too! The API can be used in several ways, pick one that -suits you. - - -![Color](https://i.imgur.com/c1JI0lA.png) - - -## Install - -```bash -go get github.com/fatih/color -``` - -Note that the `vendor` folder is here for stability. Remove the folder if you -already have the dependencies in your GOPATH. - -## Examples - -### Standard colors - -```go -// Print with default helper functions -color.Cyan("Prints text in cyan.") - -// A newline will be appended automatically -color.Blue("Prints %s in blue.", "text") - -// These are using the default foreground colors -color.Red("We have red") -color.Magenta("And many others ..") - -``` - -### Mix and reuse colors - -```go -// Create a new color object -c := color.New(color.FgCyan).Add(color.Underline) -c.Println("Prints cyan text with an underline.") - -// Or just add them to New() -d := color.New(color.FgCyan, color.Bold) -d.Printf("This prints bold cyan %s\n", "too!.") - -// Mix up foreground and background colors, create new mixes! -red := color.New(color.FgRed) - -boldRed := red.Add(color.Bold) -boldRed.Println("This will print text in bold red.") - -whiteBackground := red.Add(color.BgWhite) -whiteBackground.Println("Red text with white background.") -``` - -### Use your own output (io.Writer) - -```go -// Use your own io.Writer output -color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - -blue := color.New(color.FgBlue) -blue.Fprint(writer, "This will print text in blue.") -``` - -### Custom print functions (PrintFunc) - -```go -// Create a custom print function for convenience -red := color.New(color.FgRed).PrintfFunc() -red("Warning") -red("Error: %s", err) - -// Mix up multiple attributes -notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() -notice("Don't forget this...") -``` - -### Custom fprint functions (FprintFunc) - -```go -blue := color.New(FgBlue).FprintfFunc() -blue(myWriter, "important notice: %s", stars) - -// Mix up with multiple attributes -success := color.New(color.Bold, color.FgGreen).FprintlnFunc() -success(myWriter, "Don't forget this...") -``` - -### Insert into noncolor strings (SprintFunc) - -```go -// Create SprintXxx functions to mix strings with other non-colorized strings: -yellow := color.New(color.FgYellow).SprintFunc() -red := color.New(color.FgRed).SprintFunc() -fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) - -info := color.New(color.FgWhite, color.BgGreen).SprintFunc() -fmt.Printf("This %s rocks!\n", info("package")) - -// Use helper functions -fmt.Println("This", color.RedString("warning"), "should be not neglected.") -fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") - -// Windows supported too! 
Just don't forget to change the output to color.Output -fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) -``` - -### Plug into existing code - -```go -// Use handy standard colors -color.Set(color.FgYellow) - -fmt.Println("Existing text will now be in yellow") -fmt.Printf("This one %s\n", "too") - -color.Unset() // Don't forget to unset - -// You can mix up parameters -color.Set(color.FgMagenta, color.Bold) -defer color.Unset() // Use it in your function - -fmt.Println("All text will now be bold magenta.") -``` - -### Disable/Enable color - -There might be a case where you want to explicitly disable/enable color output. the -`go-isatty` package will automatically disable color output for non-tty output streams -(for example if the output were piped directly to `less`) - -`Color` has support to disable/enable colors both globally and for single color -definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You -can easily disable the color output with: - -```go - -var flagNoColor = flag.Bool("no-color", false, "Disable color output") - -if *flagNoColor { - color.NoColor = true // disables colorized output -} -``` - -It also has support for single color definitions (local). You can -disable/enable color output on the fly: - -```go -c := color.New(color.FgCyan) -c.Println("Prints cyan text") - -c.DisableColor() -c.Println("This is printed without any color") - -c.EnableColor() -c.Println("This prints again cyan...") -``` - -## Todo - -* Save/Return previous values -* Evaluate fmt.Formatter interface - - -## Credits - - * [Fatih Arslan](https://github.com/fatih) - * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) - -## License - -The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details - diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go deleted file mode 100644 index 91c8e9f0..00000000 --- a/vendor/github.com/fatih/color/color.go +++ /dev/null @@ -1,603 +0,0 @@ -package color - -import ( - "fmt" - "io" - "os" - "strconv" - "strings" - "sync" - - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" -) - -var ( - // NoColor defines if the output is colorized or not. It's dynamically set to - // false or true based on the stdout's file descriptor referring to a terminal - // or not. This is a global option and affects all colors. For more control - // over each color block use the methods DisableColor() individually. - NoColor = os.Getenv("TERM") == "dumb" || - (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) - - // Output defines the standard output of the print functions. By default - // os.Stdout is used. - Output = colorable.NewColorableStdout() - - // Error defines a color supporting writer for os.Stderr. - Error = colorable.NewColorableStderr() - - // colorsCache is used to reduce the count of created Color objects and - // allows to reuse already created objects with required Attribute. - colorsCache = make(map[Attribute]*Color) - colorsCacheMu sync.Mutex // protects colorsCache -) - -// Color defines a custom color object which is defined by SGR parameters. 
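// A standalone sketch of the terminal detection feeding NoColor above,
// assuming the same mattn/go-isatty dependency already vendored here;
// shouldColor is an illustrative helper name, not part of the package.
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func shouldColor() bool {
	// Match the vendored default: no color for "dumb" terminals, and none
	// when stdout is neither a real nor a Cygwin/MSYS pseudo terminal.
	if os.Getenv("TERM") == "dumb" {
		return false
	}
	return isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())
}

func main() {
	fmt.Println("color enabled:", shouldColor())
}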
-type Color struct { - params []Attribute - noColor *bool -} - -// Attribute defines a single SGR Code -type Attribute int - -const escape = "\x1b" - -// Base attributes -const ( - Reset Attribute = iota - Bold - Faint - Italic - Underline - BlinkSlow - BlinkRapid - ReverseVideo - Concealed - CrossedOut -) - -// Foreground text colors -const ( - FgBlack Attribute = iota + 30 - FgRed - FgGreen - FgYellow - FgBlue - FgMagenta - FgCyan - FgWhite -) - -// Foreground Hi-Intensity text colors -const ( - FgHiBlack Attribute = iota + 90 - FgHiRed - FgHiGreen - FgHiYellow - FgHiBlue - FgHiMagenta - FgHiCyan - FgHiWhite -) - -// Background text colors -const ( - BgBlack Attribute = iota + 40 - BgRed - BgGreen - BgYellow - BgBlue - BgMagenta - BgCyan - BgWhite -) - -// Background Hi-Intensity text colors -const ( - BgHiBlack Attribute = iota + 100 - BgHiRed - BgHiGreen - BgHiYellow - BgHiBlue - BgHiMagenta - BgHiCyan - BgHiWhite -) - -// New returns a newly created color object. -func New(value ...Attribute) *Color { - c := &Color{params: make([]Attribute, 0)} - c.Add(value...) - return c -} - -// Set sets the given parameters immediately. It will change the color of -// output with the given SGR parameters until color.Unset() is called. -func Set(p ...Attribute) *Color { - c := New(p...) - c.Set() - return c -} - -// Unset resets all escape attributes and clears the output. Usually should -// be called after Set(). -func Unset() { - if NoColor { - return - } - - fmt.Fprintf(Output, "%s[%dm", escape, Reset) -} - -// Set sets the SGR sequence. -func (c *Color) Set() *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(Output, c.format()) - return c -} - -func (c *Color) unset() { - if c.isNoColorSet() { - return - } - - Unset() -} - -func (c *Color) setWriter(w io.Writer) *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(w, c.format()) - return c -} - -func (c *Color) unsetWriter(w io.Writer) { - if c.isNoColorSet() { - return - } - - if NoColor { - return - } - - fmt.Fprintf(w, "%s[%dm", escape, Reset) -} - -// Add is used to chain SGR parameters. Use as many as parameters to combine -// and create custom color objects. Example: Add(color.FgRed, color.Underline). -func (c *Color) Add(value ...Attribute) *Color { - c.params = append(c.params, value...) - return c -} - -func (c *Color) prepend(value Attribute) { - c.params = append(c.params, 0) - copy(c.params[1:], c.params[0:]) - c.params[0] = value -} - -// Fprint formats using the default formats for its operands and writes to w. -// Spaces are added between operands when neither is a string. -// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprint(w, a...) -} - -// Print formats using the default formats for its operands and writes to -// standard output. Spaces are added between operands when neither is a -// string. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Print(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprint(Output, a...) -} - -// Fprintf formats according to a format specifier and writes to w. -// It returns the number of bytes written and any write error encountered. 
-// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintf(w, format, a...) -} - -// Printf formats according to a format specifier and writes to standard output. -// It returns the number of bytes written and any write error encountered. -// This is the standard fmt.Printf() method wrapped with the given color. -func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintf(Output, format, a...) -} - -// Fprintln formats using the default formats for its operands and writes to w. -// Spaces are always added between operands and a newline is appended. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintln(w, a...) -} - -// Println formats using the default formats for its operands and writes to -// standard output. Spaces are always added between operands and a newline is -// appended. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Println(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintln(Output, a...) -} - -// Sprint is just like Print, but returns a string instead of printing it. -func (c *Color) Sprint(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) -} - -// Sprintln is just like Println, but returns a string instead of printing it. -func (c *Color) Sprintln(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) -} - -// Sprintf is just like Printf, but returns a string instead of printing it. -func (c *Color) Sprintf(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) -} - -// FprintFunc returns a new function that prints the passed arguments as -// colorized with color.Fprint(). -func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprint(w, a...) - } -} - -// PrintFunc returns a new function that prints the passed arguments as -// colorized with color.Print(). -func (c *Color) PrintFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Print(a...) - } -} - -// FprintfFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintf(). -func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { - return func(w io.Writer, format string, a ...interface{}) { - c.Fprintf(w, format, a...) - } -} - -// PrintfFunc returns a new function that prints the passed arguments as -// colorized with color.Printf(). -func (c *Color) PrintfFunc() func(format string, a ...interface{}) { - return func(format string, a ...interface{}) { - c.Printf(format, a...) - } -} - -// FprintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintln(). -func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprintln(w, a...) - } -} - -// PrintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Println(). 
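// A tiny sketch of the closure pattern behind the PrintfFunc/PrintlnFunc
// constructors above: capture the configured state once, then return a
// printf-shaped function; prefixedPrintf is an illustrative stand-in that
// uses only the standard library.
package main

import "fmt"

func prefixedPrintf(prefix string) func(format string, a ...interface{}) {
	return func(format string, a ...interface{}) {
		fmt.Printf(prefix+format+"\n", a...)
	}
}

func main() {
	warnf := prefixedPrintf("[warn] ")
	warnf("disk usage at %d%%", 91) // prints "[warn] disk usage at 91%"
}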
-func (c *Color) PrintlnFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Println(a...) - } -} - -// SprintFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprint(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output, example: -// -// put := New(FgYellow).SprintFunc() -// fmt.Fprintf(color.Output, "This is a %s", put("warning")) -func (c *Color) SprintFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) - } -} - -// SprintfFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintf(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. -func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { - return func(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) - } -} - -// SprintlnFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintln(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. -func (c *Color) SprintlnFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) - } -} - -// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" -// an example output might be: "1;36" -> bold cyan -func (c *Color) sequence() string { - format := make([]string, len(c.params)) - for i, v := range c.params { - format[i] = strconv.Itoa(int(v)) - } - - return strings.Join(format, ";") -} - -// wrap wraps the s string with the colors attributes. The string is ready to -// be printed. -func (c *Color) wrap(s string) string { - if c.isNoColorSet() { - return s - } - - return c.format() + s + c.unformat() -} - -func (c *Color) format() string { - return fmt.Sprintf("%s[%sm", escape, c.sequence()) -} - -func (c *Color) unformat() string { - return fmt.Sprintf("%s[%dm", escape, Reset) -} - -// DisableColor disables the color output. Useful to not change any existing -// code and still being able to output. Can be used for flags like -// "--no-color". To enable back use EnableColor() method. -func (c *Color) DisableColor() { - c.noColor = boolPtr(true) -} - -// EnableColor enables the color output. Use it in conjunction with -// DisableColor(). Otherwise this method has no side effects. -func (c *Color) EnableColor() { - c.noColor = boolPtr(false) -} - -func (c *Color) isNoColorSet() bool { - // check first if we have user setted action - if c.noColor != nil { - return *c.noColor - } - - // if not return the global option, which is disabled by default - return NoColor -} - -// Equals returns a boolean value indicating whether two colors are equal. 
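// A self-contained sketch of the SGR escape construction done by sequence(),
// wrap() and unformat() above; the numeric attribute codes are standard ANSI
// values and sgrWrap is an illustrative helper, not part of the package.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func sgrWrap(s string, attrs ...int) string {
	codes := make([]string, len(attrs))
	for i, a := range attrs {
		codes[i] = strconv.Itoa(a)
	}
	// Produces e.g. "\x1b[1;36m" + s + "\x1b[0m" for bold (1) cyan (36).
	return "\x1b[" + strings.Join(codes, ";") + "m" + s + "\x1b[0m"
}

func main() {
	fmt.Println(sgrWrap("bold cyan", 1, 36))
}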
-func (c *Color) Equals(c2 *Color) bool { - if len(c.params) != len(c2.params) { - return false - } - - for _, attr := range c.params { - if !c2.attrExists(attr) { - return false - } - } - - return true -} - -func (c *Color) attrExists(a Attribute) bool { - for _, attr := range c.params { - if attr == a { - return true - } - } - - return false -} - -func boolPtr(v bool) *bool { - return &v -} - -func getCachedColor(p Attribute) *Color { - colorsCacheMu.Lock() - defer colorsCacheMu.Unlock() - - c, ok := colorsCache[p] - if !ok { - c = New(p) - colorsCache[p] = c - } - - return c -} - -func colorPrint(format string, p Attribute, a ...interface{}) { - c := getCachedColor(p) - - if !strings.HasSuffix(format, "\n") { - format += "\n" - } - - if len(a) == 0 { - c.Print(format) - } else { - c.Printf(format, a...) - } -} - -func colorString(format string, p Attribute, a ...interface{}) string { - c := getCachedColor(p) - - if len(a) == 0 { - return c.SprintFunc()(format) - } - - return c.SprintfFunc()(format, a...) -} - -// Black is a convenient helper function to print with black foreground. A -// newline is appended to format by default. -func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } - -// Red is a convenient helper function to print with red foreground. A -// newline is appended to format by default. -func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } - -// Green is a convenient helper function to print with green foreground. A -// newline is appended to format by default. -func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } - -// Yellow is a convenient helper function to print with yellow foreground. -// A newline is appended to format by default. -func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } - -// Blue is a convenient helper function to print with blue foreground. A -// newline is appended to format by default. -func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } - -// Magenta is a convenient helper function to print with magenta foreground. -// A newline is appended to format by default. -func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } - -// Cyan is a convenient helper function to print with cyan foreground. A -// newline is appended to format by default. -func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } - -// White is a convenient helper function to print with white foreground. A -// newline is appended to format by default. -func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } - -// BlackString is a convenient helper function to return a string with black -// foreground. -func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } - -// RedString is a convenient helper function to return a string with red -// foreground. -func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } - -// GreenString is a convenient helper function to return a string with green -// foreground. -func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } - -// YellowString is a convenient helper function to return a string with yellow -// foreground. -func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} - -// BlueString is a convenient helper function to return a string with blue -// foreground. -func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } - -// MagentaString is a convenient helper function to return a string with magenta -// foreground. -func MagentaString(format string, a ...interface{}) string { - return colorString(format, FgMagenta, a...) -} - -// CyanString is a convenient helper function to return a string with cyan -// foreground. -func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } - -// WhiteString is a convenient helper function to return a string with white -// foreground. -func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } - -// HiBlack is a convenient helper function to print with hi-intensity black foreground. A -// newline is appended to format by default. -func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } - -// HiRed is a convenient helper function to print with hi-intensity red foreground. A -// newline is appended to format by default. -func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } - -// HiGreen is a convenient helper function to print with hi-intensity green foreground. A -// newline is appended to format by default. -func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } - -// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. -// A newline is appended to format by default. -func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } - -// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A -// newline is appended to format by default. -func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } - -// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. -// A newline is appended to format by default. -func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } - -// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A -// newline is appended to format by default. -func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } - -// HiWhite is a convenient helper function to print with hi-intensity white foreground. A -// newline is appended to format by default. -func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } - -// HiBlackString is a convenient helper function to return a string with hi-intensity black -// foreground. -func HiBlackString(format string, a ...interface{}) string { - return colorString(format, FgHiBlack, a...) -} - -// HiRedString is a convenient helper function to return a string with hi-intensity red -// foreground. -func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } - -// HiGreenString is a convenient helper function to return a string with hi-intensity green -// foreground. -func HiGreenString(format string, a ...interface{}) string { - return colorString(format, FgHiGreen, a...) -} - -// HiYellowString is a convenient helper function to return a string with hi-intensity yellow -// foreground. -func HiYellowString(format string, a ...interface{}) string { - return colorString(format, FgHiYellow, a...) 
-} - -// HiBlueString is a convenient helper function to return a string with hi-intensity blue -// foreground. -func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } - -// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta -// foreground. -func HiMagentaString(format string, a ...interface{}) string { - return colorString(format, FgHiMagenta, a...) -} - -// HiCyanString is a convenient helper function to return a string with hi-intensity cyan -// foreground. -func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } - -// HiWhiteString is a convenient helper function to return a string with hi-intensity white -// foreground. -func HiWhiteString(format string, a ...interface{}) string { - return colorString(format, FgHiWhite, a...) -} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go deleted file mode 100644 index cf1e9650..00000000 --- a/vendor/github.com/fatih/color/doc.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Package color is an ANSI color package to output colorized or SGR defined -output to the standard output. The API can be used in several way, pick one -that suits you. - -Use simple and default helper functions with predefined foreground colors: - - color.Cyan("Prints text in cyan.") - - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") - - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") - - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") - -However there are times where custom color mixes are required. Below are some -examples to create custom color objects and use the print functions of each -separate color object. - - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") - - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") - - - // Mix up foreground and background colors, create new mixes! 
-    red := color.New(color.FgRed)
-
-    boldRed := red.Add(color.Bold)
-    boldRed.Println("This will print text in bold red.")
-
-    whiteBackground := red.Add(color.BgWhite)
-    whiteBackground.Println("Red text with White background.")
-
-    // Use your own io.Writer output
-    color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
-
-    blue := color.New(color.FgBlue)
-    blue.Fprint(myWriter, "This will print text in blue.")
-
-You can create PrintXxx functions to simplify even more:
-
-    // Create a custom print function for convenience
-    red := color.New(color.FgRed).PrintfFunc()
-    red("warning")
-    red("error: %s", err)
-
-    // Mix up multiple attributes
-    notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
-    notice("don't forget this...")
-
-You can also use FprintXxx functions to pass your own io.Writer:
-
-    blue := color.New(FgBlue).FprintfFunc()
-    blue(myWriter, "important notice: %s", stars)
-
-    // Mix up with multiple attributes
-    success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
-    success(myWriter, "don't forget this...")
-
-
-Or create SprintXxx functions to mix strings with other non-colorized strings:
-
-    yellow := New(FgYellow).SprintFunc()
-    red := New(FgRed).SprintFunc()
-
-    fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
-
-    info := New(FgWhite, BgGreen).SprintFunc()
-    fmt.Printf("this %s rocks!\n", info("package"))
-
-Windows support is enabled by default. All Print functions work as intended.
-However, only for the color.SprintXXX functions, users should use fmt.FprintXXX and
-set the output to color.Output:
-
-    fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
-
-    info := New(FgWhite, BgGreen).SprintFunc()
-    fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
-
-Using it with existing code is possible. Just use the Set() method to set the
-standard output to the given parameters. That way a rewrite of existing
-code is not required.
-
-    // Use handy standard colors.
-    color.Set(color.FgYellow)
-
-    fmt.Println("Existing text will be now in Yellow")
-    fmt.Printf("This one %s\n", "too")
-
-    color.Unset() // don't forget to unset
-
-    // You can mix up parameters
-    color.Set(color.FgMagenta, color.Bold)
-    defer color.Unset() // use it in your function
-
-    fmt.Println("All text will be now bold magenta.")
-
-There might be a case where you want to disable color output (for example to
-pipe the standard output of your app to somewhere else). `Color` has support to
-disable colors both globally and for single color definitions. For example,
-suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
-the color output with:
-
-    var flagNoColor = flag.Bool("no-color", false, "Disable color output")
-
-    if *flagNoColor {
-        color.NoColor = true // disables colorized output
-    }
-
-It also has support for single color definitions (local). You can
-disable/enable color output on the fly:
-
-    c := color.New(color.FgCyan)
-    c.Println("Prints cyan text")
-
-    c.DisableColor()
-    c.Println("This is printed without any color")
-
-    c.EnableColor()
-    c.Println("This prints again cyan...")
-*/
-package color
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
deleted file mode 100644
index 3d97fc7a..00000000
--- a/vendor/github.com/gogo/protobuf/AUTHORS
+++ /dev/null
@@ -1,15 +0,0 @@
-# This is the official list of GoGo authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS file, which
-# lists people.
For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. - -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c20..00000000 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index f57de90d..00000000 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 00d65f32..00000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. 
-# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C test_proto - make -C proto3_proto - make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index a26b046d..00000000 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
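// A reduced sketch of the allocation step Clone performs above: reflect on
// the source pointer, allocate a fresh value of the same element type, then
// merge into it. The field-by-field merge is elided; cloneShape and msg are
// illustrative, not the proto API.
package main

import (
	"fmt"
	"reflect"
)

func cloneShape(src interface{}) interface{} {
	in := reflect.ValueOf(src)
	if in.Kind() != reflect.Ptr || in.IsNil() {
		return src // mirror Clone's nil handling
	}
	out := reflect.New(in.Type().Elem()) // new *T pointing at a zero T
	// A real implementation merges *src into *out; a shallow copy stands in.
	out.Elem().Set(in.Elem())
	return out.Interface()
}

type msg struct{ Name string }

func main() {
	a := &msg{Name: "x"}
	b := cloneShape(a).(*msg)
	b.Name = "y"
	fmt.Println(a.Name, b.Name) // x y: the copy is independent
}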
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index d9aa3c42..00000000 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,428 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. 
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. 
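// A compact restatement of the base-128 varint decoding implemented by
// DecodeVarint above (the slice form, without the unrolled fast path);
// decodeVarint here is an illustrative sketch, not the proto API.
package main

import "fmt"

func decodeVarint(buf []byte) (x uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0 // truncated input
		}
		b := uint64(buf[n])
		n++
		x |= (b & 0x7F) << shift // low seven bits carry payload
		if b&0x80 == 0 {         // a clear high bit marks the last byte
			return x, n
		}
	}
	return 0, 0 // value would overflow 64 bits
}

func main() {
	v, n := decodeVarint([]byte{0xAC, 0x02}) // 300 encoded as two bytes
	fmt.Println(v, n)                        // 300 2
}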
-func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. 
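// A worked sketch of the zigzag transform undone by DecodeZigzag64 above:
// sint64 values are mapped so small negatives stay small on the wire, and
// (x >> 1) ^ -(x & 1) inverts the mapping; both helpers are illustrative,
// not the proto API.
package main

import "fmt"

func zigzagEncode64(v int64) uint64 { return uint64((v << 1) ^ (v >> 63)) }

func zigzagDecode64(x uint64) int64 { return int64(x>>1) ^ -int64(x&1) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		e := zigzagEncode64(v)
		fmt.Println(v, "->", e, "->", zigzagDecode64(e)) // round-trips
	}
}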
-func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index fe1bd7d9..00000000 --- a/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. 
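// A distilled sketch of the lazily built, atomically published metadata cache
// used by the discard machinery below: check an atomic flag, compute once
// under a mutex, then publish; lazyTable is an illustrative type, not part of
// the package.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type lazyTable struct {
	initialized int32 // 0: not built yet, 1: fields is safe to read
	mu          sync.Mutex
	fields      []string
}

func (t *lazyTable) get() []string {
	if atomic.LoadInt32(&t.initialized) == 0 {
		t.mu.Lock()
		if t.initialized == 0 { // re-check under the lock
			t.fields = []string{"a", "b"} // stands in for the expensive walk
			atomic.StoreInt32(&t.initialized, 1)
		}
		t.mu.Unlock()
	}
	return t.fields
}

func main() {
	var t lazyTable
	fmt.Println(t.get())
}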
-func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - default: // E.g., *pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. 
- switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c91..00000000 --- a/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. 
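// Illustrative round trip through durationFromProto below and durationProto
// further down; both helpers are package-private, so this sketch only
// type-checks inside this package:
//
//	d := durationProto(90 * time.Minute) // &duration{Seconds: 5400, Nanos: 0}
//	got, err := durationFromProto(d)     // 1h30m0s, err == nil
//	_, err = durationFromProto(&duration{Seconds: 1, Nanos: -1})
//	// err != nil: Seconds and Nanos must share a sign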
-func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. -func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index e748e173..00000000 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2c..00000000 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. 
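// Concrete wire-format check for the varint helpers above (EncodeVarint,
// Buffer.EncodeVarint) and SizeVarint below; the values are small enough to
// verify by hand:
//
//	proto.EncodeVarint(1)   // []byte{0x01}
//	proto.EncodeVarint(300) // []byte{0xac, 0x02}: low 7 bits first,
//	                        // continuation bit set on all but the last byte
//	proto.SizeVarint(300)   // 2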
-func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 0f5fb173..00000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,33 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1c..00000000 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. 
- - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. 
- n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. - if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. 
- log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 44ebd457..00000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,604 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. 
-type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - case extensionsBytes: - return slowExtensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. 
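// For orientation, a generated extension descriptor is simply a literal of
// the ExtensionDesc struct below; the names and field number in this sketch
// are invented for illustration:
//
//	var E_MyExt = &proto.ExtensionDesc{
//		ExtendedType:  (*pb.Base)(nil), // hypothetical extendable message
//		ExtensionType: (*string)(nil),
//		Field:         12345,
//		Name:          "example.my_ext",
//		Tag:           "bytes,12345,opt,name=my_ext",
//	}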
-type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if ea, ok := pbi.(slowExtensionAdapter); ok { - pbi = ea.extensionsBytes - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. 
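// Typical call pattern against HasExtension below and its siblings, reusing
// the hypothetical E_MyExt descriptor and msg from the sketches above:
//
//	if err := proto.SetExtension(msg, E_MyExt, proto.String("hi")); err != nil {
//		return err
//	}
//	proto.HasExtension(msg, E_MyExt)         // true
//	v, _ := proto.GetExtension(msg, E_MyExt) // v.(*string)
//	proto.ClearExtension(msg, E_MyExt)       // HasExtension is false again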
-func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, ok := pb.(extensionsBytes); ok { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - return decodeExtensionFromBytes(extension, *ext) - } - - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if cerr := checkExtensionTypes(epb, extension); cerr != nil { - return nil, cerr - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. 
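// The error contract of GetExtension above, backed by defaultExtensionValue
// below; it matters because "unset" splits into two outcomes depending on
// whether the .proto declared a default (E_MyExt remains the hypothetical
// descriptor from earlier):
//
//	v, err := proto.GetExtension(msg, E_MyExt)
//	switch {
//	case err == proto.ErrMissingExtension:
//		// field unset and no default declared
//	case err != nil:
//		// type mismatch or malformed encoding
//	default:
//		_ = v // the set value, or the declared default
//	}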
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, ok := pb.(extensionsBytes); ok { - newb, err := encodeExtension(extension, value) - if err != nil { - return err - } - bb := epb.GetExtensions() - *bb = append(*bb, newb...) 
- return nil - } - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index 53ebd8cc..00000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,368 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" -) - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -type slowExtensionAdapter struct { - extensionsBytes -} - -func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { - panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") -} - -func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - b := s.GetExtensions() - m, err := BytesToExtensionsMap(*b) - if err != nil { - panic(err) - } - return m, notLocker{} -} - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - if err := this.Encode(); err != nil { - return false - } - if err := that.Encode(); err != nil { - return false - } - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - if err := this.Encode(); err != nil { - return 1 - } - if err := that.Encode(); err != nil { - return -1 - } - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - info := getMarshalInfo(reflect.TypeOf(m)) - return info.sizeV1Extensions(m.extensionsWrite()) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return 
EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - o := 0 - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[o:], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - o += n - } - return o, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - e := m[id] - if err := e.Encode(); err != nil { - return nil, err - } - return e.enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) 
- m[int32(tag)] = ext - } -} - -func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { - u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) - ei := u.getExtElemInfo(extension) - v := value - p := toAddrPointer(&v, ei.isptr) - siz := ei.sizer(p, SizeVarint(ei.wiretag)) - buf := make([]byte, 0, siz) - return ei.marshaler(buf, p, ei.wiretag, false) -} - -func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { - o := 0 - for o < len(buf) { - tag, n := DecodeVarint((buf)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - if o+n > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - l, err := size((buf)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - if o+n+l > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - v, err := decodeExtension((buf)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) -} - -func (this *Extension) Encode() error { - if this.enc == nil { - var err error - this.enc, err = encodeExtension(this.desc, this.value) - if err != nil { - return err - } - } - return nil -} - -func (this Extension) GoString() string { - if err := this.Encode(); err != nil { - return fmt.Sprintf("error encoding extension: %v", err) - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index b2271d0b..00000000 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,987 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func 
(m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. 
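-// As an illustrative sketch only: if collectStats were switched on in a
-// custom build (it is a compile-time constant below), the counters could
-// be read like this:
-//
-//	s := proto.GetStats()
-//	fmt.Printf("encodes=%d decodes=%d cache: %d hits / %d misses\n",
-//		s.Encode, s.Decode, s.Chit, s.Cmiss)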
-type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. 
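-// For example (field name illustrative): msg.Count = proto.Int64(42)
-// sets an optional int64 field, since proto2 scalar fields are pointers.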
-func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. 
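-// A minimal usage sketch (the message value is illustrative):
-//
-//	data, _ := proto.Marshal(msg)
-//	proto.NewBuffer(nil).DebugPrint("marshaled message", data)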
-func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or T or []*T or []T - switch f.Kind() { - case reflect.Struct: - setDefaults(f, recur, zeros) - - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.Kind() == reflect.Ptr && e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
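-// For illustration: given the Test message in the package comment, the
-// field `optional int32 type = 2 [default=77]` yields
-// &scalarField{kind: reflect.Int32, value: int32(77)} with
-// nestedMessage=false.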
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Struct: - nestedMessage = true // non-nullable - - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr, reflect.Struct: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion1 = true - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index b3aa3919..00000000 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,50 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "encoding/json" - "strconv" -) - -type Sizer interface { - Size() int -} - -type ProtoSizer interface { - ProtoSize() int -} - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index 3b6ca41d..00000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,314 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "sync" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. 
We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. -func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. - ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
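-// For orientation, a sketch of the bytes this walks for a single item with
-// type_id 12345 and an n-byte embedded message (values illustrative):
-//
-//	0x0b            group 1 start (WireStartGroup)
-//	0x10 0xb9 0x60  field 2 varint: type_id = 12345
-//	0x1a <n> ...    field 3 bytes: the embedded message
-//	0x0c            group 1 end (WireEndGroup)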
-func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. - mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. 
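-// An illustrative call as generated code might emit it (type name and
-// field number are hypothetical):
-//
-//	func init() {
-//		proto.RegisterMessageSetType((*MyExtension)(nil), 12345, "example.MyExtension")
-//	}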
-func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad908..00000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. 
-func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 7ffd3c29..00000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,59 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" -) - -// TODO: untested, so probably incorrect. 
- -func (p pointer) getRef() pointer { - return pointer{v: p.v.Addr()} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d..00000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. 
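-// The [2]unsafe.Pointer reads below rely on an implementation detail of
-// the gc runtime: word 0 of an interface value holds the type descriptor
-// (or itab) and word 1 holds the data pointer. That dependency is exactly
-// why the reflect-based fallback is kept for purego/appengine/js builds.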
-type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index aca8eed0..00000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,56 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
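The atomicLoad*/atomicStore* helpers above exist so a lazily built info struct can be published once and then read without locks; a reduced analog of the pattern (the info type and names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type info struct{ ready bool }

var cached *info // published pointer, read by many goroutines

// loadInfo and storeInfo treat &cached as an *unsafe.Pointer, the same
// cast used by atomicLoadUnmarshalInfo and friends above.
func loadInfo() *info {
	return (*info)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cached))))
}

func storeInfo(v *info) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&cached)), unsafe.Pointer(v))
}

func main() {
	storeInfo(&info{ready: true})
	fmt.Println(loadInfo().ready) // true
}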
- -package proto - -import ( - "reflect" - "unsafe" -) - -func (p pointer) getRef() pointer { - return pointer{p: (unsafe.Pointer)(&p.p)} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index 04dcb8d9..00000000 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,608 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - WktPointer bool - - stype reflect.Type // set for struct types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - case f == "wktptr": - p.WktPointer = true - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
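Parse above consumes the generated struct-tag string: wire encoding first, field number second, then options (with a trailing def= allowed to contain commas, which Parse re-joins). Splitting the example tag from its own comment shows the shape it expects:

package main

import (
	"fmt"
	"strings"
)

func main() {
	tag := "bytes,49,opt,name=foo,def=hello!"
	fields := strings.Split(tag, ",")
	fmt.Println(fields[0])  // wire encoding: "bytes"
	fmt.Println(fields[1])  // field number: "49"
	fmt.Println(fields[2:]) // options: [opt name=foo def=hello!]
}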
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.ctype = typ - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setTag(lockGetProp) - return - } - if p.WktPointer && !isMap { - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - case reflect.Struct: - p.stype = typ - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - case reflect.Struct: - p.stype = t3 - } - case reflect.Struct: - p.stype = t2 - } - - case reflect.Map: - - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.MapValProp.CustomType = p.CustomType - p.MapValProp.StdDuration = p.StdDuration - p.MapValProp.StdTime = p.StdTime - p.MapValProp.WktPointer = p.WktPointer - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
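GetProperties and getPropertiesLocked above implement an optimistic-read cache: an RLock for the common hit, then a full Lock plus a re-check to populate on a miss. A minimal sketch of that pattern (the key type and stand-in computation are illustrative):

package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.RWMutex
	cache = map[string]int{}
)

func lookup(key string) int {
	// Fast path: most lookups hit an entry built earlier.
	mu.RLock()
	v, ok := cache[key]
	mu.RUnlock()
	if ok {
		return v
	}
	// Slow path: take the write lock and re-check before building, since
	// another goroutine may have filled the entry in between.
	mu.Lock()
	defer mu.Unlock()
	if v, ok := cache[key]; ok {
		return v
	}
	v = len(key) // stand-in for the expensive computation
	cache[key] = v
	return v
}

func main() { fmt.Println(lookup("pkg.Message")) }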
-var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 40ea3dd9..00000000 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,36 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
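RegisterType, MessageName, and MessageType above keep a bidirectional mapping between fully qualified proto names and Go types; a reduced standalone analog (MyMsg and the map names are illustrative):

package main

import (
	"fmt"
	"reflect"
)

type MyMsg struct{}

var (
	typesByName = map[string]reflect.Type{}
	namesByType = map[reflect.Type]string{}
)

// register mirrors the shape of RegisterType above: generated code passes
// a typed nil pointer plus the fully qualified proto name.
func register(name string, nilPtr interface{}) {
	t := reflect.TypeOf(nilPtr)
	typesByName[name] = t
	namesByType[t] = name
}

func main() {
	register("pkg.MyMsg", (*MyMsg)(nil))
	fmt.Println(typesByName["pkg.MyMsg"])              // *main.MyMsg
	fmt.Println(namesByType[reflect.TypeOf(&MyMsg{})]) // pkg.MyMsg
}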
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() -var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93f..00000000 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go deleted file mode 100644 index ba58c49a..00000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,3006 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
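Skip above decodes a varint key, switches on the low three bits for the wire type, and advances past the payload. The varint loop itself, extracted as a standalone sketch:

package main

import "fmt"

// decodeVarint mirrors the inner loops of Skip: seven payload bits per
// byte, least-significant group first, high bit set on every byte but
// the last.
func decodeVarint(data []byte) (v uint64, n int) {
	for i, b := range data {
		v |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0 // truncated input
}

func main() {
	v, n := decodeVarint([]byte{0x96, 0x01})
	fmt.Println(v, n) // 150 2
}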
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements - - hassizer bool // has custom sizer - hasprotosizer bool // has custom protosizer - - bytesExtensions field // offset of XXX_extensions where the field type is []byte -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex - - uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - // Uses the message's Size method if available - if u.hassizer { - s := ptr.asPointerTo(u.typ).Interface().(Sizer) - return s.Size() - } - // Uses the message's ProtoSize method if available - if u.hasprotosizer { - s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) - return s.ProtoSize() - } - - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - n += len(s) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - b = append(b, s...) - } - for _, f := range u.fields { - if f.required { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.bytesExtensions = invalidField - u.sizecache = invalidField - isOneofMessage := false - - if reflect.PtrTo(t).Implements(sizerType) { - u.hassizer = true - } - if reflect.PtrTo(t).Implements(protosizerType) { - u.hasprotosizer = true - } - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Tag.Get("protobuf_oneof") != "" { - isOneofMessage = true - } - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - if f.Type.Kind() == reflect.Map { - u.v1extensions = toField(&f) - } else { - u.bytesExtensions = toField(&f) - } - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // get oneof implementers - var oneofImplementers []interface{} - // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - } - } -} - -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
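setTag above precomputes the key that prefixes every field on the wire: the field number shifted left three bits, OR'd with the wire type, then varint-encoded. For the example tag used earlier (field 49, wire type bytes):

package main

import "fmt"

func main() {
	const fieldNum, wireBytes = 49, 2 // WireBytes = 2 in the code above
	key := uint64(fieldNum)<<3 | wireBytes
	fmt.Println(key) // 394, which varint-encodes as 0x8a 0x03
}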
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - ctype := false - isTime := false - isDuration := false - isWktPointer := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - if strings.HasPrefix(tags[i], "customtype=") { - ctype = true - } - if tags[i] == "stdtime" { - isTime = true - } - if tags[i] == "stdduration" { - isDuration = true - } - if tags[i] == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - if !proto3 && !pointer && !slice { - nozero = false - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - if pointer { - return makeCustomPtrMarshaler(getMarshalInfo(t)) - } - return makeCustomMarshaler(getMarshalInfo(t)) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeTimePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeTimePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeTimeSliceMarshaler(getMarshalInfo(t)) - } - return makeTimeMarshaler(getMarshalInfo(t)) - } - - if isDuration { - if pointer { - if slice { - return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationPtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeDurationSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationMarshaler(getMarshalInfo(t)) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValueMarshaler(getMarshalInfo(t)) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValueMarshaler(getMarshalInfo(t)) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint32: - if pointer { - if slice { - return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValueMarshaler(getMarshalInfo(t)) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValueMarshaler(getMarshalInfo(t)) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValueMarshaler(getMarshalInfo(t)) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, 
appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. 
- return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if pointer { - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } else { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageRefMarshaler(getMarshalInfo(t)) - } - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize 
int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return 
n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} 
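[Reviewer note: a minimal, self-contained sketch of the size arithmetic the sizers above share — varint width, the zigzag mapping that keeps small-magnitude negatives short, and the length-delimited framing of a packed slice. The names sizeVarint, zigzag32, and sizePackedZigzag32 are illustrative stand-ins, not the gogo/protobuf API.]

package main

import "fmt"

// sizeVarint reports how many bytes the base-128 varint encoding of v occupies.
func sizeVarint(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// zigzag32 maps a signed value onto an unsigned one so that inputs of small
// magnitude, positive or negative, encode to short varints.
func zigzag32(v int32) uint64 {
	return uint64((uint32(v) << 1) ^ uint32(v>>31))
}

// sizePackedZigzag32 mirrors the shape of sizeZigzag32PackedSlice above:
// payload bytes, plus a varint length prefix, plus the field tag.
func sizePackedZigzag32(s []int32, tagsize int) int {
	if len(s) == 0 {
		return 0
	}
	n := 0
	for _, v := range s {
		n += sizeVarint(zigzag32(v))
	}
	return n + sizeVarint(uint64(n)) + tagsize
}

func main() {
	fmt.Println(zigzag32(-1))                               // 1
	fmt.Println(sizeVarint(300))                            // 2
	fmt.Println(sizePackedZigzag32([]int32{-1, 1, -64}, 1)) // 5: 3 payload + 1 length + 1 tag
}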
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
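[Reviewer note: for reference, a minimal sketch of the length-delimited framing that makeMessageMarshaler above (and the slice variant declared next) emit for embedded message fields — a varint key carrying the field number and wire type 2, a varint byte length, then the pre-encoded payload. appendVarintLoop and frameMessage are illustrative names, not part of this package.]

package main

import "fmt"

// appendVarintLoop is a loop-based equivalent of the unrolled appendVarint
// defined earlier in this file.
func appendVarintLoop(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// frameMessage wraps an already-encoded submessage as field fieldNum with
// wire type 2 (length-delimited), the same key/length/payload layout the
// message marshalers produce.
func frameMessage(b []byte, fieldNum int, payload []byte) []byte {
	wiretag := uint64(fieldNum)<<3 | 2
	b = appendVarintLoop(b, wiretag)
	b = appendVarintLoop(b, uint64(len(payload)))
	return append(b, payload...)
}

func main() {
	sub := []byte{0x08, 0x96, 0x01}                // a submessage: field 1 (varint) = 150
	fmt.Printf("%x\n", frameMessage(nil, 1, sub)) // 0a0308 9601
}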
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - tags := strings.Split(f.Tag.Get("protobuf"), ",") - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - stdOptions := false - for _, t := range tags { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "stdduration" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
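[Reviewer note: a minimal sketch of the determinism strategy shared by the map and extension marshalers in this file — collect the integer keys, sort them, and emit entries in that order, since Go's map iteration order is unspecified and would otherwise let equal messages encode differently byte-for-byte. In the deleted code, makeMapMarshaler sorts only when deterministic output is requested, while appendExtensions skips sorting only in its zero-or-one-extension fast path. encodeSorted and its one-byte stand-in framing are illustrative only.]

package main

import (
	"fmt"
	"sort"
)

// encodeSorted emits map entries in ascending key order when deterministic
// is set; otherwise it takes whatever order Go's map iteration yields.
func encodeSorted(m map[int32][]byte, deterministic bool) []byte {
	keys := make([]int, 0, len(m))
	for k := range m {
		keys = append(keys, int(k))
	}
	if deterministic {
		sort.Ints(keys) // fixed order => byte-for-byte reproducible output
	}
	var b []byte
	for _, k := range keys {
		b = append(b, byte(k)) // stand-in for the real tag/length framing
		b = append(b, m[int32(k)]...)
	}
	return b
}

func main() {
	m := map[int32][]byte{3: {0xcc}, 1: {0xaa}, 2: {0xbb}}
	fmt.Printf("% x\n", encodeSorted(m, true)) // 01 aa 02 bb 03 cc
}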
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. 
-type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if p.deterministic { - if _, ok := pb.(Marshaler); ok { - return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) - } - } - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - var b []byte - b, err = m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go deleted file mode 100644 index 997f57c1..00000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go +++ /dev/null @@ -1,388 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -// makeMessageRefMarshaler differs a bit from makeMessageMarshaler -// It marshal a message T instead of a *T -func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - siz := u.size(ptr) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - b = appendVarint(b, wiretag) - siz := u.cachedsize(ptr) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, ptr, deterministic) - } -} - -// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler -// It marshals a slice of messages []T instead of []*T -func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - var err, errreq error - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - b = appendVarint(b, wiretag) - siz := u.size(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - - return b, errreq - } -} - -func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
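Note: makeDurationMarshaler and its slice/pointer variants pass each time.Duration through durationProto before writing it; the essential step is splitting the duration into the seconds/nanos pair that google.protobuf.Duration carries. A rough sketch of just that conversion, with an illustrative helper name:

package main

import (
	"fmt"
	"time"
)

// toSecondsNanos splits a Go duration into the seconds/nanos pair used by
// google.protobuf.Duration. Go's integer division and remainder already give
// nanos the same sign as seconds, matching the proto convention.
func toSecondsNanos(d time.Duration) (secs int64, nanos int32) {
	return int64(d / time.Second), int32(d % time.Second)
}

func main() {
	s, n := toSecondsNanos(90*time.Second + 250*time.Millisecond)
	fmt.Println(s, n) // 90 250000000
}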
- return b, nil - } -} - -func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go deleted file mode 100644 index f520106e..00000000 --- a/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ /dev/null @@ -1,657 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? 
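Note: the mergeInfo type deleted in this hunk lazy-initializes its field table with a double-checked pattern: an atomic flag guards the fast path, and a mutex serializes the one-time reflection work. A compact sketch of that pattern using only sync and sync/atomic (names are illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type lazyInfo struct {
	initialized int32 // 0: not ready, 1: fields is valid
	lock        sync.Mutex
	fields      []string
}

func (li *lazyInfo) get() []string {
	if atomic.LoadInt32(&li.initialized) == 0 { // fast path: one atomic load
		li.compute()
	}
	return li.fields
}

func (li *lazyInfo) compute() {
	li.lock.Lock()
	defer li.lock.Unlock()
	if li.initialized != 0 { // another goroutine won the race
		return
	}
	li.fields = []string{"a", "b"} // expensive reflection work in the real code
	atomic.StoreInt32(&li.initialized, 1)
}

func main() {
	var li lazyInfo
	fmt.Println(li.get()) // [a b]
}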
- out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) 
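Note: the per-kind merge closures built by computeMergeInfo implement the proto3 scalar rule: a source field overwrites the destination only when it is non-zero (non-empty for strings). The same rule spelled out on a plain struct, without the pointer machinery:

package main

import "fmt"

type msg struct {
	ID   int64
	Name string
}

// merge applies the proto3 scalar merge rule field by field.
func merge(dst, src *msg) {
	if src.ID != 0 {
		dst.ID = src.ID
	}
	if src.Name != "" {
		dst.Name = src.Name
	}
}

func main() {
	dst := msg{ID: 1, Name: "old"}
	merge(&dst, &msg{Name: "new"}) // src.ID is zero, so dst.ID survives
	fmt.Println(dst)               // {1 new}
}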
- if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) - } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - mergeInfo.merge(dst, src) - } - case isSlice: // E.g., []*pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mergeInfo.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mergeInfo.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go deleted file mode 100644 index e6b15c76..00000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2245 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
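Note: the map-merge case above copies every key from src into dst and clones []byte values so the merged messages never alias one backing array. The same rule on a concrete map type, without reflection (types here are illustrative):

package main

import "fmt"

func mergeByteMaps(dst, src map[string][]byte) map[string][]byte {
	if len(src) == 0 {
		return dst
	}
	if dst == nil {
		dst = make(map[string][]byte, len(src))
	}
	for k, v := range src {
		dst[k] = append([]byte{}, v...) // deep copy of the value bytes
	}
	return dst
}

func main() {
	src := map[string][]byte{"a": {1, 2}}
	dst := mergeByteMaps(nil, src)
	src["a"][0] = 9
	fmt.Println(dst["a"]) // [1 2] — unaffected by the later write to src
}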
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. - err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. 
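Note: the unmarshal loop in this hunk starts each iteration by decoding a key varint and splitting it into a field number and a wire type (tag := x >> 3; wire := int(x) & 7); the deleted code also special-cases one- and two-byte keys before falling back to the general varint decoder. A standard-library sketch of the split:

package main

import (
	"encoding/binary"
	"fmt"
)

// splitKey decodes one field key: low three bits are the wire type,
// the remaining bits are the field number.
func splitKey(b []byte) (tag uint64, wire int, rest []byte) {
	x, n := binary.Uvarint(b)
	if n <= 0 {
		return 0, 0, nil // truncated or overflowing key
	}
	return x >> 3, int(x & 7), b[n:]
}

func main() {
	tag, wire, _ := splitKey([]byte{0x0a}) // key for field 1, wire type 2
	fmt.Println(tag, wire)                 // 1 2
}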
- z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - if u.bytesExtensions.IsValid() { - z = m.offset(u.bytesExtensions).toBytes() - break - } - panic("no extensions field available") - } - } - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - u.bytesExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { - u.oldExtensions = toField(&f) - continue - } else if f.Type == reflect.TypeOf(([]byte)(nil)) { - u.bytesExtensions = toField(&f) - continue - } - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." 
+ f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler - if fn.IsValid() && len(oneofFields) > 0 { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - } - } - - // Get extension ranges, if any. - fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? 
- for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - ctype := false - isTime := false - isDuration := false - isWktPointer := false - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - if strings.HasPrefix(tag, "customtype=") { - ctype = true - } - if tag == "stdtime" { - isTime = true - } - if tag == "stdduration" { - isDuration = true - } - if tag == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) - } - if pointer { - return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalCustom(getUnmarshalInfo(t), name) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTime(getUnmarshalInfo(t), name) - } - - if isDuration { - if pointer { - if slice { - return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDuration(getUnmarshalInfo(t), name) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return 
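Note: setTag above stores field handlers in a dense slice for small, common field numbers and falls back to a map for sparse, large ones, so the hot unmarshal loop usually pays only a bounds-checked index. A distilled sketch of that hybrid index (cutoff and value type are illustrative):

package main

import "fmt"

type fieldIndex struct {
	dense  []string          // indexed directly by small field numbers
	sparse map[uint64]string // everything else
}

func (fi *fieldIndex) set(tag uint64, v string) {
	if tag < 32 { // illustrative cutoff; the deleted code compares against 16 and 2*numFields
		for uint64(len(fi.dense)) <= tag {
			fi.dense = append(fi.dense, "")
		}
		fi.dense[tag] = v
		return
	}
	if fi.sparse == nil {
		fi.sparse = map[uint64]string{}
	}
	fi.sparse[tag] = v
}

func (fi *fieldIndex) get(tag uint64) string {
	if tag < uint64(len(fi.dense)) {
		return fi.dense[tag] // fast path
	}
	return fi.sparse[tag]
}

func main() {
	var fi fieldIndex
	fi.set(1, "id")
	fi.set(5000, "ext")
	fmt.Println(fi.get(1), fi.get(5000)) // id ext
}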
makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint32: - if pointer { - if slice { - return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessage(getUnmarshalInfo(t), name) - } - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
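Note: the slice unmarshalers that follow all accept two wire shapes for repeated scalars: one element per tagged record, or a single WireBytes record holding the packed elements back to back. A sketch of the packed branch for varint-encoded values, using only the standard library:

package main

import (
	"encoding/binary"
	"fmt"
	"io"
)

// decodePackedUvarints reads a length-prefixed run of varints, mirroring the
// packed (WireBytes) branch of the deleted slice unmarshalers.
func decodePackedUvarints(b []byte) ([]uint64, []byte, error) {
	length, n := binary.Uvarint(b)
	if n <= 0 || length > uint64(len(b[n:])) {
		return nil, nil, io.ErrUnexpectedEOF
	}
	rest := b[n:]
	payload, rest := rest[:length], rest[length:]
	var out []uint64
	for len(payload) > 0 {
		v, m := binary.Uvarint(payload)
		if m <= 0 {
			return nil, nil, io.ErrUnexpectedEOF
		}
		out = append(out, v)
		payload = payload[m:]
	}
	return out, rest, nil
}

func main() {
	vs, _, err := decodePackedUvarints([]byte{0x03, 0x01, 0x96, 0x01}) // len=3: values 1, 150
	fmt.Println(vs, err)                                               // [1 150] <nil>
}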
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
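Note: the sint64 unmarshalers above undo zigzag encoding with v := int64(x>>1) ^ int64(x)<<63>>63, mapping the unsigned sequence 0, 1, 2, 3, ... back to 0, -1, 1, -2, .... Both directions of the transform, runnable:

package main

import "fmt"

// zigzagEncode64 maps signed values onto small unsigned varints.
func zigzagEncode64(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

// zigzagDecode64 is exactly the expression used by the deleted sint64 unmarshalers:
// the arithmetic shift smears bit 0 into an all-ones or all-zeros mask.
func zigzagDecode64(x uint64) int64 { return int64(x>>1) ^ int64(x)<<63>>63 }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 150} {
		x := zigzagEncode64(v)
		fmt.Println(v, "->", x, "->", zigzagDecode64(x)) // every value round-trips
	}
}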
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
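Note: unmarshalFixed64Value and its siblings assemble eight little-endian bytes by hand (b[0] | b[1]<<8 | ... | b[7]<<56). encoding/binary expresses the same read; the sketch also shows that the double wire format is the same eight bytes reinterpreted:

package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"math"
)

// readFixed64 consumes one fixed64 value and returns the remaining buffer.
func readFixed64(b []byte) (uint64, []byte, error) {
	if len(b) < 8 {
		return 0, nil, io.ErrUnexpectedEOF
	}
	return binary.LittleEndian.Uint64(b), b[8:], nil
}

func main() {
	buf := binary.LittleEndian.AppendUint64(nil, math.Float64bits(1.5))
	v, _, _ := readFixed64(buf)
	fmt.Println(math.Float64frombits(v)) // 1.5
}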
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - 
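Note: the unmarshalUTF8String* variants defined just below differ from the plain string ones only in a trailing utf8.ValidString check: the decoded value is stored either way, and the invalid-UTF-8 error is reported after the fact so parsing can continue. The same store-then-validate shape in miniature (error text is illustrative):

package main

import (
	"fmt"
	"unicode/utf8"
)

// decodeString stores the value first, then reports invalid UTF-8,
// mirroring the deleted unmarshalUTF8StringValue.
func decodeString(raw []byte, dst *string) error {
	v := string(raw)
	*dst = v
	if !utf8.ValidString(v) {
		return fmt.Errorf("proto: invalid UTF-8 string")
	}
	return nil
}

func main() {
	var s string
	err := decodeString([]byte{0xff, 'h', 'i'}, &s)
	fmt.Printf("%q %v\n", s, err) // "\xffhi" proto: invalid UTF-8 string
}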
} - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n 
== 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - tagArray := strings.Split(f.Tag.Get("protobuf"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - for _, t := range tagArray { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - } - if t == "stdduration" { - valTags = append(valTags, t) - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) <= 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go deleted file mode 100644 index 00d6c7ad..00000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go +++ /dev/null @@ -1,385 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. 
- // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f // gogo: changed from v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.New(sub.typ)) - m := s.Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := reflect.New(sub.typ) - c := m.Interface().(custom) - if err := c.Unmarshal(b[:x]); err != nil { - return nil, err - } - v := valToPointer(m) - f.appendRef(v, sub.typ) - return b[x:], nil - } -} - -func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - m := f.asPointerTo(sub.typ).Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil,
errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&d)) - return b[x:], nil - } -} - -func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(d)) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice :=
f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&d)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(d)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 0407ba85..00000000 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,928 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
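The writer deleted here emits the protobuf text format; its writeString (further below) deliberately loops over bytes rather than runes and uses octal escapes instead of Go escape sequences, so the output stays interoperable with other languages' text-format implementations. A minimal, self-contained sketch of that escaping convention, with illustrative names that are not from the vendored source:

    package main

    import (
        "fmt"
        "strings"
    )

    // escapeTextString mirrors the rules the deleted writeString documents:
    // walk bytes (not runes), special-case \n \r \t " and backslash, and
    // render every other non-printable byte as a three-digit octal escape.
    func escapeTextString(s string) string {
        var b strings.Builder
        b.WriteByte('"')
        for i := 0; i < len(s); i++ { // bytes, not runes
            switch c := s[i]; c {
            case '\n':
                b.WriteString(`\n`)
            case '\r':
                b.WriteString(`\r`)
            case '\t':
                b.WriteString(`\t`)
            case '"':
                b.WriteString(`\"`)
            case '\\':
                b.WriteString(`\\`)
            default:
                if c >= 0x20 && c < 0x7f { // printable ASCII passes through
                    b.WriteByte(c)
                } else {
                    fmt.Fprintf(&b, `\%03o`, c)
                }
            }
        }
        b.WriteByte('"')
        return b.String()
    }

    func main() {
        fmt.Println(escapeTextString("héllo\n")) // "h\303\251llo\n"
    }

As in the original, apostrophes are left unescaped; the C++ parser copes with a naked apostrophe.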
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). 
-// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. 
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. 
- v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } 
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa0..00000000 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index 1ce0be2f..00000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
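The parser deleted below tokenizes that same text format with simple byte classes: '#' starts a comment that runs to the end of the line, whitespace separates tokens, and identifiers and numbers are matched by [-+._A-Za-z0-9]. A self-contained sketch of that scanning step, under hypothetical names of my own rather than the vendored API:

    package main

    import "fmt"

    // isIdentOrNumberChar reproduces the character class the deleted
    // parser documents: [-+._A-Za-z0-9].
    func isIdentOrNumberChar(c byte) bool {
        return c == '-' || c == '+' || c == '.' || c == '_' ||
            ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') ||
            ('0' <= c && c <= '9')
    }

    // nextIdent skips whitespace and #-comments, then returns the next
    // identifier-or-number token along with the remaining input.
    func nextIdent(s string) (tok, rest string) {
        i := 0
        for i < len(s) {
            switch c := s[i]; {
            case c == ' ' || c == '\t' || c == '\n' || c == '\r':
                i++
            case c == '#': // comment: skip to end of line
                for i < len(s) && s[i] != '\n' {
                    i++
                }
            default:
                j := i
                for j < len(s) && isIdentOrNumberChar(s[j]) {
                    j++
                }
                return s[i:j], s[j:]
            }
        }
        return "", ""
    }

    func main() {
        tok, rest := nextIdent("# a comment\n  count: 42")
        fmt.Printf("%q %q\n", tok, rest) // "count" ": 42"
    }

The vendored parser layers single-symbol tokens, quoted-string handling, line/offset tracking, and one-token backup on top of this same skeleton.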
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
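The grammar accepted by readStruct and the helpers above can be seen end to end in a small sketch; this sample input is illustrative only (it is not part of the vendored sources, and the field names are hypothetical):

package main

import "fmt"

// sample shows the shapes the tokenizer and readStruct accept: '#' comments,
// adjacent quoted strings that concatenate, map entries written as
// "< key: K value: V >", an optional ':' before '{' or '<', and optional
// ';' or ',' separators between fields. All field names are hypothetical.
const sample = `
name: "multi-part " "strings concatenate"   # adjacent quoted strings merge
count: 42;                                  # ';' and ',' separators are optional
labels < key: "env" value: "prod" >         # map entry; colon optional before '<'
ids: [1, 2, 3]                              # repeated field in list notation
`

func main() { fmt.Print(sample) }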
-func (p *textParser) consumeOptionalSeparator() error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value != ";" && tok.value != "," {
-		p.back()
-	}
-	return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value == "" {
-		return p.errorf("unexpected EOF")
-	}
-	if len(props.CustomType) > 0 {
-		if props.Repeated {
-			t := reflect.TypeOf(v.Interface())
-			if t.Kind() == reflect.Slice {
-				tc := reflect.TypeOf(new(Marshaler))
-				ok := t.Elem().Implements(tc.Elem())
-				if ok {
-					fv := v
-					flen := fv.Len()
-					if flen == fv.Cap() {
-						nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
-						reflect.Copy(nav, fv)
-						fv.Set(nav)
-					}
-					fv.SetLen(flen + 1)
-
-					// Read one.
-					p.back()
-					return p.readAny(fv.Index(flen), props)
-				}
-			}
-		}
-		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
-			custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
-			err := custom.Unmarshal([]byte(tok.unquoted))
-			if err != nil {
-				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
-			}
-			v.Set(reflect.ValueOf(custom))
-		} else {
-			custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
-			err := custom.Unmarshal([]byte(tok.unquoted))
-			if err != nil {
-				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
-			}
-			v.Set(reflect.Indirect(reflect.ValueOf(custom)))
-		}
-		return nil
-	}
-	if props.StdTime {
-		fv := v
-		p.back()
-		props.StdTime = false
-		tproto := &timestamp{}
-		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
-		props.StdTime = true
-		if err != nil {
-			return err
-		}
-		tim, err := timestampFromProto(tproto)
-		if err != nil {
-			return err
-		}
-		if props.Repeated {
-			t := reflect.TypeOf(v.Interface())
-			if t.Kind() == reflect.Slice {
-				if t.Elem().Kind() == reflect.Ptr {
-					ts := fv.Interface().([]*time.Time)
-					ts = append(ts, &tim)
-					fv.Set(reflect.ValueOf(ts))
-					return nil
-				} else {
-					ts := fv.Interface().([]time.Time)
-					ts = append(ts, tim)
-					fv.Set(reflect.ValueOf(ts))
-					return nil
-				}
-			}
-		}
-		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
-			v.Set(reflect.ValueOf(&tim))
-		} else {
-			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
-		}
-		return nil
-	}
-	if props.StdDuration {
-		fv := v
-		p.back()
-		props.StdDuration = false
-		dproto := &duration{}
-		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
-		props.StdDuration = true
-		if err != nil {
-			return err
-		}
-		dur, err := durationFromProto(dproto)
-		if err != nil {
-			return err
-		}
-		if props.Repeated {
-			t := reflect.TypeOf(v.Interface())
-			if t.Kind() == reflect.Slice {
-				if t.Elem().Kind() == reflect.Ptr {
-					ds := fv.Interface().([]*time.Duration)
-					ds = append(ds, &dur)
-					fv.Set(reflect.ValueOf(ds))
-					return nil
-				} else {
-					ds := fv.Interface().([]time.Duration)
-					ds = append(ds, dur)
-					fv.Set(reflect.ValueOf(ds))
-					return nil
-				}
-			}
-		}
-		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
-			v.Set(reflect.ValueOf(&dur))
-		} else {
-			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
-		}
-		return nil
-	}
-	switch fv := v; fv.Kind() {
-	case reflect.Slice:
-		at := v.Type()
-		if at.Elem().Kind() == reflect.Uint8 {
-			// Special case for []byte
-			if tok.value[0] != '"' && tok.value[0] != '\'' {
-				// Deliberately written out here, as the error after
-				// this switch statement would write "invalid []byte: ...",
-				// which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int8: - if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int16: - if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint8: - if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint16: - if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f654..00000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
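The two bounds quoted above can be cross-checked against the standard library; this is a small sketch (not part of the vendored sources) that recomputes them and mirrors the time.Unix mapping used by the conversion functions that follow:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Recompute the bounds stated in the comments above.
	minSec := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix()     // -62135596800
	maxSec := time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix() // 253402300800
	fmt.Println(minSec, maxSec)

	// timestampFromProto below maps (Seconds, Nanos) through time.Unix in UTC.
	fmt.Println(time.Unix(minSec, 0).UTC()) // 0001-01-01 00:00:00 +0000 UTC
}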
-func validateTimestamp(ts *timestamp) error {
-	if ts == nil {
-		return errors.New("timestamp: nil Timestamp")
-	}
-	if ts.Seconds < minValidSeconds {
-		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
-	}
-	if ts.Seconds >= maxValidSeconds {
-		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
-	}
-	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
-		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
-	}
-	return nil
-}
-
-// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if timestampFromProto returns an error, the first
-// return value is not the zero time.Time. Instead, it is the value obtained
-// from the time.Unix function when passed the contents of the Timestamp, in
-// the UTC locale. This may or may not be a meaningful time; many invalid
-// Timestamps do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-func timestampFromProto(ts *timestamp) (time.Time, error) {
-	// Don't return the zero value on error, because it corresponds to a valid
-	// timestamp. Instead return whatever time.Unix gives us.
-	var t time.Time
-	if ts == nil {
-		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
-	} else {
-		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
-	}
-	return t, validateTimestamp(ts)
-}
-
-// timestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-func timestampProto(t time.Time) (*timestamp, error) {
-	seconds := t.Unix()
-	nanos := int32(t.Sub(time.Unix(seconds, 0)))
-	ts := &timestamp{
-		Seconds: seconds,
-		Nanos:   nanos,
-	}
-	if err := validateTimestamp(ts); err != nil {
-		return nil, err
-	}
-	return ts, nil
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
deleted file mode 100644
index 38439fa9..00000000
--- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2016, The GoGo Authors.  All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - -type timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *timestamp) Reset() { *m = timestamp{} } -func (*timestamp) ProtoMessage() {} -func (*timestamp) String() string { return "timestamp" } - -func init() { - RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go deleted file mode 100644 index b175d1b6..00000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers.go +++ /dev/null @@ -1,1888 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go deleted file mode 100644 index c1cf7bf8..00000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go +++ /dev/null @@ -1,113 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -type float64Value struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float64Value) Reset() { *m = float64Value{} } -func (*float64Value) ProtoMessage() {} -func (*float64Value) String() string { return "float64" } - -type float32Value struct { - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float32Value) Reset() { *m = float32Value{} } -func (*float32Value) ProtoMessage() {} -func (*float32Value) String() string { return "float32" } - -type int64Value struct { - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int64Value) Reset() { *m = int64Value{} } -func (*int64Value) ProtoMessage() {} -func (*int64Value) String() string { return "int64" } - -type uint64Value struct { - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint64Value) Reset() { *m = uint64Value{} } -func (*uint64Value) ProtoMessage() {} -func (*uint64Value) String() string { return "uint64" } - -type int32Value struct { - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int32Value) Reset() { *m = int32Value{} } -func (*int32Value) ProtoMessage() {} -func (*int32Value) String() string { return "int32" } - -type uint32Value struct { - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint32Value) Reset() { *m = uint32Value{} } -func (*uint32Value) ProtoMessage() {} -func (*uint32Value) String() string { return "uint32" } - -type boolValue struct { - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *boolValue) Reset() { *m = boolValue{} } -func (*boolValue) ProtoMessage() {} -func (*boolValue) String() string { return "bool" } - -type stringValue struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *stringValue) Reset() { *m = stringValue{} } -func (*stringValue) ProtoMessage() {} -func (*stringValue) String() string { return "string" } - -type bytesValue struct { - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *bytesValue) Reset() { *m = bytesValue{} } -func (*bytesValue) ProtoMessage() {} -func (*bytesValue) String() string { return "[]byte" } - -func init() { - RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") - RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") - RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") - RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") - RegisterType((*int32Value)(nil), 
"gogo.protobuf.proto.Int32Value") - RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") - RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") - RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") - RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue") -} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 00000000..6bd9f674 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,85 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package empty + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_900544acb223d5b8, []int{0} +} + +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { + proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) +} + +var fileDescriptor_900544acb223d5b8 = []byte{ + // 148 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, + 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, + 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 
0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, + 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, + 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, + 0xb7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto similarity index 54% rename from vendor/github.com/gogo/protobuf/proto/custom_gogo.go rename to vendor/github.com/golang/protobuf/ptypes/empty/empty.proto index 24552483..03cacd23 100644 --- a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto @@ -1,7 +1,6 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -13,6 +12,9 @@ // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -26,14 +28,25 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -package proto +syntax = "proto3"; -import "reflect" +package google.protobuf; -type custom interface { - Marshal() ([]byte, error) - Unmarshal(data []byte) error - Size() int -} +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/empty"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; -var customType = reflect.TypeOf((*custom)(nil)).Elem() +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60..00000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f..00000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! 
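
For context on the well-known Empty type vendored in above: a minimal illustrative sketch (not part of this patch) showing that Empty carries no fields and therefore encodes to zero bytes on the wire, which is what makes it a cheap placeholder request/response message.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/empty"
)

func main() {
	// Empty has no fields, so its wire encoding is an empty byte slice.
	b, err := proto.Marshal(&empty.Empty{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded length: %d\n", len(b)) // encoded length: 0
}
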
- -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f6..00000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc68268..00000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 9d92c11f..00000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). 
- -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9d..00000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9a..00000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. 
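
The doc comment above points out that a UUID here is a 16-byte array rather than a byte slice; an illustrative sketch (not part of this patch) of what that buys: UUIDs are comparable values, so they work directly as map keys and with ==.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// uuid.UUID is a [16]byte array, so it is comparable and can be
	// used as a map key without any string conversion.
	id := uuid.MustParse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	names := map[uuid.UUID]string{id: "DNS namespace"}

	fmt.Println(names[id])                // DNS namespace
	fmt.Println(id == uuid.NameSpaceDNS)  // true: same 16 bytes
}
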
-package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod deleted file mode 100644 index fc84cd79..00000000 --- a/vendor/github.com/google/uuid/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b1746163..00000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) - h.Write(data) - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 7f9e0c6c..00000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err == nil { - *uuid = id - } - return err -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
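
hash.go above implements name-based UUIDs; an illustrative sketch (not part of this patch) showing that NewSHA1 is deterministic, so the same namespace/name pair always yields the same Version 5 UUID.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// NewSHA1 hashes the namespace UUID concatenated with the name and
	// stamps the result as Version 5, so the mapping is reproducible.
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))

	fmt.Println(a == b)      // true: deterministic
	fmt.Println(a.Version()) // VERSION_5
}
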
-func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b0..00000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index 24b78edc..00000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
-func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcddb..00000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index f326b54d..00000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06cd..00000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
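
sql.go above wires uuid.UUID into database/sql; an illustrative sketch (not part of this patch) exercising Scan and Value directly, without a live database, using the string shape a driver would typically hand back.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Scan accepts the string and []byte forms a database driver
	// returns; Value renders the canonical string form for writes.
	var id uuid.UUID
	if err := id.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		panic(err)
	}

	v, err := id.Value() // driver.Value holding the string form
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 6ba7b810-9dad-11d1-80b4-00c04fd430c8
}
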
- -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. 
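
time.go above stores timestamps as 100-nanosecond ticks since 15 Oct 1582 and shifts them by g1582ns100 to reach the Unix epoch; an illustrative sketch (not part of this patch) recovering a wall-clock time from a Version 1 UUID via UnixTime.

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	// A Version 1 UUID embeds its creation time as 100 ns ticks since
	// 15 Oct 1582; UnixTime converts that back onto the Unix epoch.
	id, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}

	sec, nsec := id.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC()) // approximately time.Now()
}
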
-func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c737..00000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. -func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 524404cc..00000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// Parse decodes s into a UUID or returns an error. 
Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. -func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. 
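
Parse above accepts four encodings; an illustrative sketch (not part of this patch) showing that the plain, URN, Microsoft-braced, and raw-hex forms all decode to the same 16-byte value.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// All four encodings accepted by Parse, spelling the same UUID.
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
		"6ba7b8109dad11d180b400c04fd430c8",
	}
	for _, s := range forms {
		id, err := uuid.Parse(s)
		fmt.Println(id, err) // same UUID each time, err == nil
	}
}
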
-func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 199a1ac6..00000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. 
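
The doc comment above recommends New for most cases; an illustrative sketch (not part of this patch) generating a random Version 4 UUID and reading it back through the inspection helpers defined above.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// New is uuid.Must(uuid.NewRandom()): a random Version 4 UUID
	// carrying the RFC 4122 variant bits.
	v4 := uuid.New()

	fmt.Println(v4.Version(), v4.Variant()) // VERSION_4 RFC4122
	fmt.Println(v4.URN())                   // urn:uuid:xxxxxxxx-...
}
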
-func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - copy(uuid[10:], nodeID[:]) - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 84af91c9..00000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/hashicorp/go-cty/LICENSE similarity index 94% rename from vendor/github.com/mattn/go-colorable/LICENSE rename to vendor/github.com/hashicorp/go-cty/LICENSE index 91b5cef3..d6503b55 100644 --- a/vendor/github.com/mattn/go-colorable/LICENSE +++ b/vendor/github.com/hashicorp/go-cty/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2016 Yasuhiro Matsumoto +Copyright (c) 2017-2018 Martin Atkins Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/hashicorp/go-cty/cty/capsule.go b/vendor/github.com/hashicorp/go-cty/cty/capsule.go new file mode 100644 index 00000000..2fdc15ea --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/capsule.go @@ -0,0 +1,128 @@ +package cty + +import ( + "fmt" + "reflect" +) + +type capsuleType struct { + typeImplSigil + Name string + GoType reflect.Type + Ops *CapsuleOps +} + +func (t *capsuleType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(*capsuleType); ok { + // capsule types compare by pointer identity + return otherP == t + } + return false +} + +func (t *capsuleType) FriendlyName(mode friendlyTypeNameMode) string { + return t.Name +} + +func (t *capsuleType) GoString() string { + impl := t.Ops.TypeGoString + if impl == nil { + // To get a useful representation of our native type requires some + // 
shenanigans. + victimVal := reflect.Zero(t.GoType) + if t.Ops == noCapsuleOps { + return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface()) + } else { + // Including the operations in the output will make this _very_ long, + // so in practice any capsule type with ops ought to provide a + // TypeGoString function to override this with something more + // reasonable. + return fmt.Sprintf("cty.CapsuleWithOps(%q, reflect.TypeOf(%#v), %#v)", t.Name, victimVal.Interface(), t.Ops) + } + } + return impl(t.GoType) +} + +// Capsule creates a new Capsule type. +// +// A Capsule type is a special type that can be used to transport arbitrary +// Go native values of a given type through the cty type system. A language +// that uses cty as its type system might, for example, provide functions +// that return capsule-typed values and then other functions that operate +// on those values. +// +// From cty's perspective, Capsule types have a few interesting characteristics, +// described in the following paragraphs. +// +// Each capsule type has an associated Go native type that it is able to +// transport. Capsule types compare by identity, so each call to the +// Capsule function creates an entirely-distinct cty Type, even if two calls +// use the same native type. +// +// Each capsule-typed value contains a pointer to a value of the given native +// type. A capsule-typed value by default supports no operations except +// equality, and equality is implemented by pointer identity of the +// encapsulated pointer. A capsule type can optionally have its own +// implementations of certain operations if it is created with CapsuleWithOps +// instead of Capsule. +// +// The given name is used as the new type's "friendly name". This can be any +// string in principle, but will usually be a short, all-lowercase name aimed +// at users of the embedding language (i.e. not mention Go-specific details) +// and will ideally not create ambiguity with any predefined cty type. +// +// Capsule types are never introduced by any standard cty operation, so a +// calling application opts in to including them within its own type system +// by creating them and introducing them via its own functions. At that point, +// the application is responsible for dealing with any capsule-typed values +// that might be returned. +func Capsule(name string, nativeType reflect.Type) Type { + return Type{ + &capsuleType{ + Name: name, + GoType: nativeType, + Ops: noCapsuleOps, + }, + } +} + +// CapsuleWithOps is like Capsule except the caller may provide an object +// representing some overloaded operation implementations to associate with +// the given capsule type. +// +// All of the other caveats and restrictions for capsule types still apply, but +// overloaded operations can potentially help a capsule type participate better +// in cty operations. +func CapsuleWithOps(name string, nativeType reflect.Type, ops *CapsuleOps) Type { + // Copy the operations to make sure the caller can't modify them after + // we're constructed. + ourOps := *ops + ourOps.assertValid() + + return Type{ + &capsuleType{ + Name: name, + GoType: nativeType, + Ops: &ourOps, + }, + } +} + +// IsCapsuleType returns true if this type is a capsule type, as created +// by cty.Capsule . +func (t Type) IsCapsuleType() bool { + _, ok := t.typeImpl.(*capsuleType) + return ok +} + +// EncapsulatedType returns the encapsulated native type of a capsule type, +// or panics if the receiver is not a Capsule type. 
+//
+// Use IsCapsuleType to determine if this method is safe to call.
+func (t Type) EncapsulatedType() reflect.Type {
+	impl, ok := t.typeImpl.(*capsuleType)
+	if !ok {
+		panic("not a capsule type")
+	}
+	return impl.GoType
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go b/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go
new file mode 100644
index 00000000..3ff6855e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go
@@ -0,0 +1,132 @@
+package cty
+
+import (
+	"reflect"
+)
+
+// CapsuleOps represents a set of overloaded operations for a capsule type.
+//
+// Each field is a reference to a function that can either be nil or can be
+// set to an implementation of the corresponding operation. If an operation
+// function is nil then it isn't supported for the given capsule type.
+type CapsuleOps struct {
+	// GoString provides the GoString implementation for values of the
+	// corresponding type. Conventionally this should return a string
+	// representation of an expression that would produce an equivalent
+	// value.
+	GoString func(val interface{}) string
+
+	// TypeGoString provides the GoString implementation for the corresponding
+	// capsule type itself.
+	TypeGoString func(goTy reflect.Type) string
+
+	// Equals provides the implementation of the Equals operation. This is
+	// called only with known, non-null values of the corresponding type,
+	// but if the corresponding type is a compound type then it must be
+	// ready to detect and handle nested unknown or null values, usually
+	// by recursively calling Value.Equals on those nested values.
+	//
+	// The result value must always be of type cty.Bool, or the Equals
+	// operation will panic.
+	//
+	// If RawEquals is set without also setting Equals, the RawEquals
+	// implementation will be used as a fallback implementation. That fallback
+	// is appropriate only for leaf types that do not contain any nested
+	// cty.Value that would need to distinguish Equals vs. RawEquals for their
+	// own equality.
+	//
+	// If RawEquals is nil then Equals must also be nil, selecting the default
+	// pointer-identity comparison instead.
+	Equals func(a, b interface{}) Value
+
+	// RawEquals provides the implementation of the RawEquals operation.
+	// This is called only with known, non-null values of the corresponding
+	// type, but if the corresponding type is a compound type then it must be
+	// ready to detect and handle nested unknown or null values, usually
+	// by recursively calling Value.RawEquals on those nested values.
+	//
+	// If RawEquals is nil, values of the corresponding type are compared by
+	// pointer identity of the encapsulated value.
+	RawEquals func(a, b interface{}) bool
+
+	// ConversionFrom can provide conversions from the corresponding type to
+	// some other type when values of the corresponding type are used with
+	// the "convert" package. (The main cty package does not use this operation.)
+	//
+	// This function itself returns a function, allowing it to switch its
+	// behavior depending on the given source type. Return nil to indicate
+	// that no such conversion is available.
+	ConversionFrom func(src Type) func(interface{}, Path) (Value, error)
+
+	// ConversionTo can provide conversions to the corresponding type from
+	// some other type when values of the corresponding type are used with
+	// the "convert" package. (The main cty package does not use this operation.)
+	//
+	// This function itself returns a function, allowing it to switch its
+	// behavior depending on the given destination type. Return nil to indicate
+	// that no such conversion is available.
+	ConversionTo func(dst Type) func(Value, Path) (interface{}, error)
+
+	// ExtensionData is an extension point for applications that wish to
+	// create their own extension features using capsule types.
+	//
+	// The key argument is any value that can be compared with Go's ==
+	// operator, but should be of a named type in a package belonging to the
+	// application defining the key. An ExtensionData implementation must
+	// check to see if the given key is familiar to it, and if so return a
+	// suitable value for the key.
+	//
+	// If the given key is unrecognized, the ExtensionData function must
+	// return a nil interface. (Importantly, not an interface containing a nil
+	// pointer of some other type.)
+	// The common implementation of ExtensionData is a single switch statement
+	// over "key" which has a default case returning nil.
+	//
+	// The meaning of any given key is entirely up to the application that
+	// defines it. Applications consuming ExtensionData from capsule types
+	// should do so defensively: if the result of ExtensionData is not valid,
+	// prefer to ignore it or gracefully produce an error rather than causing
+	// a panic.
+	ExtensionData func(key interface{}) interface{}
+}
+
+// noCapsuleOps is a pointer to a CapsuleOps with no functions set, which
+// is used as the default operations value when a type is created using
+// the Capsule function.
+var noCapsuleOps = &CapsuleOps{}
+
+func (ops *CapsuleOps) assertValid() {
+	if ops.RawEquals == nil && ops.Equals != nil {
+		panic("Equals cannot be set without RawEquals")
+	}
+}
+
+// CapsuleOps returns a pointer to the CapsuleOps value for a capsule type,
+// or panics if the receiver is not a capsule type.
+//
+// The caller must not modify the CapsuleOps.
+func (ty Type) CapsuleOps() *CapsuleOps {
+	if !ty.IsCapsuleType() {
+		panic("not a capsule-typed value")
+	}
+
+	return ty.typeImpl.(*capsuleType).Ops
+}
+
+// CapsuleExtensionData is a convenience interface to the ExtensionData
+// function that can be optionally implemented for a capsule type. It will
+// check to see if the underlying type implements ExtensionData and call it
+// if so. If not, it will return nil to indicate that the given key is not
+// supported.
+//
+// See the documentation for CapsuleOps.ExtensionData for more information
+// on the purpose of and usage of this mechanism.
+//
+// If CapsuleExtensionData is called on a non-capsule type then it will panic.
+func (ty Type) CapsuleExtensionData(key interface{}) interface{} {
+	ops := ty.CapsuleOps()
+	if ops.ExtensionData == nil {
+		return nil
+	}
+	return ops.ExtensionData(key)
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/collection.go b/vendor/github.com/hashicorp/go-cty/cty/collection.go
new file mode 100644
index 00000000..ab3919b1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/collection.go
@@ -0,0 +1,34 @@
+package cty
+
+import (
+	"errors"
+)
+
+type collectionTypeImpl interface {
+	ElementType() Type
+}
+
+// IsCollectionType returns true if the given type supports the operations
+// that are defined for all collection types.
+func (t Type) IsCollectionType() bool {
+	_, ok := t.typeImpl.(collectionTypeImpl)
+	return ok
+}
+
+// ElementType returns the element type of the receiver if it is a collection
+// type, or panics if it is not. Use IsCollectionType first to test whether
+// this method will succeed.
+func (t Type) ElementType() Type {
+	if ct, ok := t.typeImpl.(collectionTypeImpl); ok {
+		return ct.ElementType()
+	}
+	panic(errors.New("not a collection type"))
+}
+
+// ElementCallback is a callback type used for iterating over elements of
+// collections and attributes of objects.
+//
+// The types of key and value depend on what type is being iterated over.
+// Return true to stop iterating after the current element, or false to
+// continue iterating.
+type ElementCallback func(key Value, val Value) (stop bool)
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go b/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go
new file mode 100644
index 00000000..6ad3bff4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go
@@ -0,0 +1,165 @@
+package convert
+
+import (
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// compareTypes implements a preference order for unification.
+//
+// The result of this method is not useful for anything other than unification
+// preferences, since it assumes that the caller will verify that any suggested
+// conversion is actually possible and it is thus able to make certain
+// optimistic assumptions.
+func compareTypes(a cty.Type, b cty.Type) int {
+
+	// DynamicPseudoType always has lowest preference, because anything can
+	// convert to it (it acts as a placeholder for "any type") and we want
+	// to optimistically assume that any dynamics will converge on matching
+	// their neighbors.
+	if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType {
+		if a != cty.DynamicPseudoType {
+			return -1
+		}
+		if b != cty.DynamicPseudoType {
+			return 1
+		}
+		return 0
+	}
+
+	if a.IsPrimitiveType() && b.IsPrimitiveType() {
+		// String is a supertype of all primitive types, because we can
+		// represent all primitive values as specially-formatted strings.
+		if a == cty.String || b == cty.String {
+			if a != cty.String {
+				return 1
+			}
+			if b != cty.String {
+				return -1
+			}
+			return 0
+		}
+	}
+
+	if a.IsListType() && b.IsListType() {
+		return compareTypes(a.ElementType(), b.ElementType())
+	}
+	if a.IsSetType() && b.IsSetType() {
+		return compareTypes(a.ElementType(), b.ElementType())
+	}
+	if a.IsMapType() && b.IsMapType() {
+		return compareTypes(a.ElementType(), b.ElementType())
+	}
+
+	// From this point on we may have swapped the two items in order to
+	// simplify our cases. Therefore any non-zero return after this point
+	// must be multiplied by "swap" to potentially invert the return value
+	// if needed.
+	swap := 1
+	switch {
+	case a.IsTupleType() && b.IsListType():
+		fallthrough
+	case a.IsObjectType() && b.IsMapType():
+		fallthrough
+	case a.IsSetType() && b.IsTupleType():
+		fallthrough
+	case a.IsSetType() && b.IsListType():
+		a, b = b, a
+		swap = -1
+	}
+
+	if b.IsSetType() && (a.IsTupleType() || a.IsListType()) {
+		// We'll just optimistically assume that the element types are
+		// unifyable/convertible, and let a second recursive pass
+		// figure out how to make that so.
+		return -1 * swap
+	}
+
+	if a.IsListType() && b.IsTupleType() {
+		// We'll just optimistically assume that the tuple's element types
+		// can be unified into something compatible with the list's element
+		// type.
+		return -1 * swap
+	}
+
+	if a.IsMapType() && b.IsObjectType() {
+		// We'll just optimistically assume that the object's attribute types
+		// can be unified into something compatible with the map's element
+		// type.
+ return -1 * swap + } + + // For object and tuple types, comparing two types doesn't really tell + // the whole story because it may be possible to construct a new type C + // that is the supertype of both A and B by unifying each attribute/element + // separately. That possibility is handled by Unify as a follow-up if + // type sorting is insufficient to produce a valid result. + // + // Here we will take care of the simple possibilities where no new type + // is needed. + if a.IsObjectType() && b.IsObjectType() { + atysA := a.AttributeTypes() + atysB := b.AttributeTypes() + + if len(atysA) != len(atysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for k := range atysA { + if _, has := atysB[k]; !has { + return 0 + } + + cmp := compareTypes(atysA[k], atysB[k]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + if a.IsTupleType() && b.IsTupleType() { + etysA := a.TupleElementTypes() + etysB := b.TupleElementTypes() + + if len(etysA) != len(etysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for i := range etysA { + cmp := compareTypes(etysA[i], etysB[i]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + + return 0 +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go new file mode 100644 index 00000000..9c59c8f7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go @@ -0,0 +1,190 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversion is an internal variant of Conversion that carries around +// a cty.Path to be used in error responses. +type conversion func(cty.Value, cty.Path) (cty.Value, error) + +func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { + conv := getConversionKnown(in, out, unsafe) + if conv == nil { + return nil + } + + // Wrap the conversion in some standard checks that we don't want to + // have to repeat in every conversion function. + var ret conversion + ret = func(in cty.Value, path cty.Path) (cty.Value, error) { + if in.IsMarked() { + // We must unmark during the conversion and then re-apply the + // same marks to the result. + in, inMarks := in.Unmark() + v, err := ret(in, path) + if v != cty.NilVal { + v = v.WithMarks(inMarks) + } + return v, err + } + + if out == cty.DynamicPseudoType { + // Conversion to DynamicPseudoType always just passes through verbatim. + return in, nil + } + if !in.IsKnown() { + return cty.UnknownVal(out), nil + } + if in.IsNull() { + // We'll pass through nulls, albeit type converted, and let + // the caller deal with whatever handling they want to do in + // case null values are considered valid in some applications. + return cty.NullVal(out), nil + } + + return conv(in, path) + } + + return ret +} + +func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { + switch { + + case out == cty.DynamicPseudoType: + // Conversion *to* DynamicPseudoType means that the caller wishes + // to allow any type in this position, so we'll produce a do-nothing + // conversion that just passes through the value as-is. 
+		return dynamicPassthrough
+
+	case unsafe && in == cty.DynamicPseudoType:
+		// Conversion *from* DynamicPseudoType means that we have a value
+		// whose type isn't yet known during type checking. For these we will
+		// assume that conversion will succeed and deal with any errors that
+		// result (which is why we can only do this when "unsafe" is set).
+		return dynamicFixup(out)
+
+	case in.IsPrimitiveType() && out.IsPrimitiveType():
+		conv := primitiveConversionsSafe[in][out]
+		if conv != nil {
+			return conv
+		}
+		if unsafe {
+			return primitiveConversionsUnsafe[in][out]
+		}
+		return nil
+
+	case out.IsObjectType() && in.IsObjectType():
+		return conversionObjectToObject(in, out, unsafe)
+
+	case out.IsTupleType() && in.IsTupleType():
+		return conversionTupleToTuple(in, out, unsafe)
+
+	case out.IsListType() && (in.IsListType() || in.IsSetType()):
+		inEty := in.ElementType()
+		outEty := out.ElementType()
+		if inEty.Equals(outEty) {
+			// This indicates that we're converting from set to list with
+			// the same element type, so we don't need an element converter.
+			return conversionCollectionToList(outEty, nil)
+		}
+
+		convEty := getConversion(inEty, outEty, unsafe)
+		if convEty == nil {
+			return nil
+		}
+		return conversionCollectionToList(outEty, convEty)
+
+	case out.IsSetType() && (in.IsListType() || in.IsSetType()):
+		if in.IsListType() && !unsafe {
+			// Conversion from list to set is unsafe because it will lose
+			// information: the ordering will not be preserved, and any
+			// duplicate elements will be conflated.
+			return nil
+		}
+		inEty := in.ElementType()
+		outEty := out.ElementType()
+		convEty := getConversion(inEty, outEty, unsafe)
+		if inEty.Equals(outEty) {
+			// This indicates that we're converting from list to set with
+			// the same element type, so we don't need an element converter.
+			return conversionCollectionToSet(outEty, nil)
+		}
+
+		if convEty == nil {
+			return nil
+		}
+		return conversionCollectionToSet(outEty, convEty)
+
+	case out.IsMapType() && in.IsMapType():
+		inEty := in.ElementType()
+		outEty := out.ElementType()
+		convEty := getConversion(inEty, outEty, unsafe)
+		if convEty == nil {
+			return nil
+		}
+		return conversionCollectionToMap(outEty, convEty)
+
+	case out.IsListType() && in.IsTupleType():
+		outEty := out.ElementType()
+		return conversionTupleToList(in, outEty, unsafe)
+
+	case out.IsSetType() && in.IsTupleType():
+		outEty := out.ElementType()
+		return conversionTupleToSet(in, outEty, unsafe)
+
+	case out.IsMapType() && in.IsObjectType():
+		outEty := out.ElementType()
+		return conversionObjectToMap(in, outEty, unsafe)
+
+	case out.IsObjectType() && in.IsMapType():
+		if !unsafe {
+			// Converting a map to an object is an "unsafe" conversion,
+			// because we don't know if all the map keys will correspond to
+			// object attributes.
+			return nil
+		}
+		return conversionMapToObject(in, out, unsafe)
+
+	case in.IsCapsuleType() || out.IsCapsuleType():
+		if !unsafe {
+			// Capsule types can only participate in "unsafe" conversions,
+			// because we don't know enough about their conversion behaviors
+			// to be sure that they will always be safe.
+			return nil
+		}
+		if in.Equals(out) {
+			// conversion to self is never allowed
+			return nil
+		}
+		if out.IsCapsuleType() {
+			if fn := out.CapsuleOps().ConversionTo; fn != nil {
+				return conversionToCapsule(in, out, fn)
+			}
+		}
+		if in.IsCapsuleType() {
+			if fn := in.CapsuleOps().ConversionFrom; fn != nil {
+				return conversionFromCapsule(in, out, fn)
+			}
+		}
+		// No conversion operation is available, then.
+ return nil + + default: + return nil + + } +} + +// retConversion wraps a conversion (internal type) so it can be returned +// as a Conversion (public type). +func retConversion(conv conversion) Conversion { + if conv == nil { + return nil + } + + return func(in cty.Value) (cty.Value, error) { + return conv(in, cty.Path(nil)) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go new file mode 100644 index 00000000..6a6006af --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go @@ -0,0 +1,31 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +func conversionToCapsule(inTy, outTy cty.Type, fn func(inTy cty.Type) func(cty.Value, cty.Path) (interface{}, error)) conversion { + rawConv := fn(inTy) + if rawConv == nil { + return nil + } + + return func(in cty.Value, path cty.Path) (cty.Value, error) { + rawV, err := rawConv(in, path) + if err != nil { + return cty.NilVal, err + } + return cty.CapsuleVal(outTy, rawV), nil + } +} + +func conversionFromCapsule(inTy, outTy cty.Type, fn func(outTy cty.Type) func(interface{}, cty.Path) (cty.Value, error)) conversion { + rawConv := fn(outTy) + if rawConv == nil { + return nil + } + + return func(in cty.Value, path cty.Path) (cty.Value, error) { + return rawConv(in.EncapsulatedValue(), path) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go new file mode 100644 index 00000000..575973d3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go @@ -0,0 +1,488 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionCollectionToList returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a list. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a list. (For example, +// if we're converting from a set into a list of the same element type.) +func conversionCollectionToList(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(elems), nil + } +} + +// conversionCollectionToSet returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a set. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a set. (For example, +// if we're converting from a list into a set of the same element type.) 
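+//
+// As an illustrative sketch (hypothetical caller code, not part of this
+// file), this path is reached via the public API when converting a list
+// value to a set type:
+//
+//	// hypothetical usage of the public convert API
+//	l := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("a")})
+//	v, err := convert.Convert(l, cty.Set(cty.String))
+//	// v is a one-element set: the duplicate "a" values are conflated,
+//	// which is why list-to-set is offered only as an "unsafe" conversion.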
+func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + // Prefer a concrete type over a dynamic type when returning an + // empty set + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(elems), nil + } +} + +// conversionCollectionToMap returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a map. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a map. +func conversionCollectionToMap(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, 0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: key, + } + + keyStr, err := Convert(key, cty.String) + if err != nil { + // Should never happen, because keys can only be numbers or + // strings and both can convert to string. + return cty.DynamicVal, elemPath.NewErrorf("cannot convert key type %s to string for map", key.Type().FriendlyName()) + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + + elems[keyStr.AsString()] = val + } + + if len(elems) == 0 { + // Prefer a concrete type over a dynamic type when returning an + // empty map + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.MapValEmpty(ety), nil + } + + if ety.IsCollectionType() || ety.IsObjectType() { + var err error + if elems, err = conversionUnifyCollectionElements(elems, path, false); err != nil { + return cty.NilVal, err + } + } + + if err := conversionCheckMapElementTypes(elems, path); err != nil { + return cty.NilVal, err + } + + return cty.MapVal(elems), nil + } +} + +// conversionTupleToSet returns a conversion that will take a value of the +// given tuple type and return a set of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. +func conversionTupleToSet(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.SetValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. 
+		listEty, _ = unify(tupleEtys, unsafe)
+		if listEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make([]conversion, len(tupleEtys))
+	for i, tupleEty := range tupleEtys {
+		if tupleEty.Equals(listEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
+		if elemConvs[i] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make([]cty.Value, 0, len(elemConvs))
+		elemPath := append(path.Copy(), nil)
+		i := int64(0)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, val := it.Element()
+			var err error
+
+			elemPath[len(elemPath)-1] = cty.IndexStep{
+				Key: cty.NumberIntVal(i),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, elemPath)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems = append(elems, val)
+
+			i++
+		}
+
+		return cty.SetVal(elems), nil
+	}
+}
+
+// conversionTupleToList returns a conversion that will take a value of the
+// given tuple type and return a list of the given element type.
+//
+// Will panic if the given tupleType isn't actually a tuple type.
+func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion {
+	tupleEtys := tupleType.TupleElementTypes()
+
+	if len(tupleEtys) == 0 {
+		// Empty tuple short-circuit
+		return func(val cty.Value, path cty.Path) (cty.Value, error) {
+			return cty.ListValEmpty(listEty), nil
+		}
+	}
+
+	if listEty == cty.DynamicPseudoType {
+		// This is a special case where the caller wants us to find
+		// a suitable single type that all elements can convert to, if
+		// possible.
+		listEty, _ = unify(tupleEtys, unsafe)
+		if listEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make([]conversion, len(tupleEtys))
+	for i, tupleEty := range tupleEtys {
+		if tupleEty.Equals(listEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
+		if elemConvs[i] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make([]cty.Value, 0, len(elemConvs))
+		elemPath := append(path.Copy(), nil)
+		i := int64(0)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, val := it.Element()
+			var err error
+
+			elemPath[len(elemPath)-1] = cty.IndexStep{
+				Key: cty.NumberIntVal(i),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, elemPath)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems = append(elems, val)
+
+			i++
+		}
+
+		return cty.ListVal(elems), nil
+	}
+}
+
+// conversionObjectToMap returns a conversion that will take a value of the
+// given object type and return a map of the given element type.
+//
+// Will panic if the given objectType isn't actually an object type.
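+//
+// For illustration (hypothetical caller code, not part of this file), this
+// is the path taken when converting an object value to a map type via the
+// public API:
+//
+//	// hypothetical usage of the public convert API
+//	o := cty.ObjectVal(map[string]cty.Value{
+//		"a": cty.NumberIntVal(1),
+//		"b": cty.NumberIntVal(2),
+//	})
+//	v, err := convert.Convert(o, cty.Map(cty.Number))
+//	// v is cty.MapVal(map[string]cty.Value{
+//	//	"a": cty.NumberIntVal(1),
+//	//	"b": cty.NumberIntVal(2),
+//	// })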
+func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion {
+	objectAtys := objectType.AttributeTypes()
+
+	if len(objectAtys) == 0 {
+		// Empty object short-circuit
+		return func(val cty.Value, path cty.Path) (cty.Value, error) {
+			return cty.MapValEmpty(mapEty), nil
+		}
+	}
+
+	if mapEty == cty.DynamicPseudoType {
+		// This is a special case where the caller wants us to find
+		// a suitable single type that all elements can convert to, if
+		// possible.
+		objectAtysList := make([]cty.Type, 0, len(objectAtys))
+		for _, aty := range objectAtys {
+			objectAtysList = append(objectAtysList, aty)
+		}
+		mapEty, _ = unify(objectAtysList, unsafe)
+		if mapEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make(map[string]conversion, len(objectAtys))
+	for name, objectAty := range objectAtys {
+		if objectAty.Equals(mapEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[name] = getConversion(objectAty, mapEty, unsafe)
+		if elemConvs[name] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make(map[string]cty.Value, len(elemConvs))
+		elemPath := append(path.Copy(), nil)
+		it := val.ElementIterator()
+		for it.Next() {
+			name, val := it.Element()
+			var err error
+
+			elemPath[len(elemPath)-1] = cty.IndexStep{
+				Key: name,
+			}
+
+			conv := elemConvs[name.AsString()]
+			if conv != nil {
+				val, err = conv(val, elemPath)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems[name.AsString()] = val
+		}
+
+		if mapEty.IsCollectionType() || mapEty.IsObjectType() {
+			var err error
+			if elems, err = conversionUnifyCollectionElements(elems, path, unsafe); err != nil {
+				return cty.NilVal, err
+			}
+		}
+
+		if err := conversionCheckMapElementTypes(elems, path); err != nil {
+			return cty.NilVal, err
+		}
+
+		return cty.MapVal(elems), nil
+	}
+}
+
+// conversionMapToObject returns a conversion that will take a value of the
+// given map type and return an object of the given type. The object attribute
+// types must all be compatible with the map element type.
+//
+// Will panic if the given mapType and objType are not maps and objects
+// respectively.
+func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conversion {
+	objectAtys := objType.AttributeTypes()
+	mapEty := mapType.ElementType()
+
+	elemConvs := make(map[string]conversion, len(objectAtys))
+	for name, objectAty := range objectAtys {
+		if objectAty.Equals(mapEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[name] = getConversion(mapEty, objectAty, unsafe)
+		if elemConvs[name] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make(map[string]cty.Value, len(elemConvs))
+		elemPath := append(path.Copy(), nil)
+		it := val.ElementIterator()
+		for it.Next() {
+			name, val := it.Element()
+
+			// if there is no corresponding attribute, we skip this key
+			if _, ok := objectAtys[name.AsString()]; !ok {
+				continue
+			}
+
+			var err error
+
+			elemPath[len(elemPath)-1] = cty.IndexStep{
+				Key: name,
+			}
+
+			conv := elemConvs[name.AsString()]
+			if conv != nil {
+				val, err = conv(val, elemPath)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+
+			elems[name.AsString()] = val
+		}
+
+		return cty.ObjectVal(elems), nil
+	}
+}
+
+func conversionUnifyCollectionElements(elems map[string]cty.Value, path cty.Path, unsafe bool) (map[string]cty.Value, error) {
+	elemTypes := make([]cty.Type, 0, len(elems))
+	for _, elem := range elems {
+		elemTypes = append(elemTypes, elem.Type())
+	}
+	unifiedType, _ := unify(elemTypes, unsafe)
+	if unifiedType == cty.NilType {
+		// The element types have no common base type, so the whole
+		// conversion is impossible.
+		return nil, path.NewErrorf("all map elements must have the same type")
+	}
+
+	unifiedElems := make(map[string]cty.Value)
+	elemPath := append(path.Copy(), nil)
+
+	for name, elem := range elems {
+		if elem.Type().Equals(unifiedType) {
+			unifiedElems[name] = elem
+			continue
+		}
+		conv := getConversion(elem.Type(), unifiedType, unsafe)
+		if conv == nil {
+			// A nil conversion would panic when called below, so we must
+			// fail here instead.
+			return nil, path.NewErrorf("cannot convert %s to %s", elem.Type().FriendlyName(), unifiedType.FriendlyName())
+		}
+		elemPath[len(elemPath)-1] = cty.IndexStep{
+			Key: cty.StringVal(name),
+		}
+		val, err := conv(elem, elemPath)
+		if err != nil {
+			return nil, err
+		}
+		unifiedElems[name] = val
+	}
+
+	return unifiedElems, nil
+}
+
+func conversionCheckMapElementTypes(elems map[string]cty.Value, path cty.Path) error {
+	elementType := cty.NilType
+	elemPath := append(path.Copy(), nil)
+
+	for name, elem := range elems {
+		if elementType == cty.NilType {
+			elementType = elem.Type()
+			continue
+		}
+		if !elementType.Equals(elem.Type()) {
+			elemPath[len(elemPath)-1] = cty.IndexStep{
+				Key: cty.StringVal(name),
+			}
+			return elemPath.NewErrorf("%s is required", elementType.FriendlyName())
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go
new file mode 100644
index 00000000..5f571da1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go
@@ -0,0 +1,33 @@
+package convert
+
+import (
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// dynamicFixup deals with just-in-time conversions of values that were
+// input-typed as cty.DynamicPseudoType during analysis, ensuring that
+// we end up with the desired output type once the value is known, or
+// failing with an error if that is not possible.
+//
+// This is in the spirit of the cty philosophy of optimistically assuming that
+// DynamicPseudoType values will become the intended value eventually, and
+// dealing with any inconsistencies during final evaluation.
+func dynamicFixup(wantType cty.Type) conversion {
+	return func(in cty.Value, path cty.Path) (cty.Value, error) {
+		ret, err := Convert(in, wantType)
+		if err != nil {
+			// Re-wrap this error so that the returned path is relative
+			// to the caller's original value, rather than relative to our
+			// conversion value here.
+ return cty.NilVal, path.NewError(err) + } + return ret, nil + } +} + +// dynamicPassthrough is an identity conversion that is used when the +// target type is DynamicPseudoType, indicating that the caller doesn't care +// which type is returned. +func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { + return in, nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go new file mode 100644 index 00000000..93743ca8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go @@ -0,0 +1,76 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionObjectToObject returns a conversion that will make the input +// object type conform to the output object type, if possible. +// +// Conversion is possible only if the output type is a subset of the input +// type, meaning that each attribute of the output type has a corresponding +// attribute in the input type where a recursive conversion is available. +// +// Shallow object conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit the above definition of "subset". +func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion { + inAtys := in.AttributeTypes() + outAtys := out.AttributeTypes() + attrConvs := make(map[string]conversion) + + for name, outAty := range outAtys { + inAty, exists := inAtys[name] + if !exists { + // No conversion is available, then. + return nil + } + + if inAty.Equals(outAty) { + // No conversion needed, but we'll still record the attribute + // in our map for later reference. + attrConvs[name] = nil + continue + } + + attrConvs[name] = getConversion(inAty, outAty, unsafe) + if attrConvs[name] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. + return nil + } + } + + // If we get here then a conversion is possible, using the attribute + // conversions given in attrConvs. 
+ return func(val cty.Value, path cty.Path) (cty.Value, error) { + attrVals := make(map[string]cty.Value, len(attrConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + for it := val.ElementIterator(); it.Next(); { + nameVal, val := it.Element() + var err error + + name := nameVal.AsString() + *pathStep = cty.GetAttrStep{ + Name: name, + } + + conv, exists := attrConvs[name] + if !exists { + continue + } + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + attrVals[name] = val + } + + return cty.ObjectVal(attrVals), nil + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go new file mode 100644 index 00000000..a5534441 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go @@ -0,0 +1,57 @@ +package convert + +import ( + "strings" + + "github.com/hashicorp/go-cty/cty" +) + +var stringTrue = cty.StringVal("true") +var stringFalse = cty.StringVal("false") + +var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ + cty.Number: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + f := val.AsBigFloat() + return cty.StringVal(f.Text('f', -1)), nil + }, + }, + cty.Bool: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + if val.True() { + return stringTrue, nil + } else { + return stringFalse, nil + } + }, + }, +} + +var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ + cty.String: { + cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { + v, err := cty.ParseNumberVal(val.AsString()) + if err != nil { + return cty.NilVal, path.NewErrorf("a number is required") + } + return v, nil + }, + cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { + switch val.AsString() { + case "true", "1": + return cty.True, nil + case "false", "0": + return cty.False, nil + default: + switch strings.ToLower(val.AsString()) { + case "true": + return cty.NilVal, path.NewErrorf("a bool is required; to convert from string, use lowercase \"true\"") + case "false": + return cty.NilVal, path.NewErrorf("a bool is required; to convert from string, use lowercase \"false\"") + default: + return cty.NilVal, path.NewErrorf("a bool is required") + } + } + }, + }, +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go new file mode 100644 index 00000000..d89ec380 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go @@ -0,0 +1,71 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionTupleToTuple returns a conversion that will make the input +// tuple type conform to the output tuple type, if possible. +// +// Conversion is possible only if the two tuple types have the same number +// of elements and the corresponding elements by index can be converted. +// +// Shallow tuple conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit which element type conversions are possible. 
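+//
+// A sketch of hypothetical caller code (not part of this file): the
+// element-wise behavior means a tuple can change its element types as
+// long as each position is convertible, e.g. in unsafe mode:
+//
+//	// hypothetical usage of the public convert API
+//	t := cty.TupleVal([]cty.Value{cty.StringVal("1"), cty.True})
+//	conv := convert.GetConversionUnsafe(t.Type(), cty.Tuple([]cty.Type{cty.Number, cty.String}))
+//	v, err := conv(t)
+//	// v is cty.TupleVal([]cty.Value{cty.NumberIntVal(1), cty.StringVal("true")});
+//	// unsafe mode is needed because string-to-number may fail in general.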
+func conversionTupleToTuple(in, out cty.Type, unsafe bool) conversion {
+	inEtys := in.TupleElementTypes()
+	outEtys := out.TupleElementTypes()
+
+	if len(inEtys) != len(outEtys) {
+		return nil // no conversion is possible
+	}
+
+	elemConvs := make([]conversion, len(inEtys))
+
+	for i, outEty := range outEtys {
+		inEty := inEtys[i]
+
+		if inEty.Equals(outEty) {
+			// No conversion needed, so we can leave this one nil.
+			continue
+		}
+
+		elemConvs[i] = getConversion(inEty, outEty, unsafe)
+		if elemConvs[i] == nil {
+			// If a recursive conversion isn't available, then our top-level
+			// configuration is impossible too.
+			return nil
+		}
+	}
+
+	// If we get here then a conversion is possible, using the element
+	// conversions given in elemConvs.
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elemVals := make([]cty.Value, len(elemConvs))
+		path = append(path, nil)
+		pathStep := &path[len(path)-1]
+
+		i := 0
+		for it := val.ElementIterator(); it.Next(); i++ {
+			_, val := it.Element()
+			var err error
+
+			*pathStep = cty.IndexStep{
+				Key: cty.NumberIntVal(int64(i)),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+
+			elemVals[i] = val
+		}
+
+		return cty.TupleVal(elemVals), nil
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go b/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go
new file mode 100644
index 00000000..2037299b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go
@@ -0,0 +1,15 @@
+// Package convert contains some routines for converting between cty types.
+// The intent of providing this package is to encourage applications using
+// cty to have consistent type conversion behavior for maximal interoperability
+// when Values pass from one application to another.
+//
+// The conversions are categorized into two categories. "Safe" conversions are
+// ones that are guaranteed to succeed if given a non-null value of the
+// appropriate source type. "Unsafe" conversions, on the other hand, are valid
+// for only a subset of input values, and thus may fail with an error when
+// called for values outside of that valid subset.
+//
+// The functions whose names end in Unsafe support all of the conversions that
+// are supported by the corresponding functions whose names do not have that
+// suffix, and then additional unsafe conversions as well.
+package convert
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go b/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go
new file mode 100644
index 00000000..72f307f2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go
@@ -0,0 +1,220 @@
+package convert
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// MismatchMessage is a helper to return an English-language description of
+// the differences between got and want, phrased as a reason why got does
+// not conform to want.
+//
+// This function does not itself attempt conversion, and so it should generally
+// be used only after a conversion has failed, to report the conversion failure
+// to an English-speaking user. The result will be confusing if got is actually
+// conforming to or convertible to want.
+//
+// The shorthand helper function Convert uses this function internally to
+// produce its error messages, so callers of that function do not need to
+// also use MismatchMessage.
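+//
+// As an assumed example (not from the upstream docs): a failed conversion
+// from cty.Object(map[string]cty.Type{"name": cty.String}) to
+// cty.Object(map[string]cty.Type{"name": cty.String, "age": cty.Number})
+// yields the message: attribute "age" is required.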
+//
+// This function is similar to Type.TestConformance, but it is tailored to
+// describing conversion failures and so the messages it generates relate
+// specifically to the conversion rules implemented in this package.
+func MismatchMessage(got, want cty.Type) string {
+	switch {
+
+	case got.IsObjectType() && want.IsObjectType():
+		// If both types are object types then we may be able to say something
+		// about their respective attributes.
+		return mismatchMessageObjects(got, want)
+
+	case got.IsTupleType() && want.IsListType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from tuple to list failed then it's because we couldn't
+		// find a common type to convert all of the tuple elements to.
+		return "all list elements must have the same type"
+
+	case got.IsTupleType() && want.IsSetType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from tuple to set failed then it's because we couldn't
+		// find a common type to convert all of the tuple elements to.
+		return "all set elements must have the same type"
+
+	case got.IsObjectType() && want.IsMapType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from object to map failed then it's because we couldn't
+		// find a common type to convert all of the object attributes to.
+		return "all map elements must have the same type"
+
+	case (got.IsTupleType() || got.IsObjectType()) && want.IsCollectionType():
+		return mismatchMessageCollectionsFromStructural(got, want)
+
+	case got.IsCollectionType() && want.IsCollectionType():
+		return mismatchMessageCollectionsFromCollections(got, want)
+
+	default:
+		// If we have nothing better to say, we'll just state what was required.
+		return want.FriendlyNameForConstraint() + " required"
+	}
+}
+
+func mismatchMessageObjects(got, want cty.Type) string {
+	// Per our conversion rules, "got" is allowed to be a superset of "want",
+	// and so we'll produce error messages here under that assumption.
+	gotAtys := got.AttributeTypes()
+	wantAtys := want.AttributeTypes()
+
+	// If we find missing attributes then we'll report those in preference,
+	// but if not then we will report a maximum of one non-conforming
+	// attribute, just to keep our messages relatively terse.
+	// We'll also prefer to report a recursive type error from an _unsafe_
+	// conversion over a safe one, because these are subjectively more
+	// "serious".
+	var missingAttrs []string
+	var unsafeMismatchAttr string
+	var safeMismatchAttr string
+
+	for name, wantAty := range wantAtys {
+		gotAty, exists := gotAtys[name]
+		if !exists {
+			missingAttrs = append(missingAttrs, name)
+			continue
+		}
+
+		// We'll now try to convert these attributes in isolation and
+		// see if we have a nested conversion error to report.
+		// We'll try an unsafe conversion first, and then fall back on
+		// safe if unsafe is not possible.
+
+		// If we already have an unsafe mismatch attr error then we won't bother
+		// hunting for another one.
+		if unsafeMismatchAttr != "" {
+			continue
+		}
+		if conv := GetConversionUnsafe(gotAty, wantAty); conv == nil {
+			unsafeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty))
+		}
+
+		// If we already have a safe mismatch attr error then we won't bother
+		// hunting for another one.
+ if safeMismatchAttr != "" { + continue + } + if conv := GetConversion(gotAty, wantAty); conv == nil { + safeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + } + + // We should now have collected at least one problem. If we have more than + // one then we'll use our preference order to decide what is most important + // to report. + switch { + + case len(missingAttrs) != 0: + sort.Strings(missingAttrs) + switch len(missingAttrs) { + case 1: + return fmt.Sprintf("attribute %q is required", missingAttrs[0]) + case 2: + return fmt.Sprintf("attributes %q and %q are required", missingAttrs[0], missingAttrs[1]) + default: + sort.Strings(missingAttrs) + var buf bytes.Buffer + for _, name := range missingAttrs[:len(missingAttrs)-1] { + fmt.Fprintf(&buf, "%q, ", name) + } + fmt.Fprintf(&buf, "and %q", missingAttrs[len(missingAttrs)-1]) + return fmt.Sprintf("attributes %s are required", buf.Bytes()) + } + + case unsafeMismatchAttr != "": + return unsafeMismatchAttr + + case safeMismatchAttr != "": + return safeMismatchAttr + + default: + // We should never get here, but if we do then we'll return + // just a generic message. + return "incorrect object attributes" + } +} + +func mismatchMessageCollectionsFromStructural(got, want cty.Type) string { + // First some straightforward cases where the kind is just altogether wrong. + switch { + case want.IsListType() && !got.IsTupleType(): + return want.FriendlyNameForConstraint() + " required" + case want.IsSetType() && !got.IsTupleType(): + return want.FriendlyNameForConstraint() + " required" + case want.IsMapType() && !got.IsObjectType(): + return want.FriendlyNameForConstraint() + " required" + } + + // If the kinds are matched well enough then we'll move on to checking + // individual elements. + wantEty := want.ElementType() + switch { + case got.IsTupleType(): + for i, gotEty := range got.TupleElementTypes() { + if gotEty.Equals(wantEty) { + continue // exact match, so no problem + } + if conv := getConversion(gotEty, wantEty, true); conv != nil { + continue // conversion is available, so no problem + } + return fmt.Sprintf("element %d: %s", i, MismatchMessage(gotEty, wantEty)) + } + + // If we get down here then something weird is going on but we'll + // return a reasonable fallback message anyway. + return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint()) + + case got.IsObjectType(): + for name, gotAty := range got.AttributeTypes() { + if gotAty.Equals(wantEty) { + continue // exact match, so no problem + } + if conv := getConversion(gotAty, wantEty, true); conv != nil { + continue // conversion is available, so no problem + } + return fmt.Sprintf("element %q: %s", name, MismatchMessage(gotAty, wantEty)) + } + + // If we get down here then something weird is going on but we'll + // return a reasonable fallback message anyway. + return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint()) + + default: + // Should not be possible to get here since we only call this function + // with got as structural types, but... + return want.FriendlyNameForConstraint() + " required" + } +} + +func mismatchMessageCollectionsFromCollections(got, want cty.Type) string { + // First some straightforward cases where the kind is just altogether wrong. 
+	switch {
+	case want.IsListType() && !(got.IsListType() || got.IsSetType()):
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsSetType() && !(got.IsListType() || got.IsSetType()):
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsMapType() && !got.IsMapType():
+		return want.FriendlyNameForConstraint() + " required"
+	}
+
+	// If the kinds are matched well enough then we'll check the element types.
+	gotEty := got.ElementType()
+	wantEty := want.ElementType()
+	noun := "element type"
+	switch {
+	case want.IsListType():
+		noun = "list element type"
+	case want.IsSetType():
+		noun = "set element type"
+	case want.IsMapType():
+		noun = "map element type"
+	}
+	return fmt.Sprintf("incorrect %s: %s", noun, MismatchMessage(gotEty, wantEty))
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/public.go b/vendor/github.com/hashicorp/go-cty/cty/convert/public.go
new file mode 100644
index 00000000..3b50a692
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/convert/public.go
@@ -0,0 +1,83 @@
+package convert
+
+import (
+	"errors"
+
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// This file contains the public interface of this package, which is intended
+// to be a small, convenient interface designed for easy integration into
+// a hypothetical language type checker and interpreter.
+
+// Conversion is a named function type representing a conversion from a
+// value of one type to a value of another type.
+//
+// The source type for a conversion is always the source type given to
+// the function that returned the Conversion, but there is no way to recover
+// that from a Conversion value itself. If a Conversion is given a value
+// that is not of its expected type (with the exception of DynamicPseudoType,
+// which is always supported) then the function may panic or produce undefined
+// results.
+type Conversion func(in cty.Value) (out cty.Value, err error)
+
+// GetConversion returns a Conversion between the given in and out Types if
+// a safe one is available, or returns nil otherwise.
+func GetConversion(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, false))
+}
+
+// GetConversionUnsafe returns a Conversion between the given in and out Types
+// if either a safe or unsafe one is available, or returns nil otherwise.
+func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, true))
+}
+
+// Convert returns the result of converting the given value to the given type
+// if a safe or unsafe conversion is available, or returns an error if such a
+// conversion is impossible.
+//
+// This is a convenience wrapper around calling GetConversionUnsafe and then
+// immediately passing the given value to the resulting function.
+func Convert(in cty.Value, want cty.Type) (cty.Value, error) {
+	if in.Type().Equals(want) {
+		return in, nil
+	}
+
+	conv := GetConversionUnsafe(in.Type(), want)
+	if conv == nil {
+		return cty.NilVal, errors.New(MismatchMessage(in.Type(), want))
+	}
+	return conv(in)
+}
+
+// Unify attempts to find the most general type that can be converted from
+// all of the given types. If this is possible, that type is returned along
+// with a slice of necessary conversions for some of the given types.
+//
+// If no common supertype can be found, this function returns cty.NilType and
+// a nil slice.
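+//
+// For example (an illustrative sketch, not from the upstream docs),
+// unifying cty.Number with cty.String selects cty.String, because every
+// primitive value has a string representation:
+//
+//	// hypothetical caller code
+//	ty, convs := convert.Unify([]cty.Type{cty.Number, cty.String})
+//	// ty is cty.String; convs[0] converts numbers to strings and
+//	// convs[1] is nil because no conversion is needed for cty.String.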
+// +// If a common supertype *can* be found, the returned slice will always be +// non-nil and will contain a non-nil conversion for each given type that +// needs to be converted, with indices corresponding to the input slice. +// Any given type that does *not* need conversion (because it is already of +// the appropriate type) will have a nil Conversion. +// +// cty.DynamicPseudoType is, as usual, a special case. If the given type list +// contains a mixture of dynamic and non-dynamic types, the dynamic types are +// disregarded for type selection and a conversion is returned for them that +// will attempt a late conversion of the given value to the target type, +// failing with a conversion error if the eventual concrete type is not +// compatible. If *all* given types are DynamicPseudoType, or in the +// degenerate case of an empty slice of types, the returned type is itself +// cty.DynamicPseudoType and no conversions are attempted. +func Unify(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, false) +} + +// UnifyUnsafe is the same as Unify except that it may return unsafe +// conversions in situations where a safe conversion isn't also available. +func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, true) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go b/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go new file mode 100644 index 00000000..8a9c3276 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go @@ -0,0 +1,69 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// sortTypes produces an ordering of the given types that serves as a +// preference order for the result of unification of the given types. +// The return value is a slice of indices into the given slice, and will +// thus always be the same length as the given slice. +// +// The goal is that the most general of the given types will appear first +// in the ordering. If there are uncomparable pairs of types in the list +// then they will appear in an undefined order, and the unification pass +// will presumably then fail. +func sortTypes(tys []cty.Type) []int { + l := len(tys) + + // First we build a graph whose edges represent "more general than", + // which we will then do a topological sort of. + edges := make([][]int, l) + for i := 0; i < (l - 1); i++ { + for j := i + 1; j < l; j++ { + cmp := compareTypes(tys[i], tys[j]) + switch { + case cmp < 0: + edges[i] = append(edges[i], j) + case cmp > 0: + edges[j] = append(edges[j], i) + } + } + } + + // Compute the in-degree of each node + inDegree := make([]int, l) + for _, outs := range edges { + for _, j := range outs { + inDegree[j]++ + } + } + + // The array backing our result will double as our queue for visiting + // the nodes, with the queue slice moving along this array until it + // is empty and positioned at the end of the array. Thus our visiting + // order is also our result order. + result := make([]int, l) + queue := result[0:0] + + // Initialize the queue with any item of in-degree 0, preserving + // their relative order. 
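+	// As an illustrative example (assumed, not from the upstream source):
+	// for tys = [cty.String, cty.Number, cty.Bool], cty.String is preferred
+	// over both other types, so edges[0] = [1, 2], the in-degrees are
+	// [0, 1, 1], and the queue starts as [0], producing the visit order
+	// [0, 1, 2] with cty.String first.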
+ for i, n := range inDegree { + if n == 0 { + queue = append(queue, i) + } + } + + for len(queue) != 0 { + i := queue[0] + queue = queue[1:] + for _, j := range edges[i] { + inDegree[j]-- + if inDegree[j] == 0 { + queue = append(queue, j) + } + } + } + + return result +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go b/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go new file mode 100644 index 00000000..b2a3bbe5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go @@ -0,0 +1,357 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// The current unify implementation is somewhat inefficient, but we accept this +// under the assumption that it will generally be used with small numbers of +// types and with types of reasonable complexity. However, it does have a +// "happy path" where all of the given types are equal. +// +// This function is likely to have poor performance in cases where any given +// types are very complex (lots of deeply-nested structures) or if the list +// of types itself is very large. In particular, it will walk the nested type +// structure under the given types several times, especially when given a +// list of types for which unification is not possible, since each permutation +// will be tried to determine that result. +func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + if len(types) == 0 { + // Degenerate case + return cty.NilType, nil + } + + // If all of the given types are of the same structural kind, we may be + // able to construct a new type that they can all be unified to, even if + // that is not one of the given types. We must try this before the general + // behavior below because in unsafe mode we can convert an object type to + // a subset of that type, which would be a much less useful conversion for + // unification purposes. + { + mapCt := 0 + objectCt := 0 + tupleCt := 0 + dynamicCt := 0 + for _, ty := range types { + switch { + case ty.IsMapType(): + mapCt++ + case ty.IsObjectType(): + objectCt++ + case ty.IsTupleType(): + tupleCt++ + case ty == cty.DynamicPseudoType: + dynamicCt++ + default: + break + } + } + switch { + case mapCt > 0 && (mapCt+dynamicCt) == len(types): + return unifyMapTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && (objectCt+dynamicCt) == len(types): + return unifyObjectTypes(types, unsafe, dynamicCt > 0) + case tupleCt > 0 && (tupleCt+dynamicCt) == len(types): + return unifyTupleTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && tupleCt > 0: + // Can never unify object and tuple types since they have incompatible kinds + return cty.NilType, nil + } + } + + prefOrder := sortTypes(types) + + // sortTypes gives us an order where earlier items are preferable as + // our result type. We'll now walk through these and choose the first + // one we encounter for which conversions exist for all source types. 
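+	//
+	// If none of the candidates admits conversions from all of the other
+	// types, the loop below exhausts the preference order and we fall out
+	// to the nil result at the end of this function.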
+	conversions := make([]Conversion, len(types))
+Preferences:
+	for _, wantTypeIdx := range prefOrder {
+		wantType := types[wantTypeIdx]
+		for i, tryType := range types {
+			if i == wantTypeIdx {
+				// Don't need to convert our wanted type to itself
+				conversions[i] = nil
+				continue
+			}
+
+			if tryType.Equals(wantType) {
+				conversions[i] = nil
+				continue
+			}
+
+			if unsafe {
+				conversions[i] = GetConversionUnsafe(tryType, wantType)
+			} else {
+				conversions[i] = GetConversion(tryType, wantType)
+			}
+
+			if conversions[i] == nil {
+				// wantType is not a suitable unification type, so we'll
+				// try the next one in our preference order.
+				continue Preferences
+			}
+		}
+
+		return wantType, conversions
+	}
+
+	// If we fall out here, no unification is possible
+	return cty.NilType, nil
+}
+
+func unifyMapTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
+	// If we had any dynamic types in the input here then we can't predict
+	// what path we'll take through here once these become known types, so
+	// we'll conservatively produce DynamicVal for these.
+	if hasDynamic {
+		return unifyAllAsDynamic(types)
+	}
+
+	elemTypes := make([]cty.Type, 0, len(types))
+	for _, ty := range types {
+		elemTypes = append(elemTypes, ty.ElementType())
+	}
+	retElemType, _ := unify(elemTypes, unsafe)
+	if retElemType == cty.NilType {
+		return cty.NilType, nil
+	}
+
+	retTy := cty.Map(retElemType)
+
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return cty.NilType, nil
+		}
+	}
+
+	return retTy, conversions
+}
+
+func unifyObjectTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
+	// If we had any dynamic types in the input here then we can't predict
+	// what path we'll take through here once these become known types, so
+	// we'll conservatively produce DynamicVal for these.
+	if hasDynamic {
+		return unifyAllAsDynamic(types)
+	}
+
+	// There are two different ways we can succeed here:
+	// - If all of the given object types have the same set of attribute names
+	//   and the corresponding types are all unifiable, then we construct that
+	//   type.
+	// - If the given object types have different attribute names or their
+	//   corresponding types are not unifiable, we'll instead try to unify
+	//   all of the attribute types together to produce a map type.
+	//
+	// Our unification behavior is intentionally stricter than our conversion
+	// behavior for subset object types because user intent is different with
+	// unification use-cases: it makes sense to allow {"foo":true} to convert
+	// to an empty object value, but unifying an object type that has an
+	// attribute with the empty object type should be an error, because
+	// unifying to the empty object type would be surprising and useless.
+
+	firstAttrs := types[0].AttributeTypes()
+	for _, ty := range types[1:] {
+		thisAttrs := ty.AttributeTypes()
+		if len(thisAttrs) != len(firstAttrs) {
+			// If number of attributes is different then there can be no
+			// object type in common.
+			return unifyObjectTypesToMap(types, unsafe)
+		}
+		for name := range thisAttrs {
+			if _, ok := firstAttrs[name]; !ok {
+				// If attribute names don't exactly match then there can be
+				// no object type in common.
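+				// For example (hypothetical types), an object with only
+				// attribute "a" and an object with only attribute "b"
+				// cannot share an object type, so we fall back to trying
+				// a map type below.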
+				return unifyObjectTypesToMap(types, unsafe)
+			}
+		}
+	}
+
+	// If we get here then we've proven that all of the given object types
+	// have exactly the same set of attribute names, though the types may
+	// differ.
+	retAtys := make(map[string]cty.Type)
+	atysAcross := make([]cty.Type, len(types))
+	for name := range firstAttrs {
+		for i, ty := range types {
+			atysAcross[i] = ty.AttributeType(name)
+		}
+		retAtys[name], _ = unify(atysAcross, unsafe)
+		if retAtys[name] == cty.NilType {
+			// Cannot unify this attribute alone, which means that unification
+			// of everything down to a map type can't be possible either.
+			return cty.NilType, nil
+		}
+	}
+	retTy := cty.Object(retAtys)
+
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return unifyObjectTypesToMap(types, unsafe)
+		}
+	}
+
+	return retTy, conversions
+}
+
+func unifyObjectTypesToMap(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
+	// This is our fallback case for unifyObjectTypes, where we see if we can
+	// construct a map type that can accept all of the attribute types.
+
+	var atys []cty.Type
+	for _, ty := range types {
+		for _, aty := range ty.AttributeTypes() {
+			atys = append(atys, aty)
+		}
+	}
+
+	ety, _ := unify(atys, unsafe)
+	if ety == cty.NilType {
+		return cty.NilType, nil
+	}
+
+	retTy := cty.Map(ety)
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			return cty.NilType, nil
+		}
+	}
+	return retTy, conversions
+}
+
+func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
+	// If we had any dynamic types in the input here then we can't predict
+	// what path we'll take through here once these become known types, so
+	// we'll conservatively produce DynamicVal for these.
+	if hasDynamic {
+		return unifyAllAsDynamic(types)
+	}
+
+	// There are two different ways we can succeed here:
+	// - If all of the given tuple types have the same sequence of element types
+	//   and the corresponding types are all unifiable, then we construct that
+	//   type.
+	// - If the given tuple types have different element types or their
+	//   corresponding types are not unifiable, we'll instead try to unify
+	//   all of the element types together to produce a list type.
+
+	firstEtys := types[0].TupleElementTypes()
+	for _, ty := range types[1:] {
+		thisEtys := ty.TupleElementTypes()
+		if len(thisEtys) != len(firstEtys) {
+			// If number of elements is different then there can be no
+			// tuple type in common.
+			return unifyTupleTypesToList(types, unsafe)
+		}
+	}
+
+	// If we get here then we've proven that all of the given tuple types
+	// have the same number of elements, though the types may differ.
+	retEtys := make([]cty.Type, len(firstEtys))
+	atysAcross := make([]cty.Type, len(types))
+	for idx := range firstEtys {
+		for tyI, ty := range types {
+			atysAcross[tyI] = ty.TupleElementTypes()[idx]
+		}
+		retEtys[idx], _ = unify(atysAcross, unsafe)
+		if retEtys[idx] == cty.NilType {
+			// Cannot unify this element alone, which means that unification
+			// of everything down to a list type can't be possible either.
+			return cty.NilType, nil
+		}
+	}
+	retTy := cty.Tuple(retEtys)
+
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return unifyTupleTypesToList(types, unsafe)
+		}
+	}
+
+	return retTy, conversions
+}
+
+func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
+	// This is our fallback case for unifyTupleTypes, where we see if we can
+	// construct a list type that can accept all of the element types.
+
+	var etys []cty.Type
+	for _, ty := range types {
+		for _, ety := range ty.TupleElementTypes() {
+			etys = append(etys, ety)
+		}
+	}
+
+	ety, _ := unify(etys, unsafe)
+	if ety == cty.NilType {
+		return cty.NilType, nil
+	}
+
+	retTy := cty.List(ety)
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return cty.NilType, nil
+		}
+	}
+	return retTy, conversions
+}
+
+func unifyAllAsDynamic(types []cty.Type) (cty.Type, []Conversion) {
+	conversions := make([]Conversion, len(types))
+	for i := range conversions {
+		conversions[i] = func(cty.Value) (cty.Value, error) {
+			return cty.DynamicVal, nil
+		}
+	}
+	return cty.DynamicPseudoType, conversions
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/doc.go b/vendor/github.com/hashicorp/go-cty/cty/doc.go
new file mode 100644
index 00000000..d31f0547
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/doc.go
@@ -0,0 +1,18 @@
+// Package cty (pronounced see-tie) provides some infrastructure for a type
+// system that might be useful for applications that need to represent
+// configuration values provided by the user whose types are not known
+// at compile time, particularly if the calling application also allows
+// such values to be used in expressions.
+//
+// The type system consists of primitive types Number, String and Bool, as
+// well as List and Map collection types and Object types that can have
+// arbitrarily-typed sets of attributes.
+//
+// A set of operations is defined on these types, which is accessible via
+// the wrapper struct Value, which annotates the raw, internal representation
+// of a value with its corresponding type.
+//
+// This package is oriented towards being a building block for configuration
+// languages used to bootstrap an application. It is not optimized for use
+// in tight loops where CPU time or memory pressure are a concern.
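+//
+// As a brief illustration (hypothetical usage, not from the upstream docs):
+//
+//	name := cty.StringVal("Ermintrude")
+//	age := cty.NumberIntVal(32)
+//	obj := cty.ObjectVal(map[string]cty.Value{"name": name, "age": age})
+//	obj.GetAttr("name").AsString() // "Ermintrude"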
+package cty diff --git a/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go b/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go new file mode 100644 index 00000000..31567e76 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go @@ -0,0 +1,194 @@ +package cty + +import ( + "sort" + + "github.com/hashicorp/go-cty/cty/set" +) + +// ElementIterator is the interface type returned by Value.ElementIterator to +// allow the caller to iterate over elements of a collection-typed value. +// +// Its usage pattern is as follows: +// +// it := val.ElementIterator() +// for it.Next() { +// key, val := it.Element() +// // ... +// } +type ElementIterator interface { + Next() bool + Element() (key Value, value Value) +} + +func canElementIterator(val Value) bool { + switch { + case val.IsMarked(): + return false + case val.ty.IsListType(): + return true + case val.ty.IsMapType(): + return true + case val.ty.IsSetType(): + return true + case val.ty.IsTupleType(): + return true + case val.ty.IsObjectType(): + return true + default: + return false + } +} + +func elementIterator(val Value) ElementIterator { + val.assertUnmarked() + switch { + case val.ty.IsListType(): + return &listElementIterator{ + ety: val.ty.ElementType(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsMapType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same input map. + rawMap := val.v.(map[string]interface{}) + keys := make([]string, 0, len(rawMap)) + for key := range rawMap { + keys = append(keys, key) + } + sort.Strings(keys) + + return &mapElementIterator{ + ety: val.ty.ElementType(), + vals: rawMap, + keys: keys, + idx: -1, + } + case val.ty.IsSetType(): + rawSet := val.v.(set.Set) + return &setElementIterator{ + ety: val.ty.ElementType(), + setIt: rawSet.Iterator(), + } + case val.ty.IsTupleType(): + return &tupleElementIterator{ + etys: val.ty.TupleElementTypes(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsObjectType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same object type. 
+		atys := val.ty.AttributeTypes()
+		keys := make([]string, 0, len(atys))
+		for key := range atys {
+			keys = append(keys, key)
+		}
+		sort.Strings(keys)
+
+		return &objectElementIterator{
+			atys:      atys,
+			vals:      val.v.(map[string]interface{}),
+			attrNames: keys,
+			idx:       -1,
+		}
+	default:
+		panic("attempt to iterate on non-collection, non-tuple type")
+	}
+}
+
+type listElementIterator struct {
+	ety  Type
+	vals []interface{}
+	idx  int
+}
+
+func (it *listElementIterator) Element() (Value, Value) {
+	i := it.idx
+	return NumberIntVal(int64(i)), Value{
+		ty: it.ety,
+		v:  it.vals[i],
+	}
+}
+
+func (it *listElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.vals)
+}
+
+type mapElementIterator struct {
+	ety  Type
+	vals map[string]interface{}
+	keys []string
+	idx  int
+}
+
+func (it *mapElementIterator) Element() (Value, Value) {
+	key := it.keys[it.idx]
+	return StringVal(key), Value{
+		ty: it.ety,
+		v:  it.vals[key],
+	}
+}
+
+func (it *mapElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.keys)
+}
+
+type setElementIterator struct {
+	ety   Type
+	setIt *set.Iterator
+}
+
+func (it *setElementIterator) Element() (Value, Value) {
+	val := Value{
+		ty: it.ety,
+		v:  it.setIt.Value(),
+	}
+	return val, val
+}
+
+func (it *setElementIterator) Next() bool {
+	return it.setIt.Next()
+}
+
+type tupleElementIterator struct {
+	etys []Type
+	vals []interface{}
+	idx  int
+}
+
+func (it *tupleElementIterator) Element() (Value, Value) {
+	i := it.idx
+	return NumberIntVal(int64(i)), Value{
+		ty: it.etys[i],
+		v:  it.vals[i],
+	}
+}
+
+func (it *tupleElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.vals)
+}
+
+type objectElementIterator struct {
+	atys      map[string]Type
+	vals      map[string]interface{}
+	attrNames []string
+	idx       int
+}
+
+func (it *objectElementIterator) Element() (Value, Value) {
+	key := it.attrNames[it.idx]
+	return StringVal(key), Value{
+		ty: it.atys[key],
+		v:  it.vals[key],
+	}
+}
+
+func (it *objectElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.attrNames)
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/error.go b/vendor/github.com/hashicorp/go-cty/cty/error.go
new file mode 100644
index 00000000..dd139f72
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/error.go
@@ -0,0 +1,55 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// PathError is a specialization of error that represents where in a
+// potentially-deep data structure an error occurred, using a Path.
+type PathError struct {
+	error
+	Path Path
+}
+
+func errorf(path Path, f string, args ...interface{}) error {
+	// We need to copy the Path because often our caller builds it by
+	// continually mutating the same underlying buffer.
+	sPath := make(Path, len(path))
+	copy(sPath, path)
+	return PathError{
+		error: fmt.Errorf(f, args...),
+		Path:  sPath,
+	}
+}
+
+// NewErrorf creates a new PathError for the current path by passing the
+// given format and arguments to fmt.Errorf and then wrapping the result
+// similarly to NewError.
+func (p Path) NewErrorf(f string, args ...interface{}) error {
+	return errorf(p, f, args...)
+}
+
+// NewError creates a new PathError for the current path, wrapping the given
+// error.
+func (p Path) NewError(err error) error {
+	// if we're being asked to wrap an existing PathError then our new
+	// PathError will be the concatenation of the two paths, ensuring
+	// that we still get a single flat PathError that's thus easier for
+	// callers to deal with.
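+	// For example (illustrative): wrapping an error that already carries
+	// path ["b"] while at path ["a"] yields a single PathError whose path
+	// is ["a", "b"], rather than a PathError nested inside another.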
+	perr, wrappingPath := err.(PathError)
+	pathLen := len(p)
+	if wrappingPath {
+		pathLen = pathLen + len(perr.Path)
+	}
+
+	sPath := make(Path, pathLen)
+	copy(sPath, p)
+	if wrappingPath {
+		copy(sPath[len(p):], perr.Path)
+	}
+
+	return PathError{
+		error: err,
+		Path:  sPath,
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/gob.go b/vendor/github.com/hashicorp/go-cty/cty/gob.go
new file mode 100644
index 00000000..80929aa5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/gob.go
@@ -0,0 +1,204 @@
+package cty
+
+import (
+	"bytes"
+	"encoding/gob"
+	"errors"
+	"fmt"
+	"math/big"
+
+	"github.com/hashicorp/go-cty/cty/set"
+)
+
+// GobEncode is an implementation of the gob.GobEncoder interface, which
+// allows Values to be included in structures encoded with encoding/gob.
+//
+// Currently it is not possible to represent values of capsule types in gob,
+// because the types themselves cannot be represented.
+func (val Value) GobEncode() ([]byte, error) {
+	if val.IsMarked() {
+		return nil, errors.New("value is marked")
+	}
+
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+
+	gv := gobValue{
+		Version: 0,
+		Ty:      val.ty,
+		V:       val.v,
+	}
+
+	err := enc.Encode(gv)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding cty.Value: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is an implementation of the gob.GobDecoder interface, which
+// inverts the operation performed by GobEncode. See the documentation of
+// GobEncode for considerations when using cty.Value instances with gob.
+func (val *Value) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gv gobValue
+	err := dec.Decode(&gv)
+	if err != nil {
+		return fmt.Errorf("error decoding cty.Value: %s", err)
+	}
+	if gv.Version != 0 {
+		return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version)
+	}
+
+	// Because big.Float.GobEncode is implemented with a pointer receiver,
+	// gob encoding of an interface{} containing a *big.Float value does not
+	// round-trip correctly, emerging instead as a non-pointer big.Float.
+	// The rest of cty expects all number values to be represented by
+	// *big.Float, so we'll fix that up here.
+	gv.V = gobDecodeFixNumberPtr(gv.V, gv.Ty)
+
+	val.ty = gv.Ty
+	val.v = gv.V
+
+	return nil
+}
+
+// GobEncode is an implementation of the gob.GobEncoder interface, which
+// allows Types to be included in structures encoded with encoding/gob.
+//
+// Currently it is not possible to represent capsule types in gob.
+func (t Type) GobEncode() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+
+	gt := gobType{
+		Version: 0,
+		Impl:    t.typeImpl,
+	}
+
+	err := enc.Encode(gt)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding cty.Type: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is an implementation of the gob.GobDecoder interface, which
+// reverses the encoding performed by GobEncode to allow types to be recovered
+// from gob buffers.
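+//
+// A round trip through gob therefore looks like this (illustrative sketch):
+//
+//	var buf bytes.Buffer
+//	_ = gob.NewEncoder(&buf).Encode(cty.List(cty.String)) // uses Type.GobEncode
+//	var ty cty.Type
+//	_ = gob.NewDecoder(&buf).Decode(&ty)                  // uses Type.GobDecode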
+func (t *Type) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gt gobType
+	err := dec.Decode(&gt)
+	if err != nil {
+		return fmt.Errorf("error decoding cty.Type: %s", err)
+	}
+	if gt.Version != 0 {
+		return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version)
+	}
+
+	t.typeImpl = gt.Impl
+
+	return nil
+}
+
+// Capsule types cannot currently be gob-encoded, because they rely on pointer
+// equality and we have no way to recover the original pointer on decode.
+func (t *capsuleType) GobEncode() ([]byte, error) {
+	return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName))
+}
+
+func (t *capsuleType) GobDecode() ([]byte, error) {
+	return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName))
+}
+
+type gobValue struct {
+	Version int
+	Ty      Type
+	V       interface{}
+}
+
+type gobType struct {
+	Version int
+	Impl    typeImpl
+}
+
+type gobCapsuleTypeImpl struct {
+}
+
+// gobDecodeFixNumberPtr fixes an unfortunate quirk of round-tripping cty.Number
+// values through gob: the big.Float.GobEncode method is implemented on a
+// pointer receiver, and so it loses the "pointer-ness" of the value on
+// encode, causing the values to emerge the other end as big.Float rather than
+// *big.Float as we expect elsewhere in cty.
+//
+// The implementation of gobDecodeFixNumberPtr mutates the given raw value
+// during its work, and may either return the same value mutated or a new
+// value. Callers must no longer use whatever value they pass as "raw" after
+// this function is called.
+func gobDecodeFixNumberPtr(raw interface{}, ty Type) interface{} {
+	// Unfortunately we need to work recursively here because number values
+	// might be embedded in structural or collection type values.
+
+	switch {
+	case ty.Equals(Number):
+		if bf, ok := raw.(big.Float); ok {
+			return &bf // wrap in pointer
+		}
+	case ty.IsMapType() && ty.ElementType().Equals(Number):
+		if m, ok := raw.(map[string]interface{}); ok {
+			for k, v := range m {
+				m[k] = gobDecodeFixNumberPtr(v, ty.ElementType())
+			}
+		}
+	case ty.IsListType() && ty.ElementType().Equals(Number):
+		if s, ok := raw.([]interface{}); ok {
+			for i, v := range s {
+				s[i] = gobDecodeFixNumberPtr(v, ty.ElementType())
+			}
+		}
+	case ty.IsSetType() && ty.ElementType().Equals(Number):
+		if s, ok := raw.(set.Set); ok {
+			newS := set.NewSet(s.Rules())
+			for it := s.Iterator(); it.Next(); {
+				newV := gobDecodeFixNumberPtr(it.Value(), ty.ElementType())
+				newS.Add(newV)
+			}
+			return newS
+		}
+	case ty.IsObjectType():
+		if m, ok := raw.(map[string]interface{}); ok {
+			for k, v := range m {
+				aty := ty.AttributeType(k)
+				m[k] = gobDecodeFixNumberPtr(v, aty)
+			}
+		}
+	case ty.IsTupleType():
+		if s, ok := raw.([]interface{}); ok {
+			for i, v := range s {
+				ety := ty.TupleElementType(i)
+				s[i] = gobDecodeFixNumberPtr(v, ety)
+			}
+		}
+	}
+
+	return raw
+}
+
+// gobDecodeFixNumberPtrVal is a helper wrapper around gobDecodeFixNumberPtr
+// that works with already-constructed values. This is primarily for testing,
+// to fix up intentionally-invalid number values for the parts of the test
+// code that need them to be valid, such as calling GoString on them.
+func gobDecodeFixNumberPtrVal(v Value) Value { + raw := gobDecodeFixNumberPtr(v.v, v.ty) + return Value{ + v: raw, + ty: v.ty, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go new file mode 100644 index 00000000..a5177d22 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go @@ -0,0 +1,7 @@ +// Package gocty deals with converting between cty Values and native go +// values. +// +// It operates under a similar principle to the encoding/json and +// encoding/xml packages in the standard library, using reflection to +// populate native Go data structures from cty values and vice-versa. +package gocty diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go new file mode 100644 index 00000000..0677a079 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go @@ -0,0 +1,43 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/set" +) + +var valueType = reflect.TypeOf(cty.Value{}) +var typeType = reflect.TypeOf(cty.Type{}) + +var setType = reflect.TypeOf(set.Set{}) + +var bigFloatType = reflect.TypeOf(big.Float{}) +var bigIntType = reflect.TypeOf(big.Int{}) + +var emptyInterfaceType = reflect.TypeOf(interface{}(nil)) + +var stringType = reflect.TypeOf("") + +// structTagIndices interrogates the fields of the given type (which must +// be a struct type, or we'll panic) and returns a map from the cty +// attribute names declared via struct tags to the indices of the +// fields holding those tags. +// +// This function will panic if two fields within the struct are tagged with +// the same cty attribute name. +func structTagIndices(st reflect.Type) map[string]int { + ct := st.NumField() + ret := make(map[string]int, ct) + + for i := 0; i < ct; i++ { + field := st.Field(i) + attrName := field.Tag.Get("cty") + if attrName != "" { + ret[attrName] = i + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go new file mode 100644 index 00000000..fc35c169 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go @@ -0,0 +1,548 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty/set" +) + +// ToCtyValue produces a cty.Value from a Go value. The result will conform +// to the given type, or an error will be returned if this is not possible. +// +// The target type serves as a hint to resolve ambiguities in the mapping. +// For example, the Go type set.Set tells us that the value is a set but +// does not describe the set's element type. This also allows for convenient +// conversions, such as populating a set from a slice rather than having to +// first explicitly instantiate a set.Set. +// +// The audience of this function is assumed to be the developers of Go code +// that is integrating with cty, and thus the error messages it returns are +// presented from Go's perspective. These messages are thus not appropriate +// for display to end-users. An error returned from ToCtyValue represents a +// bug in the calling program, not user error. 
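+//
+// For example (hypothetical values):
+//
+//	v, err := gocty.ToCtyValue([]string{"a", "b"}, cty.List(cty.String))
+//	// on success, v is a cty list of cty.StringVal("a") and cty.StringVal("b")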
+func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) { + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time toCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given Type is. + path := make(cty.Path, 0) + return toCtyValue(reflect.ValueOf(val), ty, path) +} + +func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) { + if val != (reflect.Value{}) && val.Type().AssignableTo(valueType) { + // If the source value is a cty.Value then we'll try to just pass + // through to the target type directly. + return toCtyPassthrough(val, ty, path) + } + + switch ty { + case cty.Bool: + return toCtyBool(val, path) + case cty.Number: + return toCtyNumber(val, path) + case cty.String: + return toCtyString(val, path) + case cty.DynamicPseudoType: + return toCtyDynamic(val, path) + } + + switch { + case ty.IsListType(): + return toCtyList(val, ty.ElementType(), path) + case ty.IsMapType(): + return toCtyMap(val, ty.ElementType(), path) + case ty.IsSetType(): + return toCtySet(val, ty.ElementType(), path) + case ty.IsObjectType(): + return toCtyObject(val, ty.AttributeTypes(), path) + case ty.IsTupleType(): + return toCtyTuple(val, ty.TupleElementTypes(), path) + case ty.IsCapsuleType(): + return toCtyCapsule(val, ty, path) + } + + // We should never fall out here + return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty) +} + +func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Bool), nil + } + + switch val.Kind() { + + case reflect.Bool: + return cty.BoolVal(val.Bool()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind()) + + } + +} + +func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Number), nil + } + + switch val.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.NumberIntVal(val.Int()), nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.NumberUIntVal(val.Uint()), nil + + case reflect.Float32, reflect.Float64: + return cty.NumberFloatVal(val.Float()), nil + + case reflect.Struct: + if val.Type().AssignableTo(bigIntType) { + bigInt := val.Interface().(big.Int) + bigFloat := (&big.Float{}).SetInt(&bigInt) + val = reflect.ValueOf(*bigFloat) + } + + if val.Type().AssignableTo(bigFloatType) { + bigFloat := val.Interface().(big.Float) + return cty.NumberVal(&bigFloat), nil + } + + fallthrough + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind()) + + } + +} + +func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.String), nil + } + + switch val.Kind() { + + case reflect.String: + return cty.StringVal(val.String()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind()) + + } + +} + +func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.List(ety)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.List(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { 
+ return cty.ListValEmpty(ety), nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, val.Len()) + for i := range vals { + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ListVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety)) + + } +} + +func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Map(ety)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Map(ety)), nil + } + + if val.Len() == 0 { + return cty.MapValEmpty(ety), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make(map[string]cty.Value, val.Len()) + for _, kv := range val.MapKeys() { + k := kv.String() + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.MapVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety)) + + } +} + +func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Set(ety)), nil + } + + var vals []cty.Value + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Set(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, val.Len()) + for i := range vals { + var err error + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + case reflect.Struct: + + if !val.Type().AssignableTo(setType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety)) + } + + rawSet := val.Interface().(set.Set) + inVals := rawSet.Values() + + if len(inVals) == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, len(inVals)) + for i := range inVals { + var err error + vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety)) + + } + + return cty.SetVal(vals), nil +} + +func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + haveKeys := make(map[string]struct{}, val.Len()) + for _, kv := range val.MapKeys() { + haveKeys[kv.String()] = struct{}{} + } + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + var err error + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if _, have := haveKeys[k]; !have { + vals[k] = cty.NullVal(at) + continue + } + + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + case reflect.Struct: + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. 
+ path = append(path, cty.PathStep(nil)) + + attrFields := structTagIndices(val.Type()) + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if fieldIdx, have := attrFields[k]; have { + var err error + vals[k], err = toCtyValue(val.Field(fieldIdx), at, path) + if err != nil { + return cty.NilVal, err + } + } else { + vals[k] = cty.NullVal(at) + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes)) + + } +} + +func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + if val.Len() != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + case reflect.Struct: + fieldCount := val.Type().NumField() + if fieldCount != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Field(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes)) + + } +} + +func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(capsuleType), nil + } + + if val.Kind() != reflect.Ptr { + if !val.CanAddr() { + return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType) + } + + val = val.Addr() + } + + if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) { + return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType) + } + + return cty.CapsuleVal(capsuleType, val.Interface()), nil +} + +func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.DynamicPseudoType), nil + } + + switch val.Kind() { + + case reflect.Struct: + if !val.Type().AssignableTo(valueType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type()) + } + + return val.Interface().(cty.Value), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind()) + + } + +} + +func toCtyPassthrough(wrappedVal reflect.Value, wantTy cty.Type, path cty.Path) (cty.Value, error) { + if wrappedVal = toCtyUnwrapPointer(wrappedVal); !wrappedVal.IsValid() { + return cty.NullVal(wantTy), nil + } + + givenVal := wrappedVal.Interface().(cty.Value) + + val, err := convert.Convert(givenVal, wantTy) + if err != nil { + return cty.NilVal, path.NewErrorf("unsuitable value: %s", err) + } + return val, nil +} + +// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three +// possible outcomes: +// +// - Given value isn't a pointer, so it's just returned as-is. +// - Given value is a non-nil pointer, in which case it is dereferenced +// and the result returned. +// - Given value is a nil pointer, in which case an invalid value is returned. +// +// For nested pointer types, like **int, they are all dereferenced in turn +// until a non-pointer value is found, or until a nil pointer is encountered. +func toCtyUnwrapPointer(val reflect.Value) reflect.Value { + for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { + if val.IsNil() { + return reflect.Value{} + } + + val = val.Elem() + } + + return val +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go new file mode 100644 index 00000000..404faba1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go @@ -0,0 +1,686 @@ +package gocty + +import ( + "math" + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" +) + +// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer, +// using a fixed set of conversion rules. +// +// This function considers its audience to be the creator of the cty Value +// given, and thus the error messages it generates are (unlike with ToCtyValue) +// presented in cty terminology that is generally appropriate to return to +// end-users in applications where cty data structures are built from +// user-provided configuration. 
In particular this means that if incorrect +// target types are provided by the calling application the resulting error +// messages are likely to be confusing, since we assume that the given target +// type is correct and the cty.Value is where the error lies. +// +// If an error is returned, the target data structure may have been partially +// populated, but the degree to which this is true is an implementation +// detail that the calling application should not rely on. +// +// The function will panic if given a non-pointer as the Go value target, +// since that is considered to be a bug in the calling program. +func FromCtyValue(val cty.Value, target interface{}) error { + tVal := reflect.ValueOf(target) + if tVal.Kind() != reflect.Ptr { + panic("target value is not a pointer") + } + if tVal.IsNil() { + panic("target value is nil pointer") + } + + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time fromCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given cty.Value is. + path := make(cty.Path, 0) + return fromCtyValue(val, tVal, path) +} + +func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error { + ty := val.Type() + + deepTarget := fromCtyPopulatePtr(target, false) + + // If we're decoding into a cty.Value then we just pass through the + // value as-is, to enable partial decoding. This is the only situation + // where unknown values are permitted. + if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) { + deepTarget.Set(reflect.ValueOf(val)) + return nil + } + + // Lists and maps can be nil without indirection, but everything else + // requires a pointer and we set it immediately to nil. + // We also make an exception for capsule types because we want to handle + // pointers specially for these. + // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while + // other types can assume no nulls after this point.) + if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() { + target = fromCtyPopulatePtr(target, true) + if target.Kind() != reflect.Ptr { + return path.NewErrorf("null value is not allowed") + } + + target.Set(reflect.Zero(target.Type())) + return nil + } + + target = deepTarget + + if !val.IsKnown() { + return path.NewErrorf("value must be known") + } + + switch ty { + case cty.Bool: + return fromCtyBool(val, target, path) + case cty.Number: + return fromCtyNumber(val, target, path) + case cty.String: + return fromCtyString(val, target, path) + } + + switch { + case ty.IsListType(): + return fromCtyList(val, target, path) + case ty.IsMapType(): + return fromCtyMap(val, target, path) + case ty.IsSetType(): + return fromCtySet(val, target, path) + case ty.IsObjectType(): + return fromCtyObject(val, target, path) + case ty.IsTupleType(): + return fromCtyTuple(val, target, path) + case ty.IsCapsuleType(): + return fromCtyCapsule(val, target, path) + } + + // We should never fall out here; reaching here indicates a bug in this + // function. 
+ return path.NewErrorf("unsupported source type %#v", ty) +} + +func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Bool: + target.SetBool(val.True()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error { + bf := val.AsBigFloat() + + switch target.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fromCtyNumberInt(bf, target, path) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fromCtyNumberUInt(bf, target, path) + + case reflect.Float32, reflect.Float64: + return fromCtyNumberFloat(bf, target, path) + + case reflect.Struct: + return fromCtyNumberBig(bf, target, path) + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var min int64 + var max int64 + switch target.Type().Bits() { + case 8: + min = math.MinInt8 + max = math.MaxInt8 + case 16: + min = math.MinInt16 + max = math.MaxInt16 + case 32: + min = math.MinInt32 + max = math.MaxInt32 + case 64: + min = math.MinInt64 + max = math.MaxInt64 + default: + panic("weird number of bits in target int") + } + + iv, accuracy := bf.Int64() + if accuracy != big.Exact || iv < min || iv > max { + return path.NewErrorf("value must be a whole number, between %d and %d", min, max) + } + + target.SetInt(iv) + return nil +} + +func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var max uint64 + switch target.Type().Bits() { + case 8: + max = math.MaxUint8 + case 16: + max = math.MaxUint16 + case 32: + max = math.MaxUint32 + case 64: + max = math.MaxUint64 + default: + panic("weird number of bits in target uint") + } + + iv, accuracy := bf.Uint64() + if accuracy != big.Exact || iv > max { + return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max) + } + + target.SetUint(iv) + return nil +} + +func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.Float32, reflect.Float64: + fv, accuracy := bf.Float64() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. + if math.IsInf(fv, 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64) + } + } + target.SetFloat(fv) + return nil + default: + panic("unsupported kind of float") + } +} + +func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error { + switch { + + case bigFloatType.ConvertibleTo(target.Type()): + // Easy! 
+ target.Set(reflect.ValueOf(bf).Elem().Convert(target.Type())) + return nil + + case bigIntType.ConvertibleTo(target.Type()): + bi, accuracy := bf.Int(nil) + if accuracy != big.Exact { + return path.NewErrorf("value must be a whole number") + } + target.Set(reflect.ValueOf(bi).Elem().Convert(target.Type())) + return nil + + default: + return likelyRequiredTypesError(path, target) + } +} + +func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.String: + target.SetString(val.AsString()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a list of length %d", target.Len()) + } + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Map: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + tv := reflect.MakeMap(target.Type()) + et := target.Type().Elem() + + path = append(path, nil) + + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + ks := key.AsString() + + targetElem := reflect.New(et) + err = fromCtyValue(val, targetElem, path) + + tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem()) + + return err != nil + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not 
allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a set of length %d", target.Len()) + } + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + return nil + + // TODO: decode into set.Set instance + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + attrTypes := val.Type().AttributeTypes() + targetFields := structTagIndices(target.Type()) + + path = append(path, nil) + + for k, i := range targetFields { + if _, exists := attrTypes[k]; !exists { + // If the field in question isn't able to represent nil, + // that's an error. + fk := target.Field(i).Kind() + switch fk { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: + // okay + default: + return path.NewErrorf("missing required attribute %q", k) + } + } + } + + for k := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + fieldIdx, exists := targetFields[k] + if !exists { + return path.NewErrorf("unsupported attribute %q", k) + } + + ev := val.GetAttr(k) + + targetField := target.Field(fieldIdx) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + elemTypes := val.Type().TupleElementTypes() + fieldCount := target.Type().NumField() + + if fieldCount != len(elemTypes) { + return path.NewErrorf("a tuple of %d elements is required", fieldCount) + } + + path = append(path, nil) + + for i := range elemTypes { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + ev := val.Index(cty.NumberIntVal(int64(i))) + + targetField := target.Field(i) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error { + + if target.Kind() == reflect.Ptr { + // Walk through indirection until we get to the last pointer, + // which we might set to null below. + target = fromCtyPopulatePtr(target, true) + + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + // Since a capsule contains a pointer to an object, we'll preserve + // that pointer on the way out and thus allow the caller to recover + // the original object, rather than a copy of it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Elem().Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. 
+ return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + target.Set(reflect.ValueOf(val.EncapsulatedValue())) + + return nil + } else { + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + // If our target isn't a pointer then we will attempt to copy + // the encapsulated value into it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + // We know that EncapsulatedValue is always a pointer, so we + // can safely call .Elem on its reflect.Value. + target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem()) + + return nil + } + +} + +// fromCtyPopulatePtr recognizes when target is a pointer type and allocates +// a value to assign to that pointer, which it returns. +// +// If the given value has multiple levels of indirection, like **int, these +// will be processed in turn so that the return value is guaranteed to be +// a non-pointer. +// +// As an exception, if decodingNull is true then the returned value will be +// the final level of pointer, if any, so that the caller can assign it +// as nil to represent a null value. If the given target value is not a pointer +// at all then the returned value will be just the given target, so the caller +// must test if the returned value is a pointer before trying to assign nil +// to it. +func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value { + for { + if target.Kind() == reflect.Interface && !target.IsNil() { + e := target.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + target = e + } + } + + if target.Kind() != reflect.Ptr { + break + } + + // Stop early if we're decodingNull and we've found our last indirection + if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() { + break + } + + if target.IsNil() { + target.Set(reflect.New(target.Type().Elem())) + } + + target = target.Elem() + } + return target +} + +// likelyRequiredTypesError returns an error that states which types are +// acceptable by making some assumptions about what types we support for +// each target Go kind. It's not a precise science but it allows us to return +// an error message that is cty-user-oriented rather than Go-oriented. +// +// Generally these error messages should be a matter of last resort, since +// the calling application should be validating user-provided value types +// before decoding anyway. 
+func likelyRequiredTypesError(path cty.Path, target reflect.Value) error { + switch target.Kind() { + + case reflect.Bool: + return path.NewErrorf("bool value is required") + + case reflect.String: + return path.NewErrorf("string value is required") + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fallthrough + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fallthrough + case reflect.Float32, reflect.Float64: + return path.NewErrorf("number value is required") + + case reflect.Slice, reflect.Array: + return path.NewErrorf("list or set value is required") + + case reflect.Map: + return path.NewErrorf("map or object value is required") + + case reflect.Struct: + switch { + + case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType): + return path.NewErrorf("number value is required") + + case target.Type().AssignableTo(setType): + return path.NewErrorf("set or list value is required") + + default: + return path.NewErrorf("object or tuple value is required") + + } + + default: + // We should avoid getting into this path, since this error + // message is rather useless. + return path.NewErrorf("incorrect type") + + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go new file mode 100644 index 00000000..b4134253 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go @@ -0,0 +1,108 @@ +package gocty + +import ( + "reflect" + + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts +// to find a suitable cty.Type instance that could be used for a conversion +// with ToCtyValue. +// +// This allows -- for simple situations at least -- types to be defined just +// once in Go and the cty types derived from the Go types, but in the process +// it makes some assumptions that may be undesirable so applications are +// encouraged to build their cty types directly if exacting control is +// required. +// +// Not all Go types can be represented as cty types, so an error may be +// returned which is usually considered to be a bug in the calling program. +// In particular, ImpliedType will never use capsule types in its returned +// type, because it cannot know the capsule types supported by the calling +// program. 
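+//
+// As an illustrative sketch (the struct and its cty tags here are
+// hypothetical):
+//
+//     ty, err := ImpliedType(struct {
+//         Name string `cty:"name"`
+//         Age  int    `cty:"age"`
+//     }{})
+//     // on success, ty is cty.Object(map[string]cty.Type{
+//     //     "name": cty.String,
+//     //     "age":  cty.Number,
+//     // })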
+func ImpliedType(gv interface{}) (cty.Type, error) { + rt := reflect.TypeOf(gv) + var path cty.Path + return impliedType(rt, path) +} + +func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) { + switch rt.Kind() { + + case reflect.Ptr: + return impliedType(rt.Elem(), path) + + // Primitive types + case reflect.Bool: + return cty.Bool, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.Number, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.Number, nil + case reflect.Float32, reflect.Float64: + return cty.Number, nil + case reflect.String: + return cty.String, nil + + // Collection types + case reflect.Slice: + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.List(ety), nil + case reflect.Map: + if !stringType.AssignableTo(rt.Key()) { + return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt) + } + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.Map(ety), nil + + // Structural types + case reflect.Struct: + return impliedStructType(rt, path) + + default: + return cty.NilType, path.NewErrorf("no cty.Type for %s", rt) + } +} + +func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if valueType.AssignableTo(rt) { + // Special case: cty.Value represents cty.DynamicPseudoType, for + // type conformance checking. + return cty.DynamicPseudoType, nil + } + + fieldIdxs := structTagIndices(rt) + if len(fieldIdxs) == 0 { + return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt) + } + + atys := make(map[string]cty.Type, len(fieldIdxs)) + + { + // Temporary extension of path for attributes + path := append(path, nil) + + for k, fi := range fieldIdxs { + path[len(path)-1] = cty.GetAttrStep{Name: k} + + ft := rt.Field(fi).Type + aty, err := impliedType(ft, path) + if err != nil { + return cty.NilType, err + } + + atys[k] = aty + } + } + + return cty.Object(atys), nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/helper.go b/vendor/github.com/hashicorp/go-cty/cty/helper.go new file mode 100644 index 00000000..1b88e9fa --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/helper.go @@ -0,0 +1,99 @@ +package cty + +import ( + "fmt" +) + +// anyUnknown is a helper to easily check if a set of values contains any +// unknowns, for operations that short-circuit to return unknown in that case. +func anyUnknown(values ...Value) bool { + for _, val := range values { + if val.v == unknown { + return true + } + } + return false +} + +// typeCheck tests whether all of the given values belong to the given type. +// If the given types are a mixture of the given type and the dynamic +// pseudo-type then a short-circuit dynamic value is returned. If the given +// values are all of the correct type but at least one is unknown then +// a short-circuit unknown value is returned. If any other types appear then +// an error is returned. Otherwise (finally!) the result is nil, nil. 
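+//
+// A sketch of the intended calling pattern (operand names are illustrative):
+//
+//     shortCircuit, err := typeCheck(Number, Bool, a, b)
+//     if err != nil {
+//         return NilVal, err // mixed, incompatible operand types
+//     }
+//     if shortCircuit != nil {
+//         return *shortCircuit, nil // unknown or dynamic operand
+//     }
+//     // ...otherwise all operands are known values of the required type...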
+func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) { + hasDynamic := false + hasUnknown := false + + for i, val := range values { + if val.ty == DynamicPseudoType { + hasDynamic = true + continue + } + + if !val.Type().Equals(required) { + return nil, fmt.Errorf( + "type mismatch: want %s but value %d is %s", + required.FriendlyName(), + i, val.ty.FriendlyName(), + ) + } + + if val.v == unknown { + hasUnknown = true + } + } + + if hasDynamic { + return &DynamicVal, nil + } + + if hasUnknown { + ret := UnknownVal(ret) + return &ret, nil + } + + return nil, nil +} + +// mustTypeCheck is a wrapper around typeCheck that immediately panics if +// any error is returned. +func mustTypeCheck(required Type, ret Type, values ...Value) *Value { + shortCircuit, err := typeCheck(required, ret, values...) + if err != nil { + panic(err) + } + return shortCircuit +} + +// forceShortCircuitType takes the return value from mustTypeCheck and +// replaces it with an unknown of the given type if the original value was +// DynamicVal. +// +// This is useful for operations that are specified to always return a +// particular type, since then a dynamic result can safely be "upgraded" to +// a strongly-typed unknown, which then allows subsequent operations to +// be actually type-checked. +// +// It is safe to use this only if the operation in question is defined as +// returning either a value of the given type or panicking, since we know +// then that subsequent operations won't run if the operation panics. +// +// If the given short-circuit value is *not* DynamicVal then it must be +// of the given type, or this function will panic. +func forceShortCircuitType(shortCircuit *Value, ty Type) *Value { + if shortCircuit == nil { + return nil + } + + if shortCircuit.ty == DynamicPseudoType { + ret := UnknownVal(ty) + return &ret + } + + if !shortCircuit.ty.Equals(ty) { + panic("forceShortCircuitType got value of wrong type") + } + + return shortCircuit +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json.go b/vendor/github.com/hashicorp/go-cty/cty/json.go new file mode 100644 index 00000000..c421a62e --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json.go @@ -0,0 +1,176 @@ +package cty + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// MarshalJSON is an implementation of json.Marshaler that allows Type +// instances to be serialized as JSON. +// +// All standard types can be serialized, but capsule types cannot since there +// is no way to automatically recover the original pointer and capsule types +// compare by equality.
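+//
+// For example (illustrative), the serialized forms look like:
+//
+//     cty.String         => "string"
+//     cty.List(cty.Bool) => ["list","bool"]
+//     cty.Object(map[string]cty.Type{"a": cty.Number})
+//                        => ["object",{"a":"number"}]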
+func (t Type) MarshalJSON() ([]byte, error) { + switch impl := t.typeImpl.(type) { + case primitiveType: + switch impl.Kind { + case primitiveTypeBool: + return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil + case primitiveTypeNumber: + return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil + case primitiveTypeString: + return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil + default: + panic("unknown primitive type kind") + } + case typeList, typeMap, typeSet: + buf := &bytes.Buffer{} + etyJSON, err := t.ElementType().MarshalJSON() + if err != nil { + return nil, err + } + buf.WriteRune('[') + switch impl.(type) { + case typeList: + buf.WriteString(`"list"`) + case typeMap: + buf.WriteString(`"map"`) + case typeSet: + buf.WriteString(`"set"`) + } + buf.WriteRune(',') + buf.Write(etyJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeObject: + buf := &bytes.Buffer{} + atysJSON, err := json.Marshal(t.AttributeTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["object",`) + buf.Write(atysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeTuple: + buf := &bytes.Buffer{} + etysJSON, err := json.Marshal(t.TupleElementTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["tuple",`) + buf.Write(etysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case pseudoTypeDynamic: + return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil + case *capsuleType: + return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName()) + default: + // should never happen + panic("unknown type implementation") + } +} + +// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of +// MarshalJSON for information on the limitations of JSON serialization of +// types. +func (t *Type) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + *t = Bool + case "number": + *t = Number + case "string": + *t = String + case "dynamic": + *t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = List(ety) + case "map": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Map(ety) + case "set": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Set(ety) + case "object": + var atys map[string]Type + err = dec.Decode(&atys) + if err != nil { + return err + } + *t = Object(atys) + case "tuple": + var etys []Type + err = dec.Decode(&etys) + if err != nil { + return err + } + *t = Tuple(etys) + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/doc.go 
b/vendor/github.com/hashicorp/go-cty/cty/json/doc.go new file mode 100644 index 00000000..8916513d --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/doc.go @@ -0,0 +1,11 @@ +// Package json provides functions for serializing cty types and values in +// JSON format, and for decoding them again. +// +// Since the cty type system is a superset of the JSON type system, +// round-tripping through JSON is lossy unless type information is provided +// both at encoding time and decoding time. Callers of this package are +// therefore suggested to define their expected structure as a cty.Type +// and pass it in consistently both when encoding and when decoding, though +// default (type-lossy) behavior is provided for situations where the precise +// representation of the data is not significant. +package json diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go b/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go new file mode 100644 index 00000000..728ab010 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go @@ -0,0 +1,193 @@ +package json + +import ( + "bytes" + "encoding/json" + "sort" + + "github.com/hashicorp/go-cty/cty" +) + +func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error { + if val.IsMarked() { + return path.NewErrorf("value has marks, so it cannot be serialized") + } + + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, b) + } + + if val.IsNull() { + b.WriteString("null") + return nil + } + + if !val.IsKnown() { + return path.NewErrorf("value is not known") + } + + // The caller should've guaranteed that the given val is conformant with + // the given type t, so we'll proceed under that assumption here.
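+	// For example (illustrative): cty.True encodes as the bare token true,
+	// cty.NumberIntVal(5) as 5, and cty.StringVal("a") as "a".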
+ + switch { + case t.IsPrimitiveType(): + switch t { + case cty.String: + json, err := json.Marshal(val.AsString()) + if err != nil { + return path.NewErrorf("failed to serialize value: %s", err) + } + b.Write(json) + return nil + case cty.Number: + if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) { + return path.NewErrorf("cannot serialize infinity as JSON") + } + b.WriteString(val.AsBigFloat().Text('f', -1)) + return nil + case cty.Bool: + if val.True() { + b.WriteString("true") + } else { + b.WriteString("false") + } + return nil + default: + panic("unsupported primitive type") + } + case t.IsListType(), t.IsSetType(): + b.WriteRune('[') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune(']') + return nil + case t.IsMapType(): + b.WriteRune('{') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune('}') + return nil + case t.IsTupleType(): + b.WriteRune('[') + etys := t.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + for it.Next() { + if i > 0 { + b.WriteRune(',') + } + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + i++ + } + b.WriteRune(']') + return nil + case t.IsObjectType(): + b.WriteRune('{') + atys := t.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + for i, k := range names { + aty := atys[k] + if i > 0 { + b.WriteRune(',') + } + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(av, aty, path, b) + if err != nil { + return err + } + } + b.WriteRune('}') + return nil + case t.IsCapsuleType(): + rawVal := val.EncapsulatedValue() + jsonVal, err := json.Marshal(rawVal) + if err != nil { + return path.NewError(err) + } + b.Write(jsonVal) + return nil + default: + // should never happen + return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
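+//
+// For example (illustrative), cty.StringVal("hi") wrapped this way encodes
+// as:
+//
+//     {"value":"hi","type":"string"}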
+func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error { + typeJSON, err := MarshalType(val.Type()) + if err != nil { + return path.NewErrorf("failed to serialize type: %s", err) + } + b.WriteString(`{"value":`) + err = marshal(val, val.Type(), path, b) + if err != nil { + return err + } + b.WriteString(`,"type":`) + b.Write(typeJSON) + b.WriteRune('}') + return nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/simple.go b/vendor/github.com/hashicorp/go-cty/cty/json/simple.go new file mode 100644 index 00000000..aaba8c3b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/simple.go @@ -0,0 +1,41 @@ +package json + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// SimpleJSONValue is a wrapper around cty.Value that adds implementations of +// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic +// encoding and decoding of values. +// +// The couplet Marshal and Unmarshal both take extra type information to +// inform the encoding and decoding process so that all of the cty types +// can be represented even though JSON's type system is a subset. +// +// SimpleJSONValue instead takes the approach of discarding the value's type +// information and then deriving a new type from the stored structure when +// decoding. This results in the same data being returned but not necessarily +// with exactly the same type. +// +// For information on how types are inferred when decoding, see the +// documentation of the function ImpliedType. +type SimpleJSONValue struct { + cty.Value +} + +// MarshalJSON is an implementation of json.Marshaler. See the documentation +// of SimpleJSONValue for more information. +func (v SimpleJSONValue) MarshalJSON() ([]byte, error) { + return Marshal(v.Value, v.Type()) +} + +// UnmarshalJSON is an implementation of json.Unmarshaler. See the +// documentation of SimpleJSONValue for more information. +func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error { + t, err := ImpliedType(buf) + if err != nil { + return err + } + v.Value, err = Unmarshal(buf, t) + return err +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/type.go b/vendor/github.com/hashicorp/go-cty/cty/json/type.go new file mode 100644 index 00000000..59d7f2e1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/type.go @@ -0,0 +1,23 @@ +package json + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// MarshalType returns a JSON serialization of the given type. +// +// This is just a thin wrapper around t.MarshalJSON, for symmetry with +// UnmarshalType. +func MarshalType(t cty.Type) ([]byte, error) { + return t.MarshalJSON() +} + +// UnmarshalType decodes a JSON serialization of the given type as produced +// by either Type.MarshalJSON or MarshalType. +// +// This is a convenience wrapper around Type.UnmarshalJSON. +func UnmarshalType(buf []byte) (cty.Type, error) { + var t cty.Type + err := t.UnmarshalJSON(buf) + return t, err +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go new file mode 100644 index 00000000..8adf22bb --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go @@ -0,0 +1,170 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// JSON-compliant buffer.
This function implements the default type mapping +// behavior used when decoding arbitrary JSON without explicit cty Type +// information. +// +// The rules are as follows: +// +// JSON strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// JSON objects map to cty object types, with the attributes defined by the +// object keys and the types of their values. +// +// JSON arrays map to cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any JSON syntax errors will be returned as an error, and the type will +// be the invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + dec.UseNumber() + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + if dec.More() { + return cty.NilType, fmt.Errorf("extraneous data after JSON object") + } + + return ty, nil +} + +func impliedType(dec *json.Decoder) (cty.Type, error) { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + return impliedTypeForTok(tok, dec) +} + +func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) { + if tok == nil { + return cty.DynamicPseudoType, nil + } + + switch ttok := tok.(type) { + case bool: + return cty.Bool, nil + + case json.Number: + return cty.Number, nil + + case string: + return cty.String, nil + + case json.Delim: + + switch rune(ttok) { + case '{': + return impliedObjectType(dec) + case '[': + return impliedTupleType(dec) + default: + return cty.NilType, fmt.Errorf("unexpected token %q", ttok) + } + + default: + return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok) + } +} + +func impliedObjectType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the { delimiter + // and so our next token should be the first object key. + + var atys map[string]cty.Type + + for { + // Read the object key first + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) != '}' { + return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok) + } + break + } + + key, ok := tok.(string) + if !ok { + return cty.NilType, fmt.Errorf("expected string but found %T", tok) + } + + // Now read the value + tok, err = dec.Token() + if err != nil { + return cty.NilType, err + } + + aty, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[key] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the [ delimiter + // and so our next token should be the first value. 
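+	// For example (illustrative), the JSON array [1,"a",true] implies
+	// cty.Tuple([]cty.Type{cty.Number, cty.String, cty.Bool}).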
+ + var etys []cty.Type + + for { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) == ']' { + break + } + } + + ety, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + etys = append(etys, ety) + } + + if len(etys) == 0 { + return cty.EmptyTuple, nil + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go b/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go new file mode 100644 index 00000000..5ad190d3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go @@ -0,0 +1,459 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" +) + +func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return cty.NilVal, path.NewError(err) + } + + if tok == nil { + return cty.NullVal(t), nil + } + + if t == cty.DynamicPseudoType { + return unmarshalDynamic(buf, path) + } + + switch { + case t.IsPrimitiveType(): + val, err := unmarshalPrimitive(tok, t, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case t.IsListType(): + return unmarshalList(buf, t.ElementType(), path) + case t.IsSetType(): + return unmarshalSet(buf, t.ElementType(), path) + case t.IsMapType(): + return unmarshalMap(buf, t.ElementType(), path) + case t.IsTupleType(): + return unmarshalTuple(buf, t.TupleElementTypes(), path) + case t.IsObjectType(): + return unmarshalObject(buf, t.AttributeTypes(), path) + case t.IsCapsuleType(): + return unmarshalCapsule(buf, t, path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName()) + } +} + +func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) { + + switch t { + case cty.Bool: + switch v := tok.(type) { + case bool: + return cty.BoolVal(v), nil + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("bool is required") + } + case cty.Number: + if v, ok := tok.(json.Number); ok { + tok = string(v) + } + switch v := tok.(type) { + case string: + val, err := cty.ParseNumberVal(v) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("number is required") + } + case cty.String: + switch v := tok.(type) { + case string: + return cty.StringVal(v), nil + case json.Number: + return cty.StringVal(string(v)), nil + case bool: + val, err := convert.Convert(cty.BoolVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("string is required") + } + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int64 + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(idx), + } + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read list value: %s", err) + } 
+ + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(ety), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read set value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(cty.String), + } + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map key: %s", err) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int + + for dec.More() { + if idx >= len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("too many tuple elements (need %d)", len(etys)) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(idx)), + } + ety := etys[idx] + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) != len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys)) + } + + if len(vals) == 0 { + return cty.EmptyTupleVal, nil + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + objPath := 
path // some errors report from the object's perspective + path := append(path, nil) // path to a specific attribute + + for dec.More() { + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object key: %s", err) + } + + aty, ok := atys[k] + if !ok { + return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k) + } + + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object value: %s", err) + } + + el, err := unmarshal(rawVal, aty, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + // Make sure we have a value for every attribute + for k, aty := range atys { + if _, exists := vals[k]; !exists { + vals[k] = cty.NullVal(aty) + } + } + + if len(vals) == 0 { + return cty.EmptyObjectVal, nil + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + rawType := t.EncapsulatedType() + ptrPtr := reflect.New(reflect.PtrTo(rawType)) + ptrPtr.Elem().Set(reflect.New(rawType)) + ptr := ptrPtr.Elem().Interface() + err := json.Unmarshal(buf, ptr) + if err != nil { + return cty.NilVal, path.NewError(err) + } + + return cty.CapsuleVal(t, ptr), nil +} + +func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + var t cty.Type + var valBody []byte // defer actual decoding until we know the type + + for dec.More() { + var err error + + key, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err) + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err) + } + + switch key { + case "type": + err := json.Unmarshal(rawVal, &t) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err) + } + case "value": + valBody = rawVal + default: + return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key) + } + + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if t == cty.NilType { + return cty.NilVal, path.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value") + } + + val, err := Unmarshal([]byte(valBody), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil +} + +func requireDelim(dec *json.Decoder, d rune) error { + tok, err := dec.Token() + if err != nil { + return err + } + + if tok != json.Delim(d) { + return fmt.Errorf("missing expected %c", d) + } + + return nil +} + +func requireObjectKey(dec *json.Decoder) (string, error) { + tok, err := dec.Token() + if err != nil { + return "", err + } + if s, ok := tok.(string); ok { + return s, nil + } + return "", fmt.Errorf("missing expected object key") +} + +func readRawValue(dec *json.Decoder) ([]byte, error) { + var rawVal json.RawMessage + err := dec.Decode(&rawVal) + if err != nil { + return nil, err + } + return []byte(rawVal), nil +} + +func bufDecoder(buf []byte) *json.Decoder { + r := bytes.NewReader(buf) + dec := 
json.NewDecoder(r) + dec.UseNumber() + return dec +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/value.go b/vendor/github.com/hashicorp/go-cty/cty/json/value.go new file mode 100644 index 00000000..50748f70 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/value.go @@ -0,0 +1,65 @@ +package json + +import ( + "bytes" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" +) + +// Marshal produces a JSON representation of the given value that can later +// be decoded into a value of the given type. +// +// A type is specified separately to allow for the given type to include +// cty.DynamicPseudoType to represent situations where any type is permitted +// and so type information must be included to allow recovery of the stored +// structure when decoding. +// +// The given type will also be used to attempt automatic conversions of any +// non-conformant types in the given value, although this will not always +// be possible. If the value cannot be made to be conformant then an error is +// returned, which may be a cty.PathError. +// +// Capsule-typed values can be marshalled, but with some caveats. Since +// capsule values are compared by pointer equality, it is impossible to recover +// a value that will compare equal to the original value. Additionally, +// it's not possible to JSON-serialize the capsule type itself, so it's not +// valid to use capsule types within parts of the value that are conformed to +// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as +// the encapsulated type itself is serializable with the Marshal function +// in encoding/json. +func Marshal(val cty.Value, t cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(t) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, t) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + buf := &bytes.Buffer{} + var path cty.Path + err := marshal(val, t, path, buf) + + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Unmarshal decodes a JSON representation of the given value into a cty Value +// conforming to the given type. +// +// While decoding, type conversions will be done where possible to make +// the result conformant even if the types given in JSON are not exactly +// correct. If conversion isn't possible then an error is returned, which +// may be a cty.PathError. +func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) { + var path cty.Path + return unmarshal(buf, t, path) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/list_type.go b/vendor/github.com/hashicorp/go-cty/cty/list_type.go new file mode 100644 index 00000000..2ef02a12 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/list_type.go @@ -0,0 +1,74 @@ +package cty + +import ( + "fmt" +) + +// TypeList instances represent specific list types. Each distinct ElementType +// creates a distinct, non-equal list type. +type typeList struct { + typeImplSigil + ElementTypeT Type +} + +// List creates a list type with the given element Type. +// +// List types are CollectionType implementations. +func List(elem Type) Type { + return Type{ + typeList{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a list whose element type is +// equal to that of the receiver.
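+//
+// For example (illustrative, via the public Type.Equals wrapper):
+//
+//     cty.List(cty.String).Equals(cty.List(cty.String)) // => true
+//     cty.List(cty.String).Equals(cty.List(cty.Bool))   // => false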
+func (t typeList) Equals(other Type) bool { + ot, isList := other.typeImpl.(typeList) + if !isList { + return false + } + + return t.ElementTypeT.Equals(ot.ElementTypeT) +} + +func (t typeList) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "list of " + elemName +} + +func (t typeList) ElementType() Type { + return t.ElementTypeT +} + +func (t typeList) GoString() string { + return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT) +} + +// IsListType returns true if the given type is a list type, regardless of its +// element type. +func (t Type) IsListType() bool { + _, ok := t.typeImpl.(typeList) + return ok +} + +// ListElementType is a convenience method that checks if the given type is +// a list type, returning a pointer to its element type if so and nil +// otherwise. This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.ListElementType(); et != nil { +// // Do something with *et +// } +func (t Type) ListElementType() *Type { + if lt, ok := t.typeImpl.(typeList); ok { + return &lt.ElementTypeT + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/map_type.go b/vendor/github.com/hashicorp/go-cty/cty/map_type.go new file mode 100644 index 00000000..82d36c62 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/map_type.go @@ -0,0 +1,74 @@ +package cty + +import ( + "fmt" +) + +// typeMap instances represent specific map types. Each distinct ElementType +// creates a distinct, non-equal map type. +type typeMap struct { + typeImplSigil + ElementTypeT Type +} + +// Map creates a map type with the given element Type. +// +// Map types are CollectionType implementations. +func Map(elem Type) Type { + return Type{ + typeMap{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a map whose element type is +// equal to that of the receiver. +func (t typeMap) Equals(other Type) bool { + ot, isMap := other.typeImpl.(typeMap) + if !isMap { + return false + } + + return t.ElementTypeT.Equals(ot.ElementTypeT) +} + +func (t typeMap) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "map of " + elemName +} + +func (t typeMap) ElementType() Type { + return t.ElementTypeT +} + +func (t typeMap) GoString() string { + return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT) +} + +// IsMapType returns true if the given type is a map type, regardless of its +// element type. +func (t Type) IsMapType() bool { + _, ok := t.typeImpl.(typeMap) + return ok +} + +// MapElementType is a convenience method that checks if the given type is +// a map type, returning a pointer to its element type if so and nil +// otherwise.
This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.MapElementType(); et != nil { +// // Do something with *et +// } +func (t Type) MapElementType() *Type { + if lt, ok := t.typeImpl.(typeMap); ok { + return &lt.ElementTypeT + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/marks.go b/vendor/github.com/hashicorp/go-cty/cty/marks.go new file mode 100644 index 00000000..3898e455 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/marks.go @@ -0,0 +1,296 @@ +package cty + +import ( + "fmt" + "strings" +) + +// marker is an internal wrapper type used to add special "marks" to values. +// +// A "mark" is an annotation that can be used to represent additional +// characteristics of values that propagate through operation methods to +// result values. However, a marked value cannot be used with integration +// methods normally associated with its type, in order to ensure that +// calling applications don't inadvertently drop marks as they round-trip +// values out of cty and back in again. +// +// Marked values are created only explicitly by the calling application, so +// an application that never marks a value does not need to worry about +// encountering marked values. +type marker struct { + realV interface{} + marks ValueMarks +} + +// ValueMarks is a map, representing a set, of "mark" values associated with +// a Value. See Value.Mark for more information on the usage of mark values. +type ValueMarks map[interface{}]struct{} + +// NewValueMarks constructs a new ValueMarks set with the given mark values. +func NewValueMarks(marks ...interface{}) ValueMarks { + if len(marks) == 0 { + return nil + } + ret := make(ValueMarks, len(marks)) + for _, v := range marks { + ret[v] = struct{}{} + } + return ret +} + +// Equal returns true if the receiver and the given ValueMarks both contain +// the same marks. +func (m ValueMarks) Equal(o ValueMarks) bool { + if len(m) != len(o) { + return false + } + for v := range m { + if _, ok := o[v]; !ok { + return false + } + } + return true +} + +func (m ValueMarks) GoString() string { + var s strings.Builder + s.WriteString("cty.NewValueMarks(") + i := 0 + for mv := range m { + if i != 0 { + s.WriteString(", ") + } + s.WriteString(fmt.Sprintf("%#v", mv)) + i++ + } + s.WriteString(")") + return s.String() +} + +// IsMarked returns true if and only if the receiving value carries at least +// one mark. A marked value cannot be used directly with integration methods +// without explicitly unmarking it (and retrieving the markings) first. +func (val Value) IsMarked() bool { + _, ok := val.v.(marker) + return ok +} + +// HasMark returns true if and only if the receiving value has the given mark. +func (val Value) HasMark(mark interface{}) bool { + if mr, ok := val.v.(marker); ok { + _, ok := mr.marks[mark] + return ok + } + return false +} + +// ContainsMarked returns true if the receiving value or any value within it +// is marked. +// +// This operation is relatively expensive. If you only need a shallow result, +// use IsMarked instead.
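+//
+// For example (illustrative):
+//
+//     v := cty.ListVal([]cty.Value{cty.StringVal("a").Mark("sensitive")})
+//     v.IsMarked()       // => false
+//     v.ContainsMarked() // => true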
+func (val Value) ContainsMarked() bool { + ret := false + Walk(val, func(_ Path, v Value) (bool, error) { + if v.IsMarked() { + ret = true + return false, nil + } + return true, nil + }) + return ret +} + +func (val Value) assertUnmarked() { + if val.IsMarked() { + panic("value is marked, so must be unmarked first") + } +} + +// Marks returns a map (representing a set) of all of the mark values +// associated with the receiving value, without changing the marks. Returns nil +// if the value is not marked at all. +func (val Value) Marks() ValueMarks { + if mr, ok := val.v.(marker); ok { + // copy so that the caller can't mutate our internals + ret := make(ValueMarks, len(mr.marks)) + for k, v := range mr.marks { + ret[k] = v + } + return ret + } + return nil +} + +// HasSameMarks returns true if and only if the receiver and the given other +// value have identical marks. +func (val Value) HasSameMarks(other Value) bool { + vm, vmOK := val.v.(marker) + om, omOK := other.v.(marker) + if vmOK != omOK { + return false + } + if vmOK { + return vm.marks.Equal(om.marks) + } + return true +} + +// Mark returns a new value that has the same type and underlying value as +// the receiver but that also carries the given value as a "mark". +// +// Marks are used to carry additional application-specific characteristics +// associated with values. A marked value can be used with operation methods, +// in which case the marks are propagated to the operation results. A marked +// value _cannot_ be used with integration methods, so callers of those +// must derive an unmarked value using Unmark (and thus explicitly handle +// the markings) before calling the integration methods. +// +// The mark value can be any value that would be valid to use as a map key. +// The mark value should be of a named type in order to use the type itself +// as a namespace for markings. That type can be unexported if desired, in +// order to ensure that the mark can only be handled through the defining +// package's own functions. +// +// An application that never calls this method does not need to worry about +// handling marked values. +func (val Value) Mark(mark interface{}) Value { + var newMarker marker + newMarker.realV = val.v + if mr, ok := val.v.(marker); ok { + // It's already a marker, so we'll retain existing marks. + newMarker.marks = make(ValueMarks, len(mr.marks)+1) + for k, v := range mr.marks { + newMarker.marks[k] = v + } + } else { + // It's not a marker yet, so we're creating the first mark. + newMarker.marks = make(ValueMarks, 1) + } + newMarker.marks[mark] = struct{}{} + return Value{ + ty: val.ty, + v: newMarker, + } +} + +// Unmark separates the marks of the receiving value from the value itself, +// returning a new unmarked value and a map (representing a set) of the marks. +// +// If the receiver isn't marked, Unmark returns it verbatim along with a nil +// map of marks. +func (val Value) Unmark() (Value, ValueMarks) { + if !val.IsMarked() { + return val, nil + } + mr := val.v.(marker) + marks := val.Marks() // copy so that the caller can't mutate our internals + return Value{ + ty: val.ty, + v: mr.realV, + }, marks +} + +// UnmarkDeep is similar to Unmark, but it works with an entire nested structure +// rather than just the given value directly. +// +// The result is guaranteed to contain no nested values that are marked, and +// the returned marks set includes the superset of all of the marks encountered +// during the operation.
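+//
+// A sketch of a common round-trip (illustrative):
+//
+//     unmarked, marks := val.UnmarkDeep()
+//     // ...compute a result from the unmarked value...
+//     result = result.WithMarks(marks)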
+func (val Value) UnmarkDeep() (Value, ValueMarks) { + marks := make(ValueMarks) + ret, _ := Transform(val, func(_ Path, v Value) (Value, error) { + unmarkedV, valueMarks := v.Unmark() + for m, s := range valueMarks { + marks[m] = s + } + return unmarkedV, nil + }) + return ret, marks +} + +func (val Value) unmarkForce() Value { + unw, _ := val.Unmark() + return unw +} + +// WithMarks returns a new value that has the same type and underlying value +// as the receiver and also has the marks from the given maps (representing +// sets). +func (val Value) WithMarks(marks ...ValueMarks) Value { + if len(marks) == 0 { + return val + } + ownMarks := val.Marks() + markCount := len(ownMarks) + for _, s := range marks { + markCount += len(s) + } + if markCount == 0 { + return val + } + newMarks := make(ValueMarks, markCount) + for m := range ownMarks { + newMarks[m] = struct{}{} + } + for _, s := range marks { + for m := range s { + newMarks[m] = struct{}{} + } + } + v := val.v + if mr, ok := v.(marker); ok { + v = mr.realV + } + return Value{ + ty: val.ty, + v: marker{ + realV: v, + marks: newMarks, + }, + } +} + +// WithSameMarks returns a new value that has the same type and underlying +// value as the receiver and also has the marks from the given source values. +// +// Use this if you are implementing your own higher-level operations against +// cty using the integration methods, to re-introduce the marks from the +// source values of the operation. +func (val Value) WithSameMarks(srcs ...Value) Value { + if len(srcs) == 0 { + return val + } + ownMarks := val.Marks() + markCount := len(ownMarks) + for _, sv := range srcs { + if mr, ok := sv.v.(marker); ok { + markCount += len(mr.marks) + } + } + if markCount == 0 { + return val + } + newMarks := make(ValueMarks, markCount) + for m := range ownMarks { + newMarks[m] = struct{}{} + } + for _, sv := range srcs { + if mr, ok := sv.v.(marker); ok { + for m := range mr.marks { + newMarks[m] = struct{}{} + } + } + } + v := val.v + if mr, ok := v.(marker); ok { + v = mr.realV + } + return Value{ + ty: val.ty, + v: marker{ + realV: v, + marks: newMarks, + }, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go similarity index 100% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go similarity index 95% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go index 1b631d0a..ce59d9ff 100644 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go @@ -3,8 +3,8 @@ package msgpack import ( "bytes" + "github.com/hashicorp/go-cty/cty" "github.com/vmihailenco/msgpack" - "github.com/zclconf/go-cty/cty" ) type dynamicVal struct { diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go similarity index 100% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go 
similarity index 98% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go index 51c75aa8..8a43c16a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go @@ -5,9 +5,9 @@ import ( "math/big" "sort" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" "github.com/vmihailenco/msgpack" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" ) // Marshal produces a msgpack serialization of the given value that diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go similarity index 99% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go index 6f6022e4..86664bac 100644 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go @@ -5,9 +5,9 @@ import ( "fmt" "io" + "github.com/hashicorp/go-cty/cty" "github.com/vmihailenco/msgpack" msgpackcodes "github.com/vmihailenco/msgpack/codes" - "github.com/zclconf/go-cty/cty" ) // ImpliedType returns the cty Type implied by the structure of the given diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go similarity index 100% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go similarity index 99% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go index 51bb76a8..67f4c9a4 100644 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go @@ -3,9 +3,9 @@ package msgpack import ( "bytes" + "github.com/hashicorp/go-cty/cty" "github.com/vmihailenco/msgpack" msgpackCodes "github.com/vmihailenco/msgpack/codes" - "github.com/zclconf/go-cty/cty" ) // Unmarshal interprets the given bytes as a msgpack-encoded cty Value of diff --git a/vendor/github.com/hashicorp/go-cty/cty/null.go b/vendor/github.com/hashicorp/go-cty/cty/null.go new file mode 100644 index 00000000..d58d0287 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/null.go @@ -0,0 +1,14 @@ +package cty + +// NullVal returns a null value of the given type. A null can be created of any +// type, but operations on such values will always panic. Calling applications +// are encouraged to use nulls only sparingly, particularly when user-provided +// expressions are to be evaluated, since the presence of nulls creates a +// much higher chance of evaluation errors that can't be caught by a type +// checker.
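+//
+// For example (illustrative):
+//
+//     n := cty.NullVal(cty.String)
+//     n.IsNull() // => true
+//     n.Type()   // => cty.String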
+func NullVal(t Type) Value { + return Value{ + ty: t, + v: nil, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/object_type.go b/vendor/github.com/hashicorp/go-cty/cty/object_type.go new file mode 100644 index 00000000..187d3875 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/object_type.go @@ -0,0 +1,135 @@ +package cty + +import ( + "fmt" +) + +type typeObject struct { + typeImplSigil + AttrTypes map[string]Type +} + +// Object creates an object type with the given attribute types. +// +// After a map is passed to this function the caller must no longer access it, +// since ownership is transferred to this library. +func Object(attrTypes map[string]Type) Type { + attrTypesNorm := make(map[string]Type, len(attrTypes)) + for k, v := range attrTypes { + attrTypesNorm[NormalizeString(k)] = v + } + + return Type{ + typeObject{ + AttrTypes: attrTypesNorm, + }, + } +} + +func (t typeObject) Equals(other Type) bool { + if ot, ok := other.typeImpl.(typeObject); ok { + if len(t.AttrTypes) != len(ot.AttrTypes) { + // Fast path: if we don't have the same number of attributes + // then we can't possibly be equal. This also avoids the need + // to test attributes in both directions below, since we know + // there can't be extras in "other". + return false + } + + for attr, ty := range t.AttrTypes { + oty, ok := ot.AttrTypes[attr] + if !ok { + return false + } + if !oty.Equals(ty) { + return false + } + } + + return true + } + return false +} + +func (t typeObject) FriendlyName(mode friendlyTypeNameMode) string { + // There isn't really a friendly way to write an object type due to its + // complexity, so we'll just do something English-ish. Callers will + // probably want to make some extra effort to avoid ever printing out + // an object type FriendlyName in its entirety. For example, a caller could + // produce an error message by diffing two object types and saying + // something like "Expected attribute foo to be string, but got number". + // TODO: Finish this + return "object" +} + +func (t typeObject) GoString() string { + if len(t.AttrTypes) == 0 { + return "cty.EmptyObject" + } + return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes) +} + +// EmptyObject is a shorthand for Object(map[string]Type{}), to more +// easily talk about the empty object type. +var EmptyObject Type + +// EmptyObjectVal is the only possible non-null, non-unknown value of type +// EmptyObject. +var EmptyObjectVal Value + +func init() { + EmptyObject = Object(map[string]Type{}) + EmptyObjectVal = Value{ + ty: EmptyObject, + v: map[string]interface{}{}, + } +} + +// IsObjectType returns true if the given type is an object type, regardless +// of its element type. +func (t Type) IsObjectType() bool { + _, ok := t.typeImpl.(typeObject) + return ok +} + +// HasAttribute returns true if the receiver has an attribute with the given +// name, regardless of its type. Will panic if the receiver isn't an object +// type; use IsObjectType to determine whether this operation will succeed. +func (t Type) HasAttribute(name string) bool { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + _, hasAttr := ot.AttrTypes[name] + return hasAttr + } + panic("HasAttribute on non-object Type") +} + +// AttributeType returns the type of the attribute with the given name. Will +// panic if the receiver is not an object type (use IsObjectType to confirm) +// or if the object type has no such attribute (use HasAttribute to confirm).
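+//
+// For example (illustrative):
+//
+//     ty := cty.Object(map[string]cty.Type{"name": cty.String})
+//     ty.AttributeType("name") // => cty.String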
+func (t Type) AttributeType(name string) Type { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + aty, hasAttr := ot.AttrTypes[name] + if !hasAttr { + panic("no such attribute") + } + return aty + } + panic("AttributeType on non-object Type") +} + +// AttributeTypes returns a map from attribute names to their associated +// types. Will panic if the receiver is not an object type (use IsObjectType +// to confirm). +// +// The returned map is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the returned +// map. For many purposes the attribute-related methods of Value are more +// appropriate and more convenient to use. +func (t Type) AttributeTypes() map[string]Type { + if ot, ok := t.typeImpl.(typeObject); ok { + return ot.AttrTypes + } + panic("AttributeTypes on non-object Type") +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/path.go b/vendor/github.com/hashicorp/go-cty/cty/path.go new file mode 100644 index 00000000..636e68c6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/path.go @@ -0,0 +1,270 @@ +package cty + +import ( + "errors" + "fmt" +) + +// A Path is a sequence of operations to locate a nested value within a +// data structure. +// +// The empty Path represents the given item. Any PathSteps within represent +// taking a single step down into a data structure. +// +// Path has some convenience methods for gradually constructing a path, +// but callers can also feel free to just produce a slice of PathStep manually +// and convert to this type, which may be more appropriate in environments +// where memory pressure is a concern. +// +// Although a Path is technically mutable, by convention callers should not +// mutate a path once it has been built and passed to some other subsystem. +// Instead, use Copy and then mutate the copy before using it. +type Path []PathStep + +// PathStep represents a single step down into a data structure, as part +// of a Path. PathStep is a closed interface, meaning that the only +// permitted implementations are those within this package. +type PathStep interface { + pathStepSigil() pathStepImpl + Apply(Value) (Value, error) +} + +// embed pathImpl into a struct to declare it a PathStep implementation +type pathStepImpl struct{} + +func (p pathStepImpl) pathStepSigil() pathStepImpl { + return p +} + +// Index returns a new Path that is the reciever with an IndexStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) Index(v Value) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = IndexStep{ + Key: v, + } + return ret +} + +// IndexInt is a typed convenience method for Index. +func (p Path) IndexInt(v int) Path { + return p.Index(NumberIntVal(int64(v))) +} + +// IndexString is a typed convenience method for Index. +func (p Path) IndexString(v string) Path { + return p.Index(StringVal(v)) +} + +// IndexPath is a convenience method to start a new Path with an IndexStep. +func IndexPath(v Value) Path { + return Path{}.Index(v) +} + +// IndexIntPath is a typed convenience method for IndexPath. +func IndexIntPath(v int) Path { + return IndexPath(NumberIntVal(int64(v))) +} + +// IndexStringPath is a typed convenience method for IndexPath. 
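+//
+// A minimal sketch of composing a path from these helpers:
+//
+//	p := cty.IndexStringPath("env").IndexInt(0)
+//	// p is equivalent to:
+//	// cty.Path{
+//	//	cty.IndexStep{Key: cty.StringVal("env")},
+//	//	cty.IndexStep{Key: cty.NumberIntVal(0)},
+//	// }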
+func IndexStringPath(v string) Path { + return IndexPath(StringVal(v)) +} + +// GetAttr returns a new Path that is the reciever with a GetAttrStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) GetAttr(name string) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = GetAttrStep{ + Name: name, + } + return ret +} + +// Equals compares 2 Paths for exact equality. +func (p Path) Equals(other Path) bool { + if len(p) != len(other) { + return false + } + + for i := range p { + pv := p[i] + switch pv := pv.(type) { + case GetAttrStep: + ov, ok := other[i].(GetAttrStep) + if !ok || pv != ov { + return false + } + case IndexStep: + ov, ok := other[i].(IndexStep) + if !ok { + return false + } + + if !pv.Key.RawEquals(ov.Key) { + return false + } + default: + // Any invalid steps default to evaluating false. + return false + } + } + + return true + +} + +// HasPrefix determines if the path p contains the provided prefix. +func (p Path) HasPrefix(prefix Path) bool { + if len(prefix) > len(p) { + return false + } + + return p[:len(prefix)].Equals(prefix) +} + +// GetAttrPath is a convenience method to start a new Path with a GetAttrStep. +func GetAttrPath(name string) Path { + return Path{}.GetAttr(name) +} + +// Apply applies each of the steps in turn to successive values starting with +// the given value, and returns the result. If any step returns an error, +// the whole operation returns an error. +func (p Path) Apply(val Value) (Value, error) { + var err error + for i, step := range p { + val, err = step.Apply(val) + if err != nil { + return NilVal, fmt.Errorf("at step %d: %s", i, err) + } + } + return val, nil +} + +// LastStep applies the given path up to the last step and then returns +// the resulting value and the final step. +// +// This is useful when dealing with assignment operations, since in that +// case the *value* of the last step is not important (and may not, in fact, +// present at all) and we care only about its location. +// +// Since LastStep applies all steps except the last, it will return errors +// for those steps in the same way as Apply does. +// +// If the path has *no* steps then the returned PathStep will be nil, +// representing that any operation should be applied directly to the +// given value. +func (p Path) LastStep(val Value) (Value, PathStep, error) { + var err error + + if len(p) == 0 { + return val, nil, nil + } + + journey := p[:len(p)-1] + val, err = journey.Apply(val) + if err != nil { + return NilVal, nil, err + } + return val, p[len(p)-1], nil +} + +// Copy makes a shallow copy of the receiver. Often when paths are passed to +// caller code they come with the constraint that they are valid only until +// the caller returns, due to how they are constructed internally. Callers +// can use Copy to conveniently produce a copy of the value that _they_ control +// the validity of. +func (p Path) Copy() Path { + ret := make(Path, len(p)) + copy(ret, p) + return ret +} + +// IndexStep is a Step implementation representing applying the index operation +// to a value, which must be of either a list, map, or set type. +// +// When describing a path through a *type* rather than a concrete value, +// the Key may be an unknown value, indicating that the step applies to +// *any* key of the given type. 
+// +// When indexing into a set, the Key is actually the element being accessed +// itself, since in sets elements are their own identity. +type IndexStep struct { + pathStepImpl + Key Value +} + +// Apply returns the value resulting from indexing the given value with +// our key value. +func (s IndexStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot index a null value") + } + + switch s.Key.Type() { + case Number: + if !(val.Type().IsListType() || val.Type().IsTupleType()) { + return NilVal, errors.New("not a list type") + } + case String: + if !val.Type().IsMapType() { + return NilVal, errors.New("not a map type") + } + default: + return NilVal, errors.New("key value not number or string") + } + + has := val.HasIndex(s.Key) + if !has.IsKnown() { + return UnknownVal(val.Type().ElementType()), nil + } + if !has.True() { + return NilVal, errors.New("value does not have given index key") + } + + return val.Index(s.Key), nil +} + +func (s IndexStep) GoString() string { + return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key) +} + +// GetAttrStep is a Step implementation representing retrieving an attribute +// from a value, which must be of an object type. +type GetAttrStep struct { + pathStepImpl + Name string +} + +// Apply returns the value of our named attribute from the given value, which +// must be of an object type that has a value of that name. +func (s GetAttrStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot access attributes on a null value") + } + + if !val.Type().IsObjectType() { + return NilVal, errors.New("not an object type") + } + + if !val.Type().HasAttribute(s.Name) { + return NilVal, fmt.Errorf("object has no attribute %q", s.Name) + } + + return val.GetAttr(s.Name), nil +} + +func (s GetAttrStep) GoString() string { + return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/path_set.go b/vendor/github.com/hashicorp/go-cty/cty/path_set.go new file mode 100644 index 00000000..977523de --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/path_set.go @@ -0,0 +1,198 @@ +package cty + +import ( + "fmt" + "hash/crc64" + + "github.com/hashicorp/go-cty/cty/set" +) + +// PathSet represents a set of Path objects. This can be used, for example, +// to talk about a subset of paths within a value that meet some criteria, +// without directly modifying the values at those paths. +type PathSet struct { + set set.Set +} + +// NewPathSet creates and returns a PathSet, with initial contents optionally +// set by the given arguments. +func NewPathSet(paths ...Path) PathSet { + ret := PathSet{ + set: set.NewSet(pathSetRules{}), + } + + for _, path := range paths { + ret.Add(path) + } + + return ret +} + +// Add inserts a single given path into the set. +// +// Paths are immutable after construction by convention. It is particularly +// important not to mutate a path after it has been placed into a PathSet. +// If a Path is mutated while in a set, behavior is undefined. +func (s PathSet) Add(path Path) { + s.set.Add(path) +} + +// AddAllSteps is like Add but it also adds all of the steps leading to +// the given path. +// +// For example, if given a path representing "foo.bar", it will add both +// "foo" and "bar". +func (s PathSet) AddAllSteps(path Path) { + for i := 1; i <= len(path); i++ { + s.Add(path[:i]) + } +} + +// Has returns true if the given path is in the receiving set. 
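+//
+// A minimal sketch:
+//
+//	ps := cty.NewPathSet(cty.GetAttrPath("foo"))
+//	ps.Has(cty.GetAttrPath("foo"))             // true
+//	ps.Has(cty.GetAttrPath("foo").IndexInt(0)) // false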
+func (s PathSet) Has(path Path) bool { + return s.set.Has(path) +} + +// List makes and returns a slice of all of the paths in the receiving set, +// in an undefined but consistent order. +func (s PathSet) List() []Path { + if s.Empty() { + return nil + } + ret := make([]Path, 0, s.set.Length()) + for it := s.set.Iterator(); it.Next(); { + ret = append(ret, it.Value().(Path)) + } + return ret +} + +// Remove modifies the receving set to no longer include the given path. +// If the given path was already absent, this is a no-op. +func (s PathSet) Remove(path Path) { + s.set.Remove(path) +} + +// Empty returns true if the length of the receiving set is zero. +func (s PathSet) Empty() bool { + return s.set.Length() == 0 +} + +// Union returns a new set whose contents are the union of the receiver and +// the given other set. +func (s PathSet) Union(other PathSet) PathSet { + return PathSet{ + set: s.set.Union(other.set), + } +} + +// Intersection returns a new set whose contents are the intersection of the +// receiver and the given other set. +func (s PathSet) Intersection(other PathSet) PathSet { + return PathSet{ + set: s.set.Intersection(other.set), + } +} + +// Subtract returns a new set whose contents are those from the receiver with +// any elements of the other given set subtracted. +func (s PathSet) Subtract(other PathSet) PathSet { + return PathSet{ + set: s.set.Subtract(other.set), + } +} + +// SymmetricDifference returns a new set whose contents are the symmetric +// difference of the receiver and the given other set. +func (s PathSet) SymmetricDifference(other PathSet) PathSet { + return PathSet{ + set: s.set.SymmetricDifference(other.set), + } +} + +// Equal returns true if and only if both the receiver and the given other +// set contain exactly the same paths. +func (s PathSet) Equal(other PathSet) bool { + if s.set.Length() != other.set.Length() { + return false + } + // Now we know the lengths are the same we only need to test in one + // direction whether everything in one is in the other. + for it := s.set.Iterator(); it.Next(); { + if !other.set.Has(it.Value()) { + return false + } + } + return true +} + +var crc64Table = crc64.MakeTable(crc64.ISO) + +var indexStepPlaceholder = []byte("#") + +// pathSetRules is an implementation of set.Rules from the set package, +// used internally within PathSet. +type pathSetRules struct { +} + +func (r pathSetRules) Hash(v interface{}) int { + path := v.(Path) + hash := crc64.New(crc64Table) + + for _, rawStep := range path { + switch step := rawStep.(type) { + case GetAttrStep: + // (this creates some garbage converting the string name to a + // []byte, but that's okay since cty is not designed to be + // used in tight loops under memory pressure.) + hash.Write([]byte(step.Name)) + default: + // For any other step type we just append a predefined value, + // which means that e.g. all indexes into a given collection will + // hash to the same value but we assume that collections are + // small and thus this won't hurt too much. + hash.Write(indexStepPlaceholder) + } + } + + // We discard half of the hash on 32-bit platforms; collisions just make + // our lookups take marginally longer, so not a big deal. 
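+	//
+	// For illustration: cty.GetAttrPath("a") and cty.GetAttrPath("b")
+	// hash differently, while cty.IndexIntPath(0) and cty.IndexIntPath(1)
+	// hash identically and are told apart only by Equivalent below.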
+ return int(hash.Sum64()) +} + +func (r pathSetRules) Equivalent(a, b interface{}) bool { + aPath := a.(Path) + bPath := b.(Path) + + if len(aPath) != len(bPath) { + return false + } + + for i := range aPath { + switch aStep := aPath[i].(type) { + case GetAttrStep: + bStep, ok := bPath[i].(GetAttrStep) + if !ok { + return false + } + + if aStep.Name != bStep.Name { + return false + } + case IndexStep: + bStep, ok := bPath[i].(IndexStep) + if !ok { + return false + } + + eq := aStep.Key.Equals(bStep.Key) + if !eq.IsKnown() || eq.False() { + return false + } + default: + // Should never happen, since we document PathStep as a closed type. + panic(fmt.Errorf("unsupported step type %T", aStep)) + } + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go b/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go new file mode 100644 index 00000000..7b3d1196 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go @@ -0,0 +1,122 @@ +package cty + +import "math/big" + +// primitiveType is the hidden implementation of the various primitive types +// that are exposed as variables in this package. +type primitiveType struct { + typeImplSigil + Kind primitiveTypeKind +} + +type primitiveTypeKind byte + +const ( + primitiveTypeBool primitiveTypeKind = 'B' + primitiveTypeNumber primitiveTypeKind = 'N' + primitiveTypeString primitiveTypeKind = 'S' +) + +func (t primitiveType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(primitiveType); ok { + return otherP.Kind == t.Kind + } + return false +} + +func (t primitiveType) FriendlyName(mode friendlyTypeNameMode) string { + switch t.Kind { + case primitiveTypeBool: + return "bool" + case primitiveTypeNumber: + return "number" + case primitiveTypeString: + return "string" + default: + // should never happen + panic("invalid primitive type") + } +} + +func (t primitiveType) GoString() string { + switch t.Kind { + case primitiveTypeBool: + return "cty.Bool" + case primitiveTypeNumber: + return "cty.Number" + case primitiveTypeString: + return "cty.String" + default: + // should never happen + panic("invalid primitive type") + } +} + +// Number is the numeric type. Number values are arbitrary-precision +// decimal numbers, which can then be converted into Go's various numeric +// types only if they are in the appropriate range. +var Number Type + +// String is the string type. String values are sequences of unicode codepoints +// encoded internally as UTF-8. +var String Type + +// Bool is the boolean type. The two values of this type are True and False. +var Bool Type + +// True is the truthy value of type Bool +var True Value + +// False is the falsey value of type Bool +var False Value + +// Zero is a number value representing exactly zero. 
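+//
+// For illustration: Zero.Equals(NumberIntVal(0)) returns True.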
+var Zero Value + +// PositiveInfinity is a Number value representing positive infinity +var PositiveInfinity Value + +// NegativeInfinity is a Number value representing negative infinity +var NegativeInfinity Value + +func init() { + Number = Type{ + primitiveType{Kind: primitiveTypeNumber}, + } + String = Type{ + primitiveType{Kind: primitiveTypeString}, + } + Bool = Type{ + primitiveType{Kind: primitiveTypeBool}, + } + True = Value{ + ty: Bool, + v: true, + } + False = Value{ + ty: Bool, + v: false, + } + Zero = Value{ + ty: Number, + v: big.NewFloat(0), + } + PositiveInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(false), + } + NegativeInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(true), + } +} + +// IsPrimitiveType returns true if and only if the reciever is a primitive +// type, which means it's either number, string, or bool. Any two primitive +// types can be safely compared for equality using the standard == operator +// without panic, which is not a guarantee that holds for all types. Primitive +// types can therefore also be used in switch statements. +func (t Type) IsPrimitiveType() bool { + _, ok := t.typeImpl.(primitiveType) + return ok +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/gob.go b/vendor/github.com/hashicorp/go-cty/cty/set/gob.go new file mode 100644 index 00000000..da2978f6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/gob.go @@ -0,0 +1,76 @@ +package set + +import ( + "bytes" + "encoding/gob" + "fmt" +) + +// GobEncode is an implementation of the interface gob.GobEncoder, allowing +// sets to be included in structures encoded via gob. +// +// The set rules are included in the serialized value, so the caller must +// register its concrete rules type with gob.Register before using a +// set in a gob, and possibly also implement GobEncode/GobDecode to customize +// how any parameters are persisted. +// +// The set elements are also included, so if they are of non-primitive types +// they too must be registered with gob. +// +// If the produced gob values will persist for a long time, the caller must +// ensure compatibility of the rules implementation. In particular, if the +// definition of element equivalence changes between encoding and decoding +// then two distinct stored elements may be considered equivalent on decoding, +// causing the recovered set to have fewer elements than when it was stored. +func (s Set) GobEncode() ([]byte, error) { + gs := gobSet{ + Version: 0, + Rules: s.rules, + Values: s.Values(), + } + + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + err := enc.Encode(gs) + if err != nil { + return nil, fmt.Errorf("error encoding set.Set: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is the opposite of GobEncode. See GobEncode for information +// on the requirements for and caveats of including set values in gobs. 
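+//
+// A round-trip sketch, assuming a caller-defined Rules implementation
+// (the name myRules here is hypothetical) already registered with
+// gob.Register:
+//
+//	s := set.NewSet(myRules{})
+//	var buf bytes.Buffer
+//	_ = gob.NewEncoder(&buf).Encode(s) // uses GobEncode
+//	var s2 set.Set
+//	_ = gob.NewDecoder(&buf).Decode(&s2) // uses GobDecode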
+func (s *Set) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gs gobSet + err := dec.Decode(&gs) + if err != nil { + return fmt.Errorf("error decoding set.Set: %s", err) + } + if gs.Version != 0 { + return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version) + } + + victim := NewSetFromSlice(gs.Rules, gs.Values) + s.vals = victim.vals + s.rules = victim.rules + return nil +} + +type gobSet struct { + Version int + Rules Rules + + // The bucket-based representation is for efficient in-memory access, but + // for serialization it's enough to just retain the values themselves, + // which we can re-bucket using the rules (which may have changed!) when + // we re-inflate. + Values []interface{} +} + +func init() { + gob.Register([]interface{}(nil)) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go b/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go new file mode 100644 index 00000000..4a60494f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go @@ -0,0 +1,15 @@ +package set + +type Iterator struct { + vals []interface{} + idx int +} + +func (it *Iterator) Value() interface{} { + return it.vals[it.idx] +} + +func (it *Iterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/ops.go b/vendor/github.com/hashicorp/go-cty/cty/set/ops.go new file mode 100644 index 00000000..fd1555f2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/ops.go @@ -0,0 +1,210 @@ +package set + +import ( + "sort" +) + +// Add inserts the given value into the receiving Set. +// +// This mutates the set in-place. This operation is not thread-safe. +func (s Set) Add(val interface{}) { + hv := s.rules.Hash(val) + if _, ok := s.vals[hv]; !ok { + s.vals[hv] = make([]interface{}, 0, 1) + } + bucket := s.vals[hv] + + // See if an equivalent value is already present + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return + } + } + + s.vals[hv] = append(bucket, val) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s Set) Remove(val interface{}) { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return + } + + for i, ev := range bucket { + if s.rules.Equivalent(val, ev) { + newBucket := make([]interface{}, 0, len(bucket)-1) + newBucket = append(newBucket, bucket[:i]...) + newBucket = append(newBucket, bucket[i+1:]...) + if len(newBucket) > 0 { + s.vals[hv] = newBucket + } else { + delete(s.vals, hv) + } + return + } + } +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s Set) Has(val interface{}) bool { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return false + } + + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return true + } + } + return false +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s Set) Copy() Set { + ret := NewSet(s.rules) + for k, v := range s.vals { + ret.vals[k] = v + } + return ret +} + +// Iterator returns an iterator over values in the set. If the set's rules +// implement OrderedRules then the result is ordered per those rules. If +// no order is provided, or if it is not a total order, then the iteration +// order is undefined but consistent for a particular version of cty. 
Do not +// rely on specific ordering between cty releases unless the rules order is a +// total order. +// +// The pattern for using the returned iterator is: +// +// it := set.Iterator() +// for it.Next() { +// val := it.Value() +// // ... +// } +// +// Once an iterator has been created for a set, the set *must not* be mutated +// until the iterator is no longer in use. +func (s Set) Iterator() *Iterator { + vals := s.Values() + + return &Iterator{ + vals: vals, + idx: -1, + } +} + +// EachValue calls the given callback once for each value in the set, in an +// undefined order that callers should not depend on. +func (s Set) EachValue(cb func(interface{})) { + it := s.Iterator() + for it.Next() { + cb(it.Value()) + } +} + +// Values returns a slice of all the values in the set. If the set rules have +// an order then the result is in that order. If no order is provided or if +// it is not a total order then the result order is undefined, but consistent +// for a particular set value within a specific release of cty. +func (s Set) Values() []interface{} { + var ret []interface{} + // Sort the bucketIds to ensure that we always traverse in a + // consistent order. + bucketIDs := make([]int, 0, len(s.vals)) + for id := range s.vals { + bucketIDs = append(bucketIDs, id) + } + sort.Ints(bucketIDs) + + for _, bucketID := range bucketIDs { + ret = append(ret, s.vals[bucketID]...) + } + + if orderRules, ok := s.rules.(OrderedRules); ok { + sort.SliceStable(ret, func(i, j int) bool { + return orderRules.Less(ret[i], ret[j]) + }) + } + + return ret +} + +// Length returns the number of values in the set. +func (s Set) Length() int { + var count int + for _, bucket := range s.vals { + count = count + len(bucket) + } + return count +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same rules, or +// else this function will panic. +func (s1 Set) Union(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + rs.Add(v) + }) + s2.EachValue(func(v interface{}) { + rs.Add(v) + }) + return rs +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Intersection(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Subtract(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same rules, or else this function will +// panic. 
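+//
+// For illustration: if s1 holds {"a", "b"} and s2 holds {"b", "c"} under
+// the same rules, s1.SymmetricDifference(s2) holds {"a", "c"}.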
+func (s1 Set) SymmetricDifference(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + rs.Add(v) + } + }) + return rs +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/rules.go b/vendor/github.com/hashicorp/go-cty/cty/set/rules.go new file mode 100644 index 00000000..51f744b5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/rules.go @@ -0,0 +1,43 @@ +package set + +// Rules represents the operations that define membership for a Set. +// +// Each Set has a Rules instance, whose methods must satisfy the interface +// contracts given below for any value that will be added to the set. +type Rules interface { + // Hash returns an int that somewhat-uniquely identifies the given value. + // + // A good hash function will minimize collisions for values that will be + // added to the set, though collisions *are* permitted. Collisions will + // simply reduce the efficiency of operations on the set. + Hash(interface{}) int + + // Equivalent returns true if and only if the two values are considered + // equivalent for the sake of set membership. Two values that are + // equivalent cannot exist in the set at the same time, and if two + // equivalent values are added it is undefined which one will be + // returned when enumerating all of the set members. + // + // Two values that are equivalent *must* result in the same hash value, + // though it is *not* required that two values with the same hash value + // be equivalent. + Equivalent(interface{}, interface{}) bool +} + +// OrderedRules is an extension of Rules that can apply a partial order to +// element values. When a set's Rules implements OrderedRules an iterator +// over the set will return items in the order described by the rules. +// +// If the given order is not a total order (that is, some pairs of non-equivalent +// elements do not have a defined order) then the resulting iteration order +// is undefined but consistent for a particular version of cty. The exact +// order in that case is not part of the contract and is subject to change +// between versions. +type OrderedRules interface { + Rules + + // Less returns true if and only if the first argument should sort before + // the second argument. If the second argument should sort before the first + // or if there is no defined order for the values, return false. + Less(interface{}, interface{}) bool +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/set.go b/vendor/github.com/hashicorp/go-cty/cty/set/set.go new file mode 100644 index 00000000..b4fb316f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/set.go @@ -0,0 +1,62 @@ +package set + +import ( + "fmt" +) + +// Set is an implementation of the concept of a set: a collection where all +// values are conceptually either in or out of the set, but the members are +// not ordered. +// +// This type primarily exists to be the internal type of sets in cty, but +// it is considered to be at the same level of abstraction as Go's built in +// slice and map collection types, and so should make no cty-specific +// assumptions. +// +// Set operations are not thread safe. It is the caller's responsibility to +// provide mutex guarantees where necessary. +// +// Set operations are not optimized to minimize memory pressure. 
Mutating +// a set will generally create garbage and so should perhaps be avoided in +// tight loops where memory pressure is a concern. +type Set struct { + vals map[int][]interface{} + rules Rules +} + +// NewSet returns an empty set with the membership rules given. +func NewSet(rules Rules) Set { + return Set{ + vals: map[int][]interface{}{}, + rules: rules, + } +} + +func NewSetFromSlice(rules Rules, vals []interface{}) Set { + s := NewSet(rules) + for _, v := range vals { + s.Add(v) + } + return s +} + +func sameRules(s1 Set, s2 Set) bool { + return s1.rules == s2.rules +} + +func mustHaveSameRules(s1 Set, s2 Set) { + if !sameRules(s1, s2) { + panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules)) + } +} + +// HasRules returns true if and only if the receiving set has the given rules +// instance as its rules. +func (s Set) HasRules(rules Rules) bool { + return s.rules == rules +} + +// Rules returns the receiving set's rules instance. +func (s Set) Rules() Rules { + return s.rules +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_helper.go b/vendor/github.com/hashicorp/go-cty/cty/set_helper.go new file mode 100644 index 00000000..31622842 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set_helper.go @@ -0,0 +1,132 @@ +package cty + +import ( + "fmt" + + "github.com/hashicorp/go-cty/cty/set" +) + +// ValueSet is to cty.Set what []cty.Value is to cty.List and +// map[string]cty.Value is to cty.Map. It's provided to allow callers a +// convenient interface for manipulating sets before wrapping them in cty.Set +// values using cty.SetValFromValueSet. +// +// Unlike value slices and value maps, ValueSet instances have a single +// homogenous element type because that is a requirement of the underlying +// set implementation, which uses the element type to select a suitable +// hashing function. +// +// Set mutations are not concurrency-safe. +type ValueSet struct { + // ValueSet is just a thin wrapper around a set.Set with our value-oriented + // "rules" applied. We do this so that the caller can work in terms of + // cty.Value objects even though the set internals use the raw values. + s set.Set +} + +// NewValueSet creates and returns a new ValueSet with the given element type. +func NewValueSet(ety Type) ValueSet { + return newValueSet(set.NewSet(setRules{Type: ety})) +} + +func newValueSet(s set.Set) ValueSet { + return ValueSet{ + s: s, + } +} + +// ElementType returns the element type for the receiving ValueSet. +func (s ValueSet) ElementType() Type { + return s.s.Rules().(setRules).Type +} + +// Add inserts the given value into the receiving set. +func (s ValueSet) Add(v Value) { + s.requireElementType(v) + s.s.Add(v.v) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s ValueSet) Remove(v Value) { + s.requireElementType(v) + s.s.Remove(v.v) +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s ValueSet) Has(v Value) bool { + s.requireElementType(v) + return s.s.Has(v.v) +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s ValueSet) Copy() ValueSet { + return newValueSet(s.s.Copy()) +} + +// Length returns the number of values in the set. +func (s ValueSet) Length() int { + return s.s.Length() +} + +// Values returns a slice of all of the values in the set in no particular +// order. 
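+//
+// A minimal sketch:
+//
+//	vs := cty.NewValueSet(cty.String)
+//	vs.Add(cty.StringVal("a"))
+//	vs.Add(cty.StringVal("a")) // equivalent value; no effect
+//	vs.Length()                // 1
+//	vs.Values()                // []cty.Value{cty.StringVal("a")}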
+func (s ValueSet) Values() []Value { + l := s.s.Length() + if l == 0 { + return nil + } + ret := make([]Value, 0, l) + ety := s.ElementType() + for it := s.s.Iterator(); it.Next(); { + ret = append(ret, Value{ + ty: ety, + v: it.Value(), + }) + } + return ret +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same element type, +// or else this function will panic. +func (s ValueSet) Union(other ValueSet) ValueSet { + return newValueSet(s.s.Union(other.s)) +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Intersection(other ValueSet) ValueSet { + return newValueSet(s.s.Intersection(other.s)) +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Subtract(other ValueSet) ValueSet { + return newValueSet(s.s.Subtract(other.s)) +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same element type, or else this function +// will panic. +func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet { + return newValueSet(s.s.SymmetricDifference(other.s)) +} + +// requireElementType panics if the given value is not of the set's element type. +// +// It also panics if the given value is marked, because marked values cannot +// be stored in sets. +func (s ValueSet) requireElementType(v Value) { + if v.IsMarked() { + panic("cannot store marked value directly in a set (make the set itself unknown instead)") + } + if !v.Type().Equals(s.ElementType()) { + panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType())) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_internals.go b/vendor/github.com/hashicorp/go-cty/cty/set_internals.go new file mode 100644 index 00000000..40801980 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set_internals.go @@ -0,0 +1,244 @@ +package cty + +import ( + "bytes" + "fmt" + "hash/crc32" + "math/big" + "sort" + + "github.com/hashicorp/go-cty/cty/set" +) + +// setRules provides a Rules implementation for the ./set package that +// respects the equality rules for cty values of the given type. +// +// This implementation expects that values added to the set will be +// valid internal values for the given Type, which is to say that wrapping +// the given value in a Value struct along with the ruleset's type should +// produce a valid, working Value. +type setRules struct { + Type Type +} + +var _ set.OrderedRules = setRules{} + +// Hash returns a hash value for the receiver that can be used for equality +// checks where some inaccuracy is tolerable. +// +// The hash function is value-type-specific, so it is not meaningful to compare +// hash results for values of different types. +// +// This function is not safe to use for security-related applications, since +// the hash used is not strong enough. 
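+//
+// For illustration: equal values always produce equal hashes, e.g.
+// StringVal("a").Hash() == StringVal("a").Hash(), while distinct values
+// may occasionally collide.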
+func (val Value) Hash() int { + hashBytes, marks := makeSetHashBytes(val) + if len(marks) > 0 { + panic("can't take hash of value that has marks or has embedded values that have marks") + } + return int(crc32.ChecksumIEEE(hashBytes)) +} + +func (r setRules) Hash(v interface{}) int { + return Value{ + ty: r.Type, + v: v, + }.Hash() +} + +func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + eqv := v1v.Equals(v2v) + + // By comparing the result to true we ensure that an Unknown result, + // which will result if either value is unknown, will be considered + // as non-equivalent. Two unknown values are not equivalent for the + // sake of set membership. + return eqv.v == true +} + +// Less is an implementation of set.OrderedRules so that we can iterate over +// set elements in a consistent order, where such an order is possible. +func (r setRules) Less(v1, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + if v1v.RawEquals(v2v) { // Easy case: if they are equal then v1 can't be less + return false + } + + // Null values always sort after non-null values + if v2v.IsNull() && !v1v.IsNull() { + return true + } else if v1v.IsNull() { + return false + } + // Unknown values always sort after known values + if v1v.IsKnown() && !v2v.IsKnown() { + return true + } else if !v1v.IsKnown() { + return false + } + + switch r.Type { + case String: + // String values sort lexicographically + return v1v.AsString() < v2v.AsString() + case Bool: + // Weird to have a set of bools, but if we do then false sorts before true. + if v2v.True() || !v1v.True() { + return true + } + return false + case Number: + v1f := v1v.AsBigFloat() + v2f := v2v.AsBigFloat() + return v1f.Cmp(v2f) < 0 + default: + // No other types have a well-defined ordering, so we just produce a + // default consistent-but-undefined ordering then. This situation is + // not considered a compatibility constraint; callers should rely only + // on the ordering rules for primitive values. + v1h, _ := makeSetHashBytes(v1v) + v2h, _ := makeSetHashBytes(v2v) + return bytes.Compare(v1h, v2h) < 0 + } +} + +func makeSetHashBytes(val Value) ([]byte, ValueMarks) { + var buf bytes.Buffer + marks := make(ValueMarks) + appendSetHashBytes(val, &buf, marks) + return buf.Bytes(), marks +} + +func appendSetHashBytes(val Value, buf *bytes.Buffer, marks ValueMarks) { + // Exactly what bytes we generate here don't matter as long as the following + // constraints hold: + // - Unknown and null values all generate distinct strings from + // each other and from any normal value of the given type. + // - The delimiter used to separate items in a compound structure can + // never appear literally in any of its elements. + // Since we don't support hetrogenous lists we don't need to worry about + // collisions between values of different types, apart from + // PseudoTypeDynamic. + // If in practice we *do* get a collision then it's not a big deal because + // the Equivalent function will still distinguish values, but set + // performance will be best if we are able to produce a distinct string + // for each distinct value, unknown values notwithstanding. + + // Marks aren't considered part of a value for equality-testing purposes, + // so we'll unmark our value before we work with it but we'll remember + // the marks in case the caller needs to re-apply them to a derived + // value. 
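+	//
+	// For illustration (this byte layout is internal and subject to
+	// change): unknown writes "?", null writes "~", cty.True writes "T",
+	// cty.StringVal("a") writes `"a"`, and a single-element list of the
+	// string "a" writes `["a";]`.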
+ if val.IsMarked() { + unmarkedVal, valMarks := val.Unmark() + for m := range valMarks { + marks[m] = struct{}{} + } + val = unmarkedVal + } + + if !val.IsKnown() { + buf.WriteRune('?') + return + } + if val.IsNull() { + buf.WriteRune('~') + return + } + + switch val.ty { + case Number: + // Due to an unfortunate quirk of gob encoding for big.Float, we end up + // with non-pointer values immediately after a gob round-trip, and + // we end up in here before we've had a chance to run + // gobDecodeFixNumberPtr on the inner values of a gob-encoded set, + // and so sadly we must make a special effort to handle that situation + // here just so that we can get far enough along to fix it up for + // everything else in this package. + if bf, ok := val.v.(big.Float); ok { + buf.WriteString(bf.String()) + return + } + buf.WriteString(val.v.(*big.Float).String()) + return + case Bool: + if val.v.(bool) { + buf.WriteRune('T') + } else { + buf.WriteRune('F') + } + return + case String: + buf.WriteString(fmt.Sprintf("%q", val.v.(string))) + return + } + + if val.ty.IsMapType() { + buf.WriteRune('{') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(keyVal, buf, marks) + buf.WriteRune(':') + appendSetHashBytes(elementVal, buf, marks) + buf.WriteRune(';') + return false + }) + buf.WriteRune('}') + return + } + + if val.ty.IsListType() || val.ty.IsSetType() { + buf.WriteRune('[') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf, marks) + buf.WriteRune(';') + return false + }) + buf.WriteRune(']') + return + } + + if val.ty.IsObjectType() { + buf.WriteRune('<') + attrNames := make([]string, 0, len(val.ty.AttributeTypes())) + for attrName := range val.ty.AttributeTypes() { + attrNames = append(attrNames, attrName) + } + sort.Strings(attrNames) + for _, attrName := range attrNames { + appendSetHashBytes(val.GetAttr(attrName), buf, marks) + buf.WriteRune(';') + } + buf.WriteRune('>') + return + } + + if val.ty.IsTupleType() { + buf.WriteRune('<') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf, marks) + buf.WriteRune(';') + return false + }) + buf.WriteRune('>') + return + } + + // should never get down here + panic("unsupported type in set hash") +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_type.go b/vendor/github.com/hashicorp/go-cty/cty/set_type.go new file mode 100644 index 00000000..cbc3706f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set_type.go @@ -0,0 +1,72 @@ +package cty + +import ( + "fmt" +) + +type typeSet struct { + typeImplSigil + ElementTypeT Type +} + +// Set creates a set type with the given element Type. +// +// Set types are CollectionType implementations. +func Set(elem Type) Type { + return Type{ + typeSet{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a set whose element type is +// equal to that of the receiver. 
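+//
+// A minimal sketch:
+//
+//	cty.Set(cty.String).Equals(cty.Set(cty.String)) // true
+//	cty.Set(cty.String).Equals(cty.Set(cty.Bool))   // false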
+func (t typeSet) Equals(other Type) bool {
+	ot, isSet := other.typeImpl.(typeSet)
+	if !isSet {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeSet) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "set of " + elemName
+}
+
+func (t typeSet) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeSet) GoString() string {
+	return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT)
+}
+
+// IsSetType returns true if the given type is a list type, regardless of its
+// element type.
+func (t Type) IsSetType() bool {
+	_, ok := t.typeImpl.(typeSet)
+	return ok
+}
+
+// SetElementType is a convenience method that checks if the given type is
+// a set type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.SetElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) SetElementType() *Type {
+	if lt, ok := t.typeImpl.(typeSet); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go b/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go
new file mode 100644
index 00000000..798cacd6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go
@@ -0,0 +1,121 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeTuple struct {
+	typeImplSigil
+	ElemTypes []Type
+}
+
+// Tuple creates a tuple type with the given element types.
+//
+// After a slice is passed to this function the caller must no longer access
+// the underlying array, since ownership is transferred to this library.
+func Tuple(elemTypes []Type) Type {
+	return Type{
+		typeTuple{
+			ElemTypes: elemTypes,
+		},
+	}
+}
+
+func (t typeTuple) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeTuple); ok {
+		if len(t.ElemTypes) != len(ot.ElemTypes) {
+			// Fast path: if we don't have the same number of elements
+			// then we can't possibly be equal.
+			return false
+		}
+
+		for i, ty := range t.ElemTypes {
+			oty := ot.ElemTypes[i]
+			if !ok {
+				return false
+			}
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeTuple) FriendlyName(mode friendlyTypeNameMode) string {
+	// There isn't really a friendly way to write a tuple type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// a tuple type FriendlyName in its entirety. For example, could
+	// produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "tuple"
+}
+
+func (t typeTuple) GoString() string {
+	if len(t.ElemTypes) == 0 {
+		return "cty.EmptyTuple"
+	}
+	return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes)
+}
+
+// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about
+// the empty tuple type.
+var EmptyTuple Type
+
+// EmptyTupleVal is the only possible non-null, non-unknown value of type
+// EmptyTuple.
+var EmptyTupleVal Value
+
+func init() {
+	EmptyTuple = Tuple([]Type{})
+	EmptyTupleVal = Value{
+		ty: EmptyTuple,
+		v:  []interface{}{},
+	}
+}
+
+// IsTupleType returns true if the given type is an object type, regardless
+// of its element type.
+func (t Type) IsTupleType() bool { + _, ok := t.typeImpl.(typeTuple) + return ok +} + +// Length returns the number of elements of the receiving tuple type. +// Will panic if the reciever isn't a tuple type; use IsTupleType to determine +// whether this operation will succeed. +func (t Type) Length() int { + if ot, ok := t.typeImpl.(typeTuple); ok { + return len(ot.ElemTypes) + } + panic("Length on non-tuple Type") +} + +// TupleElementType returns the type of the element with the given index. Will +// panic if the receiver is not a tuple type (use IsTupleType to confirm) +// or if the index is out of range (use Length to confirm). +func (t Type) TupleElementType(idx int) Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes[idx] + } + panic("TupleElementType on non-tuple Type") +} + +// TupleElementTypes returns a slice of the recieving tuple type's element +// types. Will panic if the receiver is not a tuple type (use IsTupleType +// to confirm). +// +// The returned slice is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the +// underlying array. For many purposes the element-related methods of Value +// are more appropriate and more convenient to use. +func (t Type) TupleElementTypes() []Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes + } + panic("TupleElementTypes on non-tuple Type") +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/type.go b/vendor/github.com/hashicorp/go-cty/cty/type.go new file mode 100644 index 00000000..730cb986 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/type.go @@ -0,0 +1,120 @@ +package cty + +// Type represents value types within the type system. +// +// This is a closed interface type, meaning that only the concrete +// implementations provided within this package are considered valid. +type Type struct { + typeImpl +} + +type typeImpl interface { + // isTypeImpl is a do-nothing method that exists only to express + // that a type is an implementation of typeImpl. + isTypeImpl() typeImplSigil + + // Equals returns true if the other given Type exactly equals the + // receiver Type. + Equals(other Type) bool + + // FriendlyName returns a human-friendly *English* name for the given + // type. + FriendlyName(mode friendlyTypeNameMode) string + + // GoString implements the GoStringer interface from package fmt. + GoString() string +} + +// Base implementation of Type to embed into concrete implementations +// to signal that they are implementations of Type. +type typeImplSigil struct{} + +func (t typeImplSigil) isTypeImpl() typeImplSigil { + return typeImplSigil{} +} + +// Equals returns true if the other given Type exactly equals the receiver +// type. +func (t Type) Equals(other Type) bool { + return t.typeImpl.Equals(other) +} + +// FriendlyName returns a human-friendly *English* name for the given type. +func (t Type) FriendlyName() string { + return t.typeImpl.FriendlyName(friendlyTypeName) +} + +// FriendlyNameForConstraint is similar to FriendlyName except that the +// result is specialized for describing type _constraints_ rather than types +// themselves. This is more appropriate when reporting that a particular value +// does not conform to an expected type constraint. +// +// In particular, this function uses the term "any type" to refer to +// cty.DynamicPseudoType, rather than "dynamic" as returned by FriendlyName. 
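+//
+// A minimal sketch:
+//
+//	cty.DynamicPseudoType.FriendlyName()              // "dynamic"
+//	cty.DynamicPseudoType.FriendlyNameForConstraint() // "any type"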
+func (t Type) FriendlyNameForConstraint() string { + return t.typeImpl.FriendlyName(friendlyTypeConstraintName) +} + +// friendlyNameMode is an internal combination of the various FriendlyName* +// variants that just directly takes a mode, for easy passthrough for +// recursive name construction. +func (t Type) friendlyNameMode(mode friendlyTypeNameMode) string { + return t.typeImpl.FriendlyName(mode) +} + +// GoString returns a string approximating how the receiver type would be +// expressed in Go source code. +func (t Type) GoString() string { + if t.typeImpl == nil { + return "cty.NilType" + } + + return t.typeImpl.GoString() +} + +// NilType is an invalid type used when a function is returning an error +// and has no useful type to return. It should not be used and any methods +// called on it will panic. +var NilType = Type{} + +// HasDynamicTypes returns true either if the receiver is itself +// DynamicPseudoType or if it is a compound type whose descendent elements +// are DynamicPseudoType. +func (t Type) HasDynamicTypes() bool { + switch { + case t == DynamicPseudoType: + return true + case t.IsPrimitiveType(): + return false + case t.IsCollectionType(): + return false + case t.IsObjectType(): + attrTypes := t.AttributeTypes() + for _, at := range attrTypes { + if at.HasDynamicTypes() { + return true + } + } + return false + case t.IsTupleType(): + elemTypes := t.TupleElementTypes() + for _, et := range elemTypes { + if et.HasDynamicTypes() { + return true + } + } + return false + case t.IsCapsuleType(): + return false + default: + // Should never happen, since above should be exhaustive + panic("HasDynamicTypes does not support the given type") + } +} + +type friendlyTypeNameMode rune + +const ( + friendlyTypeName friendlyTypeNameMode = 'N' + friendlyTypeConstraintName friendlyTypeNameMode = 'C' +) diff --git a/vendor/github.com/hashicorp/go-cty/cty/type_conform.go b/vendor/github.com/hashicorp/go-cty/cty/type_conform.go new file mode 100644 index 00000000..476eeea8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/type_conform.go @@ -0,0 +1,139 @@ +package cty + +// TestConformance recursively walks the receiver and the given other type and +// returns nil if the receiver *conforms* to the given type. +// +// Type conformance is similar to type equality but has one crucial difference: +// PseudoTypeDynamic can be used within the given type to represent that +// *any* type is allowed. +// +// If any non-conformities are found, the returned slice will be non-nil and +// contain at least one error value. It will be nil if the type is entirely +// conformant. +// +// Note that the special behavior of PseudoTypeDynamic is the *only* exception +// to normal type equality. Calling applications may wish to apply their own +// automatic conversion logic to the given data structure to create a more +// liberal notion of conformance to a type. +// +// Returned errors are usually (but not always) PathError instances that +// indicate where in the structure the error was found. If a returned error +// is of that type then the error message is written for (English-speaking) +// end-users working within the cty type system, not mentioning any Go-oriented +// implementation details. +func (t Type) TestConformance(other Type) []error { + path := make(Path, 0) + var errs []error + testConformance(t, other, path, &errs) + return errs +} + +func testConformance(given Type, want Type, path Path, errs *[]error) { + if want.Equals(DynamicPseudoType) { + // anything goes! 
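+		//
+		// For illustration: every type conforms to DynamicPseudoType,
+		// so cty.EmptyObject.TestConformance(cty.DynamicPseudoType)
+		// returns nil.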
+ return + } + + if given.Equals(want) { + // Any equal types are always conformant + return + } + + // The remainder of this function is concerned with detecting + // and reporting the specific non-conformance, since we wouldn't + // have got here if the types were not divergent. + // We treat compound structures as special so that we can report + // specifically what is non-conforming, rather than simply returning + // the entire type names and letting the user puzzle it out. + + if given.IsObjectType() && want.IsObjectType() { + givenAttrs := given.AttributeTypes() + wantAttrs := want.AttributeTypes() + + for k := range givenAttrs { + if _, exists := wantAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "unsupported attribute %q", k), + ) + } + } + for k := range wantAttrs { + if _, exists := givenAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "missing required attribute %q", k), + ) + } + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for k, wantAttrType := range wantAttrs { + if givenAttrType, exists := givenAttrs[k]; exists { + path[pathIdx] = GetAttrStep{Name: k} + testConformance(givenAttrType, wantAttrType, path, errs) + } + } + + path = path[0:pathIdx] + + return + } + + if given.IsTupleType() && want.IsTupleType() { + givenElems := given.TupleElementTypes() + wantElems := want.TupleElementTypes() + + if len(givenElems) != len(wantElems) { + *errs = append( + *errs, + errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)), + ) + return + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for i, wantElemType := range wantElems { + givenElemType := givenElems[i] + path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))} + testConformance(givenElemType, wantElemType, path, errs) + } + + path = path[0:pathIdx] + + return + } + + if given.IsListType() && want.IsListType() { + path = append(path, IndexStep{Key: UnknownVal(Number)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsMapType() && want.IsMapType() { + path = append(path, IndexStep{Key: UnknownVal(String)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsSetType() && want.IsSetType() { + path = append(path, IndexStep{Key: UnknownVal(given.ElementType())}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + *errs = append( + *errs, + errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()), + ) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go b/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go new file mode 100644 index 00000000..ec05bb18 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go @@ -0,0 +1,57 @@ +package cty + +import ( + "encoding/gob" + "fmt" + "math/big" + "strings" + + "github.com/hashicorp/go-cty/cty/set" +) + +// InternalTypesToRegister is a slice of values that covers all of the +// internal types used in the representation of cty.Type and cty.Value +// across all cty Types. 
+// +// This is intended to be used to register these types with encoding +// packages that require registration of types used in interfaces, such as +// encoding/gob, thus allowing cty types and values to be included in streams +// created from those packages. However, registering with gob is not necessary +// since that is done automatically as a side-effect of importing this package. +// +// Callers should not do anything with the values here except pass them on +// verbatim to a registration function. +// +// If the calling application uses Capsule types that wrap local structs either +// directly or indirectly, these structs may also need to be registered in +// order to support encoding and decoding of values of these types. That is the +// responsibility of the calling application. +var InternalTypesToRegister []interface{} + +func init() { + InternalTypesToRegister = []interface{}{ + primitiveType{}, + typeList{}, + typeMap{}, + typeObject{}, + typeSet{}, + setRules{}, + set.Set{}, + typeTuple{}, + big.Float{}, + capsuleType{}, + []interface{}(nil), + map[string]interface{}(nil), + } + + // Register these with gob here, rather than in gob.go, to ensure + // that this will always happen after we build the above. + for _, tv := range InternalTypesToRegister { + typeName := fmt.Sprintf("%T", tv) + if strings.HasPrefix(typeName, "cty.") { + gob.RegisterName(fmt.Sprintf("github.com/hashicorp/go-cty/%s", typeName), tv) + } else { + gob.Register(tv) + } + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/unknown.go b/vendor/github.com/hashicorp/go-cty/cty/unknown.go new file mode 100644 index 00000000..e54179eb --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/unknown.go @@ -0,0 +1,84 @@ +package cty + +// unknownType is the placeholder type used for the sigil value representing +// "Unknown", to make it unambigiously distinct from any other possible value. +type unknownType struct { +} + +// Unknown is a special value that can be +var unknown interface{} = &unknownType{} + +// UnknownVal returns an Value that represents an unknown value of the given +// type. Unknown values can be used to represent a value that is +// not yet known. Its meaning is undefined in cty, but it could be used by +// an calling application to allow partial evaluation. +// +// Unknown values of any type can be created of any type. All operations on +// Unknown values themselves return Unknown. +func UnknownVal(t Type) Value { + return Value{ + ty: t, + v: unknown, + } +} + +func (t unknownType) GoString() string { + // This is the stringification of our internal unknown marker. The + // stringification of the public representation of unknowns is in + // Value.GoString. + return "cty.unknown" +} + +type pseudoTypeDynamic struct { + typeImplSigil +} + +// DynamicPseudoType represents the dynamic pseudo-type. +// +// This type can represent situations where a type is not yet known. Its +// meaning is undefined in cty, but it could be used by a calling +// application to allow expression type checking with some types not yet known. +// For example, the application might optimistically permit any operation on +// values of this type in type checking, allowing a partial type-check result, +// and then repeat the check when more information is known to get the +// final, concrete type. +// +// It is a pseudo-type because it is used only as a sigil to the calling +// application. 
"Unknown" is the only valid value of this pseudo-type, so +// operations on values of this type will always short-circuit as per +// the rules for that special value. +var DynamicPseudoType Type + +func (t pseudoTypeDynamic) Equals(other Type) bool { + _, ok := other.typeImpl.(pseudoTypeDynamic) + return ok +} + +func (t pseudoTypeDynamic) FriendlyName(mode friendlyTypeNameMode) string { + switch mode { + case friendlyTypeConstraintName: + return "any type" + default: + return "dynamic" + } +} + +func (t pseudoTypeDynamic) GoString() string { + return "cty.DynamicPseudoType" +} + +// DynamicVal is the only valid value of the pseudo-type dynamic. +// This value can be used as a placeholder where a value or expression's +// type and value are both unknown, thus allowing partial evaluation. See +// the docs for DynamicPseudoType for more information. +var DynamicVal Value + +func init() { + DynamicPseudoType = Type{ + pseudoTypeDynamic{}, + } + DynamicVal = Value{ + ty: DynamicPseudoType, + v: unknown, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go b/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go new file mode 100644 index 00000000..ba926475 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go @@ -0,0 +1,64 @@ +package cty + +// UnknownAsNull returns a value of the same type as the given value but +// with any unknown values (including nested values) replaced with null +// values of the same type. +// +// This can be useful if a result is to be serialized in a format that can't +// represent unknowns, such as JSON, as long as the caller does not need to +// retain the unknown value information. +func UnknownAsNull(val Value) Value { + ty := val.Type() + switch { + case val.IsNull(): + return val + case !val.IsKnown(): + return NullVal(ty) + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make([]Value, 0, length) + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, UnknownAsNull(v)) + } + switch { + case ty.IsListType(): + return ListVal(vals) + case ty.IsTupleType(): + return TupleVal(vals) + default: + return SetVal(vals) + } + case ty.IsMapType() || ty.IsObjectType(): + var length int + switch { + case ty.IsMapType(): + length = val.LengthInt() + default: + length = len(val.Type().AttributeTypes()) + } + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make(map[string]Value, length) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + vals[k.AsString()] = UnknownAsNull(v) + } + switch { + case ty.IsMapType(): + return MapVal(vals) + default: + return ObjectVal(vals) + } + } + + return val +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/value.go b/vendor/github.com/hashicorp/go-cty/cty/value.go new file mode 100644 index 00000000..1025ba82 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/value.go @@ -0,0 +1,108 @@ +package cty + +// Value represents a value of a particular type, and is the interface by +// which operations are executed on typed values. +// +// Value has two different classes of method. 
Operation methods stay entirely
+// within the type system (methods accept and return Value instances) and
+// are intended for use in implementing a language in terms of cty, while
+// integration methods either enter or leave the type system, working with
+// native Go values. Operation methods are guaranteed to support all of the
+// expected short-circuit behavior for unknown and dynamic values, while
+// integration methods may not.
+//
+// The philosophy for the operations API is that it's the caller's
+// responsibility to ensure that the given types and values satisfy the
+// specified invariants during a separate type check, so that the caller is
+// able to return errors to its user from the application's own perspective.
+//
+// Consequently the design of these methods assumes such checks have already
+// been done and panics if any invariants turn out not to be satisfied. These
+// panic errors are not intended to be handled, but rather indicate a bug in
+// the calling application that should be fixed with more checks prior to
+// executing operations.
+//
+// A related consequence of this philosophy is that no automatic type
+// conversions are done. If a method specifies that its argument must be
+// a number then it's the caller's responsibility to do that conversion before
+// the call, thus allowing the application to have more constrained conversion
+// rules than are offered by the built-in converter where necessary.
+type Value struct {
+	ty Type
+	v  interface{}
+}
+
+// Type returns the type of the value.
+func (val Value) Type() Type {
+	return val.ty
+}
+
+// IsKnown returns true if the value is known. That is, if it is not
+// the result of the unknown value constructor Unknown(...), and is not
+// the result of an operation on another unknown value.
+//
+// Unknown values are only produced either directly or as a result of
+// operating on other unknown values, and so an application that never
+// introduces Unknown values can be guaranteed to never receive any either.
+func (val Value) IsKnown() bool {
+	if val.IsMarked() {
+		return val.unmarkForce().IsKnown()
+	}
+	return val.v != unknown
+}
+
+// IsNull returns true if the value is null. Values of any type can be
+// null, but any operations on a null value will panic. No operation ever
+// produces null, so an application that never introduces Null values can
+// be guaranteed to never receive any either.
+func (val Value) IsNull() bool {
+	if val.IsMarked() {
+		return val.unmarkForce().IsNull()
+	}
+	return val.v == nil
+}
+
+// NilVal is an invalid Value that can be used as a placeholder when returning
+// with an error from a function that returns (Value, error).
+//
+// NilVal is *not* a valid Value and so no operations may be performed on it.
+// Any attempt to use it will result in a panic.
+//
+// This should not be confused with the idea of a Null value, as returned by
+// NullVal. NilVal is a nil within the *Go* type system, and is invalid in
+// the cty type system. Null values *do* exist in the cty type system.
+var NilVal = Value{
+	ty: Type{typeImpl: nil},
+	v:  nil,
+}
+
+// IsWhollyKnown is an extension of IsKnown that also recursively checks
+// inside collections and structures to see if there are any nested unknown
+// values. 
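+//
+// For example (an editorial sketch, not from the upstream source, using only
+// constructors defined in this package):
+//
+//	l := cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)})
+//	l.IsKnown()       // true: the list itself is known
+//	l.IsWhollyKnown() // false: one of its elements is unknown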
+func (val Value) IsWhollyKnown() bool {
+	if val.IsMarked() {
+		return val.unmarkForce().IsWhollyKnown()
+	}
+
+	if !val.IsKnown() {
+		return false
+	}
+
+	if val.IsNull() {
+		// Can't recurse into a null, so we're done
+		return true
+	}
+
+	switch {
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			_, ev := it.Element()
+			if !ev.IsWhollyKnown() {
+				return false
+			}
+		}
+		return true
+	default:
+		return true
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/value_init.go b/vendor/github.com/hashicorp/go-cty/cty/value_init.go
new file mode 100644
index 00000000..853a5a7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/value_init.go
@@ -0,0 +1,324 @@
+package cty
+
+import (
+	"fmt"
+	"math/big"
+	"reflect"
+
+	"golang.org/x/text/unicode/norm"
+
+	"github.com/hashicorp/go-cty/cty/set"
+)
+
+// BoolVal returns a Value of type Bool whose internal value is the given
+// bool.
+func BoolVal(v bool) Value {
+	return Value{
+		ty: Bool,
+		v:  v,
+	}
+}
+
+// NumberVal returns a Value of type Number whose internal value is the given
+// big.Float. The returned value becomes the owner of the big.Float object,
+// and so it's forbidden for the caller to mutate the object after it's
+// wrapped in this way.
+func NumberVal(v *big.Float) Value {
+	return Value{
+		ty: Number,
+		v:  v,
+	}
+}
+
+// ParseNumberVal returns a Value of type number produced by parsing the given
+// string as a decimal real number. To ensure that two identical strings will
+// always produce an equal number, always use this function to derive a number
+// from a string; it will ensure that the precision and rounding mode for the
+// internal big decimal is configured in a consistent way.
+//
+// If the given string cannot be parsed as a number, the returned error has
+// the message "a number is required", making it suitable to return to an
+// end-user to signal a type conversion error.
+//
+// If the given string contains a number that becomes a recurring fraction
+// when expressed in binary then it will be truncated to have a 512-bit
+// mantissa. Note that this is a higher precision than that of a float64,
+// so converting the same decimal number first to float64 and then calling
+// NumberFloatVal will not produce an equal result; the conversion first
+// to float64 will round the mantissa to fewer than 512 bits.
+func ParseNumberVal(s string) (Value, error) {
+	// Base 10, precision 512, and rounding to nearest even is the standard
+	// way to handle numbers arriving as strings.
+	f, _, err := big.ParseFloat(s, 10, 512, big.ToNearestEven)
+	if err != nil {
+		return NilVal, fmt.Errorf("a number is required")
+	}
+	return NumberVal(f), nil
+}
+
+// MustParseNumberVal is like ParseNumberVal but it will panic in case of any
+// error. It can be used during initialization or any other situation where
+// the given string is a constant or otherwise known to be correct by the
+// caller.
+func MustParseNumberVal(s string) Value {
+	ret, err := ParseNumberVal(s)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
+
+// NumberIntVal returns a Value of type Number whose internal value is equal
+// to the given integer.
+func NumberIntVal(v int64) Value {
+	return NumberVal(new(big.Float).SetInt64(v))
+}
+
+// NumberUIntVal returns a Value of type Number whose internal value is equal
+// to the given unsigned integer. 
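+//
+// As an editorial sketch (not from the upstream source), note that all of
+// the Number constructors produce values that compare equal whenever they
+// represent the same mathematical value:
+//
+//	cty.NumberUIntVal(2).Equals(cty.MustParseNumberVal("2")) // cty.True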
+func NumberUIntVal(v uint64) Value { + return NumberVal(new(big.Float).SetUint64(v)) +} + +// NumberFloatVal returns a Value of type Number whose internal value is +// equal to the given float. +func NumberFloatVal(v float64) Value { + return NumberVal(new(big.Float).SetFloat64(v)) +} + +// StringVal returns a Value of type String whose internal value is the +// given string. +// +// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and +// they are NFC-normalized on entry into the world of cty values. +// +// If the given string is not valid UTF-8 then behavior of string operations +// is undefined. +func StringVal(v string) Value { + return Value{ + ty: String, + v: NormalizeString(v), + } +} + +// NormalizeString applies the same normalization that cty applies when +// constructing string values. +// +// A return value from this function can be meaningfully compared byte-for-byte +// with a Value.AsString result. +func NormalizeString(s string) string { + return norm.NFC.String(s) +} + +// ObjectVal returns a Value of an object type whose structure is defined +// by the key names and value types in the given map. +func ObjectVal(attrs map[string]Value) Value { + attrTypes := make(map[string]Type, len(attrs)) + attrVals := make(map[string]interface{}, len(attrs)) + + for attr, val := range attrs { + attr = NormalizeString(attr) + attrTypes[attr] = val.ty + attrVals[attr] = val.v + } + + return Value{ + ty: Object(attrTypes), + v: attrVals, + } +} + +// TupleVal returns a Value of a tuple type whose element types are +// defined by the value types in the given slice. +func TupleVal(elems []Value) Value { + elemTypes := make([]Type, len(elems)) + elemVals := make([]interface{}, len(elems)) + + for i, val := range elems { + elemTypes[i] = val.ty + elemVals[i] = val.v + } + + return Value{ + ty: Tuple(elemTypes), + v: elemVals, + } +} + +// ListVal returns a Value of list type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also ListValEmpty.) +func ListVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call ListVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent list element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + return Value{ + ty: List(elementType), + v: rawList, + } +} + +// ListValEmpty returns an empty list of the given element type. +func ListValEmpty(element Type) Value { + return Value{ + ty: List(element), + v: []interface{}{}, + } +} + +// MapVal returns a Value of a map type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given map is empty, since then the element type cannot be inferred. +// (See also MapValEmpty.) 
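+//
+// For example (an editorial sketch, not from the upstream source):
+//
+//	cty.MapVal(map[string]cty.Value{
+//		"a": cty.NumberIntVal(1),
+//		"b": cty.NumberIntVal(2),
+//	}) // a value of type cty.Map(cty.Number)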
+func MapVal(vals map[string]Value) Value { + if len(vals) == 0 { + panic("must not call MapVal with empty map") + } + elementType := DynamicPseudoType + rawMap := make(map[string]interface{}, len(vals)) + + for key, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent map element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawMap[NormalizeString(key)] = val.v + } + + return Value{ + ty: Map(elementType), + v: rawMap, + } +} + +// MapValEmpty returns an empty map of the given element type. +func MapValEmpty(element Type) Value { + return Value{ + ty: Map(element), + v: map[string]interface{}{}, + } +} + +// SetVal returns a Value of set type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also SetValEmpty.) +func SetVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call SetVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + var markSets []ValueMarks + + for i, val := range vals { + if unmarkedVal, marks := val.UnmarkDeep(); len(marks) > 0 { + val = unmarkedVal + markSets = append(markSets, marks) + } + if val.ContainsMarked() { + // FIXME: Allow this, but unmark the values and apply the + // marking to the set itself instead. + panic("set cannot contain marked values") + } + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent set element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + rawVal := set.NewSetFromSlice(setRules{elementType}, rawList) + + return Value{ + ty: Set(elementType), + v: rawVal, + }.WithMarks(markSets...) +} + +// SetValFromValueSet returns a Value of set type based on an already-constructed +// ValueSet. +// +// The element type of the returned value is the element type of the given +// set. +func SetValFromValueSet(s ValueSet) Value { + ety := s.ElementType() + rawVal := s.s.Copy() // copy so caller can't mutate what we wrap + + return Value{ + ty: Set(ety), + v: rawVal, + } +} + +// SetValEmpty returns an empty set of the given element type. +func SetValEmpty(element Type) Value { + return Value{ + ty: Set(element), + v: set.NewSet(setRules{element}), + } +} + +// CapsuleVal creates a value of the given capsule type using the given +// wrapVal, which must be a pointer to a value of the capsule type's native +// type. +// +// This function will panic if the given type is not a capsule type, if +// the given wrapVal is not compatible with the given capsule type, or if +// wrapVal is not a pointer. 
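+//
+// For example (an editorial sketch, not from the upstream source; the
+// "handle" struct is hypothetical and cty.Capsule is the capsule type
+// constructor defined elsewhere in this package):
+//
+//	type handle struct{ fd int }
+//	handleTy := cty.Capsule("handle", reflect.TypeOf(handle{}))
+//	v := cty.CapsuleVal(handleTy, &handle{fd: 3})
+//	v.EncapsulatedValue().(*handle).fd // 3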
+func CapsuleVal(ty Type, wrapVal interface{}) Value { + if !ty.IsCapsuleType() { + panic("not a capsule type") + } + + wv := reflect.ValueOf(wrapVal) + if wv.Kind() != reflect.Ptr { + panic("wrapVal is not a pointer") + } + + it := ty.typeImpl.(*capsuleType).GoType + if !wv.Type().Elem().AssignableTo(it) { + panic("wrapVal target is not compatible with the given capsule type") + } + + return Value{ + ty: ty, + v: wrapVal, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/value_ops.go b/vendor/github.com/hashicorp/go-cty/cty/value_ops.go new file mode 100644 index 00000000..69e5a8ab --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/value_ops.go @@ -0,0 +1,1290 @@ +package cty + +import ( + "fmt" + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty/set" +) + +// GoString is an implementation of fmt.GoStringer that produces concise +// source-like representations of values suitable for use in debug messages. +func (val Value) GoString() string { + if val.IsMarked() { + unVal, marks := val.Unmark() + if len(marks) == 1 { + var mark interface{} + for m := range marks { + mark = m + } + return fmt.Sprintf("%#v.Mark(%#v)", unVal, mark) + } + return fmt.Sprintf("%#v.WithMarks(%#v)", unVal, marks) + } + + if val == NilVal { + return "cty.NilVal" + } + + if val.IsNull() { + return fmt.Sprintf("cty.NullVal(%#v)", val.ty) + } + if val == DynamicVal { // is unknown, so must be before the IsKnown check below + return "cty.DynamicVal" + } + if !val.IsKnown() { + return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty) + } + + // By the time we reach here we've dealt with all of the exceptions around + // unknowns and nulls, so we're guaranteed that the values are the + // canonical internal representation of the given type. + + switch val.ty { + case Bool: + if val.v.(bool) { + return "cty.True" + } + return "cty.False" + case Number: + fv := val.v.(*big.Float) + // We'll try to use NumberIntVal or NumberFloatVal if we can, since + // the fully-general initializer call is pretty ugly-looking. 
+		if fv.IsInt() {
+			return fmt.Sprintf("cty.NumberIntVal(%#v)", fv)
+		}
+		if rfv, accuracy := fv.Float64(); accuracy == big.Exact {
+			return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv)
+		}
+		return fmt.Sprintf("cty.MustParseNumberVal(%q)", fv.Text('f', -1))
+	case String:
+		return fmt.Sprintf("cty.StringVal(%#v)", val.v)
+	}
+
+	switch {
+	case val.ty.IsSetType():
+		vals := val.AsValueSlice()
+		if len(vals) == 0 {
+			return fmt.Sprintf("cty.SetValEmpty(%#v)", val.ty.ElementType())
+		}
+		return fmt.Sprintf("cty.SetVal(%#v)", vals)
+	case val.ty.IsListType():
+		vals := val.AsValueSlice()
+		if len(vals) == 0 {
+			return fmt.Sprintf("cty.ListValEmpty(%#v)", val.ty.ElementType())
+		}
+		return fmt.Sprintf("cty.ListVal(%#v)", vals)
+	case val.ty.IsMapType():
+		vals := val.AsValueMap()
+		if len(vals) == 0 {
+			return fmt.Sprintf("cty.MapValEmpty(%#v)", val.ty.ElementType())
+		}
+		return fmt.Sprintf("cty.MapVal(%#v)", vals)
+	case val.ty.IsTupleType():
+		if val.ty.Equals(EmptyTuple) {
+			return "cty.EmptyTupleVal"
+		}
+		vals := val.AsValueSlice()
+		return fmt.Sprintf("cty.TupleVal(%#v)", vals)
+	case val.ty.IsObjectType():
+		if val.ty.Equals(EmptyObject) {
+			return "cty.EmptyObjectVal"
+		}
+		vals := val.AsValueMap()
+		return fmt.Sprintf("cty.ObjectVal(%#v)", vals)
+	case val.ty.IsCapsuleType():
+		impl := val.ty.CapsuleOps().GoString
+		if impl == nil {
+			return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v)
+		}
+		return impl(val.EncapsulatedValue())
+	}
+
+	// Default exposes implementation details, so should actually cover
+	// all of the cases above for good caller UX.
+	return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v)
+}
+
+// Equals returns True if the receiver and the given other value have the
+// same type and are exactly equal in value.
+//
+// As a special case, two null values are always equal regardless of type.
+//
+// The usual short-circuit rules apply, so the result will be unknown if
+// either of the given values are.
+//
+// Use RawEquals to compare if two values are equal *ignoring* the
+// short-circuit rules and the exception for null values.
+func (val Value) Equals(other Value) Value {
+	if val.IsMarked() || other.IsMarked() {
+		val, valMarks := val.Unmark()
+		other, otherMarks := other.Unmark()
+		return val.Equals(other).WithMarks(valMarks, otherMarks)
+	}
+
+	// Start by handling Unknown values before considering types.
+	// This needs to be done since Null values are always equal regardless of
+	// type.
+	switch {
+	case !val.IsKnown() && !other.IsKnown():
+		// both unknown
+		return UnknownVal(Bool)
+	case val.IsKnown() && !other.IsKnown():
+		switch {
+		case val.IsNull(), other.ty.HasDynamicTypes():
+			// If the known value is Null, we need to wait for the unknown
+			// value, since nulls of any type are equal.
+			// An unknown with a dynamic type compares as unknown, which we need
+			// to check before the type comparison below.
+			return UnknownVal(Bool)
+		case !val.ty.Equals(other.ty):
+			// There is no null comparison or dynamic types, so unequal types
+			// will never be equal.
+			return False
+		default:
+			return UnknownVal(Bool)
+		}
+	case other.IsKnown() && !val.IsKnown():
+		switch {
+		case other.IsNull(), val.ty.HasDynamicTypes():
+			// If the known value is Null, we need to wait for the unknown
+			// value, since nulls of any type are equal.
+			// An unknown with a dynamic type compares as unknown, which we need
+			// to check before the type comparison below. 
+			return UnknownVal(Bool)
+		case !other.ty.Equals(val.ty):
+			// There's no null comparison or dynamic types, so unequal types
+			// will never be equal.
+			return False
+		default:
+			return UnknownVal(Bool)
+		}
+	}
+
+	switch {
+	case val.IsNull() && other.IsNull():
+		// Nulls are always equal, regardless of type
+		return BoolVal(true)
+	case val.IsNull() || other.IsNull():
+		// If only one is null then the result must be false
+		return BoolVal(false)
+	}
+
+	if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() {
+		return UnknownVal(Bool)
+	}
+
+	if !val.ty.Equals(other.ty) {
+		return BoolVal(false)
+	}
+
+	ty := val.ty
+	result := false
+
+	switch {
+	case ty == Number:
+		result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0
+	case ty == Bool:
+		result = val.v.(bool) == other.v.(bool)
+	case ty == String:
+		// Simple equality is safe because we NFC-normalize strings as they
+		// enter our world from StringVal, and so we can assume strings are
+		// always in normal form.
+		result = val.v.(string) == other.v.(string)
+	case ty.IsObjectType():
+		oty := ty.typeImpl.(typeObject)
+		result = true
+		for attr, aty := range oty.AttrTypes {
+			lhs := Value{
+				ty: aty,
+				v:  val.v.(map[string]interface{})[attr],
+			}
+			rhs := Value{
+				ty: aty,
+				v:  other.v.(map[string]interface{})[attr],
+			}
+			eq := lhs.Equals(rhs)
+			if !eq.IsKnown() {
+				return UnknownVal(Bool)
+			}
+			if eq.False() {
+				result = false
+				break
+			}
+		}
+	case ty.IsTupleType():
+		tty := ty.typeImpl.(typeTuple)
+		result = true
+		for i, ety := range tty.ElemTypes {
+			lhs := Value{
+				ty: ety,
+				v:  val.v.([]interface{})[i],
+			}
+			rhs := Value{
+				ty: ety,
+				v:  other.v.([]interface{})[i],
+			}
+			eq := lhs.Equals(rhs)
+			if !eq.IsKnown() {
+				return UnknownVal(Bool)
+			}
+			if eq.False() {
+				result = false
+				break
+			}
+		}
+	case ty.IsListType():
+		ety := ty.typeImpl.(typeList).ElementTypeT
+		if len(val.v.([]interface{})) == len(other.v.([]interface{})) {
+			result = true
+			for i := range val.v.([]interface{}) {
+				lhs := Value{
+					ty: ety,
+					v:  val.v.([]interface{})[i],
+				}
+				rhs := Value{
+					ty: ety,
+					v:  other.v.([]interface{})[i],
+				}
+				eq := lhs.Equals(rhs)
+				if !eq.IsKnown() {
+					return UnknownVal(Bool)
+				}
+				if eq.False() {
+					result = false
+					break
+				}
+			}
+		}
+	case ty.IsSetType():
+		s1 := val.v.(set.Set)
+		s2 := other.v.(set.Set)
+		equal := true
+
+		// Note that by our definition of sets it's never possible for two
+		// sets that contain unknown values (directly or indirectly) to
+		// ever be equal, even if they are otherwise identical.
+
+		// FIXME: iterating both lists and checking each item is not the
+		// ideal implementation here, but it works with the primitives we
+		// have in the set implementation. Perhaps the set implementation
+		// can provide its own equality test later. 
+ s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + equal = false + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + equal = false + } + }) + + result = equal + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + result = true + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + result = false + break + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsCapsuleType(): + impl := val.ty.CapsuleOps().Equals + if impl == nil { + impl := val.ty.CapsuleOps().RawEquals + if impl == nil { + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return BoolVal(val.v == other.v) + } + return BoolVal(impl(val.v, other.v)) + } + ret := impl(val.v, other.v) + if !ret.Type().Equals(Bool) { + panic(fmt.Sprintf("Equals for %#v returned %#v, not cty.Bool", ty, ret.Type())) + } + return ret + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in Equals", ty)) + } + + return BoolVal(result) +} + +// NotEqual is a shorthand for Equals followed by Not. +func (val Value) NotEqual(other Value) Value { + return val.Equals(other).Not() +} + +// True returns true if the receiver is True, false if False, and panics if +// the receiver is not of type Bool. +// +// This is a helper function to help write application logic that works with +// values, rather than a first-class operation. It does not work with unknown +// or null values. For more robust handling with unknown value +// short-circuiting, use val.Equals(cty.True). +func (val Value) True() bool { + val.assertUnmarked() + if val.ty != Bool { + panic("not bool") + } + return val.Equals(True).v.(bool) +} + +// False is the opposite of True. +func (val Value) False() bool { + return !val.True() +} + +// RawEquals returns true if and only if the two given values have the same +// type and equal value, ignoring the usual short-circuit rules about +// unknowns and dynamic types. +// +// This method is more appropriate for testing than for real use, since it +// skips over usual semantics around unknowns but as a consequence allows +// testing the result of another operation that is expected to return unknown. +// It returns a primitive Go bool rather than a Value to remind us that it +// is not a first-class value operation. +func (val Value) RawEquals(other Value) bool { + if !val.ty.Equals(other.ty) { + return false + } + if !val.HasSameMarks(other) { + return false + } + // Since we've now checked the marks, we'll unmark for the rest of this... 
+ val = val.unmarkForce() + other = other.unmarkForce() + + if (!val.IsKnown()) && (!other.IsKnown()) { + return true + } + if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { + return false + } + if val.IsNull() && other.IsNull() { + return true + } + if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) { + return false + } + if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType { + return true + } + + ty := val.ty + switch { + case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType: + return val.Equals(other).True() + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + + // Since we're intentionally ignoring our rule that two unknowns + // are never equal, we can cheat here. + // (This isn't 100% right since e.g. it will fail if the set contains + // numbers that are infinite, which DeepEqual can't compare properly. + // We're accepting that limitation for simplicity here, since this + // function is here primarily for testing.) + return reflect.DeepEqual(s1, s2) + + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + return false + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsCapsuleType(): + impl := val.ty.CapsuleOps().RawEquals + if impl == nil { + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return val.v == other.v + } + return impl(val.v, other.v) + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty)) + } +} + +// Add returns the sum of the receiver and the given other value. Both values +// must be numbers; this method will panic if not. 
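+//
+// For example (an editorial sketch, not from the upstream source), including
+// the usual unknown short-circuit:
+//
+//	cty.NumberIntVal(5).Add(cty.NumberIntVal(8))        // cty.NumberIntVal(13)
+//	cty.UnknownVal(cty.Number).Add(cty.NumberIntVal(1)) // cty.UnknownVal(cty.Number)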
+func (val Value) Add(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Add(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Add(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Subtract returns receiver minus the given other value. Both values must be +// numbers; this method will panic if not. +func (val Value) Subtract(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Subtract(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + return val.Add(other.Negate()) +} + +// Negate returns the numeric negative of the receiver, which must be a number. +// This method will panic when given a value of any other type. +func (val Value) Negate() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Negate().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float).Neg(val.v.(*big.Float)) + return NumberVal(ret) +} + +// Multiply returns the product of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +func (val Value) Multiply(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Multiply(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Mul(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Divide returns the quotient of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// If both values are zero or infinity, this function will panic with +// an instance of big.ErrNaN. +func (val Value) Divide(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Divide(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Quo(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Modulo returns the remainder of an integer division of the receiver and +// the given other value. Both values must be numbers; this method will panic +// if not. 
+// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// This operation is primarily here for use with nonzero natural numbers. +// Modulo with "other" as a non-natural number gets somewhat philosophical, +// and this function takes a position on what that should mean, but callers +// may wish to disallow such things outright or implement their own modulo +// if they disagree with the interpretation used here. +func (val Value) Modulo(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Modulo(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + // We cheat a bit here with infinities, just abusing the Multiply operation + // to get an infinite result of the correct sign. + if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity { + return val.Multiply(other) + } + + if other.RawEquals(Zero) { + return val + } + + // FIXME: This is a bit clumsy. Should come back later and see if there's a + // more straightforward way to do this. + rat := val.Divide(other) + ratFloorInt := &big.Int{} + rat.v.(*big.Float).Int(ratFloorInt) + work := (&big.Float{}).SetInt(ratFloorInt) + work.Mul(other.v.(*big.Float), work) + work.Sub(val.v.(*big.Float), work) + + return NumberVal(work) +} + +// Absolute returns the absolute (signless) value of the receiver, which must +// be a number or this method will panic. +func (val Value) Absolute() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Absolute().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := (&big.Float{}).Abs(val.v.(*big.Float)) + return NumberVal(ret) +} + +// GetAttr returns the value of the given attribute of the receiver, which +// must be of an object type that has an attribute of the given name. +// This method will panic if the receiver type is not compatible. +// +// The method will also panic if the given attribute name is not defined +// for the value's type. Use the attribute-related methods on Type to +// check for the validity of an attribute before trying to use it. +// +// This method may be called on a value whose type is DynamicPseudoType, +// in which case the result will also be DynamicVal. 
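+//
+// For example (an editorial sketch, not from the upstream source):
+//
+//	obj := cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("a")})
+//	obj.GetAttr("name") // cty.StringVal("a")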
+func (val Value) GetAttr(name string) Value {
+	if val.IsMarked() {
+		val, valMarks := val.Unmark()
+		return val.GetAttr(name).WithMarks(valMarks)
+	}
+
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	if !val.ty.IsObjectType() {
+		panic("value is not an object")
+	}
+
+	name = NormalizeString(name)
+	if !val.ty.HasAttribute(name) {
+		panic("value has no attribute of that name")
+	}
+
+	attrType := val.ty.AttributeType(name)
+
+	if !val.IsKnown() {
+		return UnknownVal(attrType)
+	}
+
+	return Value{
+		ty: attrType,
+		v:  val.v.(map[string]interface{})[name],
+	}
+}
+
+// Index returns the value of an element of the receiver, which must have
+// either a list, map or tuple type. This method will panic if the receiver
+// type is not compatible.
+//
+// The key value must be the correct type for the receiving collection: a
+// number if the collection is a list or tuple, or a string if it is a map.
+// In the case of a list or tuple, the given number must be convertible to int
+// or this method will panic. The key may alternatively be of
+// DynamicPseudoType, in which case the result itself is an unknown of the
+// collection's element type.
+//
+// The result is of the receiver collection's element type, or in the case
+// of a tuple the type of the specific element index requested.
+//
+// This method may be called on a value whose type is DynamicPseudoType,
+// in which case the result will also be DynamicVal.
+func (val Value) Index(key Value) Value {
+	if val.IsMarked() || key.IsMarked() {
+		val, valMarks := val.Unmark()
+		key, keyMarks := key.Unmark()
+		return val.Index(key).WithMarks(valMarks, keyMarks)
+	}
+
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	switch {
+	case val.Type().IsListType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != Number {
+			panic("element key for list must be number")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for list must be non-negative integer")
+		}
+
+		return Value{
+			ty: elty,
+			v:  val.v.([]interface{})[index],
+		}
+	case val.Type().IsMapType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != String {
+			panic("element key for map must be string")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		keyStr := key.v.(string)
+
+		return Value{
+			ty: elty,
+			v:  val.v.(map[string]interface{})[keyStr],
+		}
+	case val.Type().IsTupleType():
+		if key.Type() == DynamicPseudoType {
+			return DynamicVal
+		}
+
+		if key.Type() != Number {
+			panic("element key for tuple must be number")
+		}
+		if !key.IsKnown() {
+			return DynamicVal
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for tuple must be non-negative integer")
+		}
+
+		eltys := val.Type().TupleElementTypes()
+
+		if !val.IsKnown() {
+			return UnknownVal(eltys[index])
+		}
+
+		return Value{
+			ty: eltys[index],
+			v:  val.v.([]interface{})[index],
+		}
+	default:
+		panic("not a list, map, or tuple type")
+	}
+}
+
+// HasIndex returns True if the receiver (which must be supported for Index)
+// has an element with the given index key, or False if it does not. 
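+//
+// For example (an editorial sketch, not from the upstream source):
+//
+//	l := cty.ListVal([]cty.Value{cty.StringVal("x")})
+//	l.HasIndex(cty.NumberIntVal(0)) // cty.True
+//	l.HasIndex(cty.NumberIntVal(1)) // cty.False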
+// +// The result will be UnknownVal(Bool) if either the collection or the +// key value are unknown. +// +// This method will panic if the receiver is not indexable, but does not +// impose any panic-causing type constraints on the key. +func (val Value) HasIndex(key Value) Value { + if val.IsMarked() || key.IsMarked() { + val, valMarks := val.Unmark() + key, keyMarks := key.Unmark() + return val.HasIndex(key).WithMarks(valMarks, keyMarks) + } + + if val.ty == DynamicPseudoType { + return UnknownVal(Bool) + } + + switch { + case val.Type().IsListType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0) + case val.Type().IsMapType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != String { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + keyStr := key.v.(string) + _, exists := val.v.(map[string]interface{})[keyStr] + + return BoolVal(exists) + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + length := val.Type().Length() + return BoolVal(int(index) < length && index >= 0) + default: + panic("not a list, map, or tuple type") + } +} + +// HasElement returns True if the receiver (which must be of a set type) +// has the given value as an element, or False if it does not. +// +// The result will be UnknownVal(Bool) if either the set or the +// given value are unknown. +// +// This method will panic if the receiver is not a set, or if it is a null set. +func (val Value) HasElement(elem Value) Value { + if val.IsMarked() || elem.IsMarked() { + val, valMarks := val.Unmark() + elem, elemMarks := elem.Unmark() + return val.HasElement(elem).WithMarks(valMarks, elemMarks) + } + + ty := val.Type() + + if !ty.IsSetType() { + panic("not a set type") + } + if !val.IsKnown() || !elem.IsKnown() { + return UnknownVal(Bool) + } + if val.IsNull() { + panic("can't call HasElement on a nil value") + } + if !ty.ElementType().Equals(elem.Type()) { + return False + } + + s := val.v.(set.Set) + return BoolVal(s.Has(elem.v)) +} + +// Length returns the length of the receiver, which must be a collection type +// or tuple type, as a number value. If the receiver is not a compatible type +// then this method will panic. +// +// If the receiver is unknown then the result is also unknown. +// +// If the receiver is null then this function will panic. +// +// Note that Length is not supported for strings. To determine the length +// of a string, call AsString and take the length of the native Go string +// that is returned. +func (val Value) Length() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Length().WithMarks(valMarks) + } + + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. 
+		return NumberIntVal(int64(val.Type().Length()))
+	}
+
+	if !val.IsKnown() {
+		return UnknownVal(Number)
+	}
+
+	return NumberIntVal(int64(val.LengthInt()))
+}
+
+// LengthInt is like Length except it returns an int. It has the same behavior
+// as Length except that it will panic if the receiver is unknown.
+//
+// This is an integration method provided for the convenience of code bridging
+// into Go's type system.
+func (val Value) LengthInt() int {
+	val.assertUnmarked()
+	if val.Type().IsTupleType() {
+		// For tuples, we can return the length even if the value is not known.
+		return val.Type().Length()
+	}
+	if val.Type().IsObjectType() {
+		// For objects, the length is the number of attributes associated with the type.
+		return len(val.Type().AttributeTypes())
+	}
+	if !val.IsKnown() {
+		panic("value is not known")
+	}
+	if val.IsNull() {
+		panic("value is null")
+	}
+
+	switch {
+
+	case val.ty.IsListType():
+		return len(val.v.([]interface{}))
+
+	case val.ty.IsSetType():
+		return val.v.(set.Set).Length()
+
+	case val.ty.IsMapType():
+		return len(val.v.(map[string]interface{}))
+
+	default:
+		panic("value is not a collection")
+	}
+}
+
+// ElementIterator returns an ElementIterator for iterating the elements
+// of the receiver, which must be a collection type, a tuple type, or an object
+// type. If called on a value of any other type, this method will panic.
+//
+// The value must be Known and non-Null, or this method will panic.
+//
+// If the receiver is of a list type, the returned keys will be of type Number
+// and the values will be of the list's element type.
+//
+// If the receiver is of a map type, the returned keys will be of type String
+// and the value will be of the map's element type. Elements are passed in
+// ascending lexicographical order by key.
+//
+// If the receiver is of a set type, each element is returned as both the
+// key and the value, since set members are their own identity.
+//
+// If the receiver is of a tuple type, the returned keys will be of type Number
+// and the value will be of the corresponding element's type.
+//
+// If the receiver is of an object type, the returned keys will be of type
+// String and the value will be of the corresponding attribute's type.
+//
+// ElementIterator is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ElementIterator() ElementIterator {
+	val.assertUnmarked()
+	if !val.IsKnown() {
+		panic("can't use ElementIterator on unknown value")
+	}
+	if val.IsNull() {
+		panic("can't use ElementIterator on null value")
+	}
+	return elementIterator(val)
+}
+
+// CanIterateElements returns true if the receiver can support the
+// ElementIterator method (and by extension, ForEachElement) without panic.
+func (val Value) CanIterateElements() bool {
+	return canElementIterator(val)
+}
+
+// ForEachElement executes a given callback function for each element of
+// the receiver, which must be a collection type or tuple type, or this method
+// will panic.
+//
+// ForEachElement uses ElementIterator internally, and so the values passed
+// to the callback are as described for ElementIterator.
+//
+// Returns true if the iteration exited early due to the callback function
+// returning true, or false if the loop ran to completion.
+//
+// ForEachElement is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown. 
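+//
+// For example (an editorial sketch, not from the upstream source; "nums" is
+// assumed to be a known, non-null cty list of numbers):
+//
+//	total := new(big.Float)
+//	nums.ForEachElement(func(key, elem cty.Value) bool {
+//		total.Add(total, elem.AsBigFloat())
+//		return false // continue to the next element
+//	})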
+func (val Value) ForEachElement(cb ElementCallback) bool { + val.assertUnmarked() + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + stop := cb(key, val) + if stop { + return true + } + } + return false +} + +// Not returns the logical inverse of the receiver, which must be of type +// Bool or this method will panic. +func (val Value) Not() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Not().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(!val.v.(bool)) +} + +// And returns the result of logical AND with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) And(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.And(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) && other.v.(bool)) +} + +// Or returns the result of logical OR with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) Or(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Or(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) || other.v.(bool)) +} + +// LessThan returns True if the receiver is less than the other given value, +// which must both be numbers or this method will panic. +func (val Value) LessThan(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.LessThan(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) +} + +// GreaterThan returns True if the receiver is greater than the other given +// value, which must both be numbers or this method will panic. +func (val Value) GreaterThan(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.GreaterThan(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) +} + +// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or. +func (val Value) LessThanOrEqualTo(other Value) Value { + return val.LessThan(other).Or(val.Equals(other)) +} + +// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or. 
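+//
+// For example (an editorial sketch, not from the upstream source):
+//
+//	cty.NumberIntVal(2).GreaterThanOrEqualTo(cty.NumberIntVal(2)) // cty.True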
+func (val Value) GreaterThanOrEqualTo(other Value) Value { + return val.GreaterThan(other).Or(val.Equals(other)) +} + +// AsString returns the native string from a non-null, non-unknown cty.String +// value, or panics if called on any other value. +func (val Value) AsString() string { + val.assertUnmarked() + if val.ty != String { + panic("not a string") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + return val.v.(string) +} + +// AsBigFloat returns a big.Float representation of a non-null, non-unknown +// cty.Number value, or panics if called on any other value. +// +// For more convenient conversions to other native numeric types, use the +// "gocty" package. +func (val Value) AsBigFloat() *big.Float { + val.assertUnmarked() + if val.ty != Number { + panic("not a number") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + // Copy the float so that callers can't mutate our internal state + ret := *(val.v.(*big.Float)) + + return &ret +} + +// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown +// value of any type that CanIterateElements, or panics if called on +// any other value. +// +// For more convenient conversions to slices of more specific types, use +// the "gocty" package. +func (val Value) AsValueSlice() []Value { + val.assertUnmarked() + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret = append(ret, v) + } + return ret +} + +// AsValueMap returns a map[string]cty.Value representation of a non-null, +// non-unknown value of any type that CanIterateElements, or panics if called +// on any other value. +// +// For more convenient conversions to maps of more specific types, use +// the "gocty" package. +func (val Value) AsValueMap() map[string]Value { + val.assertUnmarked() + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make(map[string]Value, l) + for it := val.ElementIterator(); it.Next(); { + k, v := it.Element() + ret[k.AsString()] = v + } + return ret +} + +// AsValueSet returns a ValueSet representation of a non-null, +// non-unknown value of any collection type, or panics if called +// on any other value. +// +// Unlike AsValueSlice and AsValueMap, this method requires specifically a +// collection type (list, set or map) and does not allow structural types +// (tuple or object), because the ValueSet type requires homogenous +// element types. +// +// The returned ValueSet can store only values of the receiver's element type. +func (val Value) AsValueSet() ValueSet { + val.assertUnmarked() + if !val.Type().IsCollectionType() { + panic("not a collection type") + } + + // We don't give the caller our own set.Set (assuming we're a cty.Set value) + // because then the caller could mutate our internals, which is forbidden. + // Instead, we will construct a new set and append our elements into it. + ret := NewValueSet(val.Type().ElementType()) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret.Add(v) + } + return ret +} + +// EncapsulatedValue returns the native value encapsulated in a non-null, +// non-unknown capsule-typed value, or panics if called on any other value. +// +// The result is the same pointer that was passed to CapsuleVal to create +// the value. 
Since cty considers values to be immutable, it is strongly
+// recommended to treat the encapsulated value itself as immutable too.
+func (val Value) EncapsulatedValue() interface{} {
+	val.assertUnmarked()
+	if !val.Type().IsCapsuleType() {
+		panic("not a capsule-typed value")
+	}
+
+	return val.v
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/walk.go b/vendor/github.com/hashicorp/go-cty/cty/walk.go
new file mode 100644
index 00000000..a6943bab
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/walk.go
@@ -0,0 +1,182 @@
+package cty
+
+// Walk visits all of the values in a possibly-complex structure, calling
+// a given function for each value.
+//
+// For example, given a list of strings the callback would first be called
+// with the whole list and then called once for each element of the list.
+//
+// The callback function may prevent recursive visits to child values by
+// returning false. The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
+func Walk(val Value, cb func(Path, Value) (bool, error)) error {
+	var path Path
+	return walk(path, val, cb)
+}
+
+func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error {
+	deeper, err := cb(path, val)
+	if err != nil {
+		return err
+	}
+	if !deeper {
+		return nil
+	}
+
+	if val.IsNull() || !val.IsKnown() {
+		// Can't recurse into null or unknown values, regardless of type
+		return nil
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsObjectType():
+		for it := val.ElementIterator(); it.Next(); {
+			nameVal, av := it.Element()
+			path := append(path, GetAttrStep{
+				Name: nameVal.AsString(),
+			})
+			err := walk(path, av, cb)
+			if err != nil {
+				return err
+			}
+		}
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			kv, ev := it.Element()
+			path := append(path, IndexStep{
+				Key: kv,
+			})
+			err := walk(path, ev, cb)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Transform visits all of the values in a possibly-complex structure,
+// calling a given function for each value which has an opportunity to
+// replace that value.
+//
+// Unlike Walk, Transform visits child nodes first, so for a list of strings
+// it would first visit the strings and then the _new_ list constructed
+// from the transformed values of the list items.
+//
+// This is useful for creating the effect of being able to make deep mutations
+// to a value even though values are immutable. However, it's the responsibility
+// of the given function to preserve expected invariants, such as homogeneity of
+// element types in collections; this function can panic if such invariants
+// are violated, just as if new values were constructed directly using the
+// value constructor functions. An easy way to preserve invariants is to
+// ensure that the transform function never changes the value type.
+//
+// The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context. 
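+//
+// For example (an editorial sketch, not from the upstream source; it assumes
+// the standard library's strings.ToUpper), upper-casing every string found in
+// a structure:
+//
+//	out, err := cty.Transform(val, func(p cty.Path, v cty.Value) (cty.Value, error) {
+//		if v.Type() == cty.String && v.IsKnown() && !v.IsNull() {
+//			return cty.StringVal(strings.ToUpper(v.AsString())), nil
+//		}
+//		return v, nil
+//	})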
+// +// The path passed to the given function may not be used after that function +// returns, since its backing array is re-used for other calls. +func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) { + var path Path + return transform(path, val, cb) +} + +func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) { + ty := val.Type() + var newVal Value + + switch { + + case val.IsNull() || !val.IsKnown(): + // Can't recurse into null or unknown values, regardless of type + newVal = val + + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty sequence + newVal = val + default: + elems := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems = append(elems, newEv) + } + switch { + case ty.IsListType(): + newVal = ListVal(elems) + case ty.IsSetType(): + newVal = SetVal(elems) + case ty.IsTupleType(): + newVal = TupleVal(elems) + default: + panic("unknown sequence type") // should never happen because of the case we are in + } + } + + case ty.IsMapType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty map + newVal = val + default: + elems := make(map[string]Value) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems[kv.AsString()] = newEv + } + newVal = MapVal(elems) + } + + case ty.IsObjectType(): + switch { + case ty.Equals(EmptyObject): + // No deep transform for an empty object + newVal = val + default: + atys := ty.AttributeTypes() + newAVs := make(map[string]Value) + for name := range atys { + av := val.GetAttr(name) + path := append(path, GetAttrStep{ + Name: name, + }) + newAV, err := transform(path, av, cb) + if err != nil { + return DynamicVal, err + } + newAVs[name] = newAV + } + newVal = ObjectVal(newAVs) + } + + default: + newVal = val + } + + return cb(path, newVal) +} diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index bc56559c..f9e9effe 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -159,11 +159,8 @@ type ClientConfig struct { // SyncStdout, SyncStderr can be set to override the // respective os.Std* values in the plugin. Care should be taken to - // avoid races here. If these are nil, then this will automatically be - // hooked up to os.Stdin, Stdout, and Stderr, respectively. - // - // If the default values (nil) are used, then this package will not - // sync any of these streams. + // avoid races here. If these are nil, then this will be set to + // ioutil.Discard. SyncStdout io.Writer SyncStderr io.Writer @@ -690,14 +687,14 @@ func (c *Client) Start() (addr net.Addr, err error) { // Check the core protocol. Wrapped in a {} for scoping. 
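 	// (Editor's note, not upstream code: parts comes from splitting the
 	// plugin's one-line handshake written to stdout, which in go-plugin
 	// has the shape CORE-PROTOCOL-VERSION|APP-PROTOCOL-VERSION|NETWORK-TYPE|NETWORK-ADDR|PROTOCOL,
 	// e.g. "1|5|unix|/tmp/plug|grpc", so parts[0] is the core protocol version.)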
 	{
-		var coreProtocol int64
-		coreProtocol, err = strconv.ParseInt(parts[0], 10, 0)
+		var coreProtocol int
+		coreProtocol, err = strconv.Atoi(parts[0])
 		if err != nil {
 			err = fmt.Errorf("Error parsing core protocol version: %s", err)
 			return
 		}
 
-		if int(coreProtocol) != CoreProtocolVersion {
+		if coreProtocol != CoreProtocolVersion {
 			err = fmt.Errorf("Incompatible core API version with plugin. "+
 				"Plugin version: %s, Core version: %d\n\n"+
 				"To fix this, the plugin usually only needs to be recompiled.\n"+
@@ -788,7 +785,10 @@ func (c *Client) reattach() (net.Addr, error) {
 	// Verify the process still exists. If not, then it is an error
 	p, err := os.FindProcess(c.config.Reattach.Pid)
 	if err != nil {
-		return nil, err
+		// On Unix systems, FindProcess never returns an error.
+		// On Windows, for non-existent pids it returns:
+		// os.SyscallError - 'OpenProcess: the parameter is incorrect'
+		return nil, ErrProcessNotFound
 	}
 
 	// Attempt to connect to the addr since on Unix systems FindProcess
diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod
index f3ddf44e..f0115b78 100644
--- a/vendor/github.com/hashicorp/go-plugin/go.mod
+++ b/vendor/github.com/hashicorp/go-plugin/go.mod
@@ -1,17 +1,15 @@
 module github.com/hashicorp/go-plugin
 
+go 1.13
+
 require (
-	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
-	github.com/golang/protobuf v1.2.0
+	github.com/golang/protobuf v1.3.4
 	github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd
 	github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb
+	github.com/jhump/protoreflect v1.6.0
 	github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77
 	github.com/oklog/run v1.0.0
 	github.com/stretchr/testify v1.3.0 // indirect
-	golang.org/x/net v0.0.0-20180826012351-8a410e7b638d
-	golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
-	golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc // indirect
-	golang.org/x/text v0.3.0 // indirect
-	google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect
-	google.golang.org/grpc v1.14.0
+	golang.org/x/net v0.0.0-20190311183353-d8887717615a
+	google.golang.org/grpc v1.27.1
 )
diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum
index 21b14e99..5d497615 100644
--- a/vendor/github.com/hashicorp/go-plugin/go.sum
+++ b/vendor/github.com/hashicorp/go-plugin/go.sum
@@ -1,31 +1,74 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go index d0d0d8e2..97812191 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -37,7 +37,6 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) - // Connect. Note the first parameter is unused because we use a custom // dialer that has the state to see the address. conn, err := grpc.Dial("unused", opts...) @@ -62,6 +61,13 @@ func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { go broker.Run() go brokerGRPCClient.StartStream() + // Start the stdio client + stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn) + if err != nil { + return nil, err + } + go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr) + cl := &GRPCClient{ Conn: conn, Plugins: c.config.Plugins, diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go index d3dbf1ce..387628bf 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -14,6 +14,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/health" "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" ) // GRPCServiceName is the name of the service that the health check should @@ -51,9 +52,10 @@ type GRPCServer struct { Stdout io.Reader Stderr io.Reader - config GRPCServerConfig - server *grpc.Server - broker *GRPCBroker + config GRPCServerConfig + server *grpc.Server + broker *GRPCBroker + stdioServer *grpcStdioServer logger hclog.Logger } @@ -73,6 +75,9 @@ func (s *GRPCServer) Init() error { GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + // Register the reflection service + reflection.Register(s.server) + // Register the broker service brokerServer := newGRPCBrokerServer() plugin.RegisterGRPCBrokerServer(s.server, brokerServer) @@ -80,11 +85,13 @@ func (s *GRPCServer) Init() error { go s.broker.Run() // Register the controller - controllerServer := &grpcControllerServer{ - server: s, - } + controllerServer := &grpcControllerServer{server: s} plugin.RegisterGRPCControllerServer(s.server, controllerServer) + // Register the stdio service + s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr) + plugin.RegisterGRPCStdioServer(s.server, s.stdioServer) + // Register all our plugins onto the gRPC server. 
 	for k, raw := range s.Plugins {
 		p, ok := raw.(GRPCPlugin)
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go
new file mode 100644
index 00000000..40977c0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go
@@ -0,0 +1,207 @@
+package plugin
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"io"
+
+	empty "github.com/golang/protobuf/ptypes/empty"
+	"github.com/hashicorp/go-hclog"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/hashicorp/go-plugin/internal/plugin"
+)
+
+// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of
+// stdio data. This is currently 1 KB for no reason other than that seems like
+// enough (stdio data isn't that common) and is fairly low.
+const grpcStdioBuffer = 1 * 1024
+
+// grpcStdioServer implements the Stdio service and streams stdout/stderr.
+type grpcStdioServer struct {
+	stdoutCh <-chan []byte
+	stderrCh <-chan []byte
+}
+
+// newGRPCStdioServer creates a new grpcStdioServer and starts the stream
+// copying for the given out and err readers.
+//
+// This must only be called ONCE per srcOut, srcErr.
+func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer {
+	stdoutCh := make(chan []byte)
+	stderrCh := make(chan []byte)
+
+	// Begin copying the streams
+	go copyChan(log, stdoutCh, srcOut)
+	go copyChan(log, stderrCh, srcErr)
+
+	// Construct our server
+	return &grpcStdioServer{
+		stdoutCh: stdoutCh,
+		stderrCh: stderrCh,
+	}
+}
+
+// StreamStdio streams our stdout/err as the response.
+func (s *grpcStdioServer) StreamStdio(
+	_ *empty.Empty,
+	srv plugin.GRPCStdio_StreamStdioServer,
+) error {
+	// Share the same data value between runs. Sending this over the wire
+	// marshals it so we can reuse this.
+	var data plugin.StdioData
+
+	for {
+		// Read our data
+		select {
+		case data.Data = <-s.stdoutCh:
+			data.Channel = plugin.StdioData_STDOUT
+
+		case data.Data = <-s.stderrCh:
+			data.Channel = plugin.StdioData_STDERR
+
+		case <-srv.Context().Done():
+			return nil
+		}
+
+		// Not sure if this is possible, but if we somehow got here and
+		// we didn't populate any data at all, then just continue.
+		if len(data.Data) == 0 {
+			continue
+		}
+
+		// Send our data to the client.
+		if err := srv.Send(&data); err != nil {
+			return err
+		}
+	}
+}
+
+// grpcStdioClient wraps the stdio service as a client to copy
+// the stdio data to output writers.
+type grpcStdioClient struct {
+	log         hclog.Logger
+	stdioClient plugin.GRPCStdio_StreamStdioClient
+}
+
+// newGRPCStdioClient creates a grpcStdioClient. This will perform the
+// initial connection to the stdio service. If the stdio service is
+// unavailable then this will be a no-op, so hosts still work without error
+// against plugins that don't support this service.
+func newGRPCStdioClient(
+	ctx context.Context,
+	log hclog.Logger,
+	conn *grpc.ClientConn,
+) (*grpcStdioClient, error) {
+	client := plugin.NewGRPCStdioClient(conn)
+
+	// Connect immediately to the endpoint
+	stdioClient, err := client.StreamStdio(ctx, &empty.Empty{})
+
+	// If we get an Unavailable error, this means that the plugin isn't
+	// updated to link against the latest version of go-plugin that supports
+	// this. We fall back to the previous behavior of just not syncing anything.
+	if status.Code(err) == codes.Unavailable {
+		log.Warn("stdio service not available, stdout/stderr syncing unavailable")
+		stdioClient = nil
+		err = nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &grpcStdioClient{
+		log:         log,
+		stdioClient: stdioClient,
+	}, nil
+}
+
+// Run starts the loop that receives stdio data and writes it to the given
+// writers. This blocks and should be run in a goroutine.
+func (c *grpcStdioClient) Run(stdout, stderr io.Writer) {
+	// This will be nil if stdio is not supported by the plugin
+	if c.stdioClient == nil {
+		c.log.Warn("stdio service unavailable, run will do nothing")
+		return
+	}
+
+	for {
+		c.log.Trace("waiting for stdio data")
+		data, err := c.stdioClient.Recv()
+		if err != nil {
+			if err == io.EOF ||
+				status.Code(err) == codes.Unavailable ||
+				status.Code(err) == codes.Canceled ||
+				err == context.Canceled {
+				c.log.Warn("received EOF, stopping recv loop", "err", err)
+				return
+			}
+
+			c.log.Error("error receiving data", "err", err)
+			continue
+		}
+
+		// Determine our output writer based on channel
+		var w io.Writer
+		switch data.Channel {
+		case plugin.StdioData_STDOUT:
+			w = stdout
+
+		case plugin.StdioData_STDERR:
+			w = stderr
+
+		default:
+			c.log.Warn("unknown channel, dropping", "channel", data.Channel)
+			continue
+		}
+
+		// Write! In the event of an error we just continue.
+		if c.log.IsTrace() {
+			c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data))
+		}
+		if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil {
+			c.log.Error("failed to copy all bytes", "err", err)
+		}
+	}
+}
+
+// copyChan copies an io.Reader into a channel.
+func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) {
+	bufsrc := bufio.NewReader(src)
+
+	for {
+		// Make our data buffer. We allocate a new one per loop iteration
+		// so that we can send it over the channel.
+		var data [1024]byte
+
+		// Read the data, this will block until data is available
+		n, err := bufsrc.Read(data[:])
+
+		// We have to check if we have data BEFORE err != nil. The bufio
+		// docs guarantee n == 0 on EOF but it's better to be safe here.
+		if n > 0 {
+			// We have data! Send it on the channel. This will block if there
+			// is no reader on the other side. We expect that go-plugin will
+			// connect immediately to the stdio server to drain this so we want
+			// this block to happen for backpressure.
+			dst <- data[:n]
+		}
+
+		// If we hit EOF we're done copying
+		if err == io.EOF {
+			log.Debug("stdio EOF, exiting copy loop")
+			return
+		}
+
+		// On any other error, we just exit the loop. We don't expect there to
+		// be errors since our use case for this is reading/writing from
+		// an in-process pipe (os.Pipe).
+		if err != nil {
+			log.Warn("error copying stdio data, stopping copy", "err", err)
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go
index aa2fdc81..fb9d4152 100644
--- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go
@@ -1,3 +1,3 @@
-//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:.
+//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto ./grpc_stdio.proto --go_out=plugins=grpc:.
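+// (Editor's note, not upstream code: assuming protoc and protoc-gen-go are
+// on PATH, running "go generate ./..." in this package re-emits the *.pb.go
+// files, now including stubs for the new grpc_stdio.proto service.)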
package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go index b6850aa5..6bf10385 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -3,12 +3,13 @@ package plugin +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" context "golang.org/x/net/context" grpc "google.golang.org/grpc" - math "math" ) // Reference imports to suppress errors if they are not otherwise used. @@ -35,17 +36,16 @@ func (m *ConnInfo) Reset() { *m = ConnInfo{} } func (m *ConnInfo) String() string { return proto.CompactTextString(m) } func (*ConnInfo) ProtoMessage() {} func (*ConnInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_802e9beed3ec3b28, []int{0} + return fileDescriptor_grpc_broker_3322b07398605250, []int{0} } - func (m *ConnInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ConnInfo.Unmarshal(m, b) } func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) } -func (m *ConnInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnInfo.Merge(m, src) +func (dst *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(dst, src) } func (m *ConnInfo) XXX_Size() int { return xxx_messageInfo_ConnInfo.Size(m) @@ -81,23 +81,6 @@ func init() { proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") } -func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) } - -var fileDescriptor_802e9beed3ec3b28 = []byte{ - // 175 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, - 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, - 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, - 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, - 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, - 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, - 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, - 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, - 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, - 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, - 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, -} - // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context var _ grpc.ClientConn @@ -201,3 +184,20 @@ var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ }, Metadata: "grpc_broker.proto", } + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_grpc_broker_3322b07398605250) } + +var fileDescriptor_grpc_broker_3322b07398605250 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto index 3fa79e8a..aa3df463 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -11,5 +11,3 @@ message ConnInfo { service GRPCBroker { rpc StartStream(stream ConnInfo) returns (stream ConnInfo); } - - diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go index 38b42043..3e39da95 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -3,12 +3,13 @@ package plugin +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" context "golang.org/x/net/context" grpc "google.golang.org/grpc" - math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -32,17 +33,16 @@ func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} func (*Empty) Descriptor() ([]byte, []int) { - return fileDescriptor_23c2c7e42feab570, []int{0} + return fileDescriptor_grpc_controller_08f8296ef6d80436, []int{0} } - func (m *Empty) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Empty.Unmarshal(m, b) } func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Empty.Marshal(b, m, deterministic) } -func (m *Empty) XXX_Merge(src proto.Message) { - xxx_messageInfo_Empty.Merge(m, src) +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) } func (m *Empty) XXX_Size() int { return xxx_messageInfo_Empty.Size(m) @@ -57,19 +57,6 @@ func init() { proto.RegisterType((*Empty)(nil), "plugin.Empty") } -func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) } - -var fileDescriptor_23c2c7e42feab570 = []byte{ - // 108 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, - 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, - 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, - 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, - 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, - 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, -} - // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn @@ -141,3 +128,18 @@ var _GRPCController_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "grpc_controller.proto", } + +func init() { + proto.RegisterFile("grpc_controller.proto", fileDescriptor_grpc_controller_08f8296ef6d80436) +} + +var fileDescriptor_grpc_controller_08f8296ef6d80436 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go new file mode 100644 index 00000000..c8f94921 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go @@ -0,0 +1,233 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc_stdio.proto + +package plugin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StdioData_Channel int32 + +const ( + StdioData_INVALID StdioData_Channel = 0 + StdioData_STDOUT StdioData_Channel = 1 + StdioData_STDERR StdioData_Channel = 2 +) + +var StdioData_Channel_name = map[int32]string{ + 0: "INVALID", + 1: "STDOUT", + 2: "STDERR", +} +var StdioData_Channel_value = map[string]int32{ + "INVALID": 0, + "STDOUT": 1, + "STDERR": 2, +} + +func (x StdioData_Channel) String() string { + return proto.EnumName(StdioData_Channel_name, int32(x)) +} +func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0, 0} +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +type StdioData struct { + Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StdioData) Reset() { *m = StdioData{} } +func (m *StdioData) String() string { return proto.CompactTextString(m) } +func (*StdioData) ProtoMessage() {} +func (*StdioData) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0} +} +func (m *StdioData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StdioData.Unmarshal(m, b) +} +func (m *StdioData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StdioData.Marshal(b, m, deterministic) +} +func (dst *StdioData) XXX_Merge(src proto.Message) { + xxx_messageInfo_StdioData.Merge(dst, src) +} +func (m *StdioData) XXX_Size() int { + return xxx_messageInfo_StdioData.Size(m) +} +func (m *StdioData) XXX_DiscardUnknown() { + xxx_messageInfo_StdioData.DiscardUnknown(m) +} + +var xxx_messageInfo_StdioData proto.InternalMessageInfo + +func (m *StdioData) GetChannel() StdioData_Channel { + if m != nil { + return m.Channel + } + return StdioData_INVALID +} + +func (m *StdioData) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*StdioData)(nil), "plugin.StdioData") + proto.RegisterEnum("plugin.StdioData_Channel", StdioData_Channel_name, StdioData_Channel_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCStdioClient is the client API for GRPCStdio service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCStdioClient interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) +} + +type gRPCStdioClient struct { + cc *grpc.ClientConn +} + +func NewGRPCStdioClient(cc *grpc.ClientConn) GRPCStdioClient { + return &gRPCStdioClient{cc} +} + +func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCStdio_serviceDesc.Streams[0], "/plugin.GRPCStdio/StreamStdio", opts...) + if err != nil { + return nil, err + } + x := &gRPCStdioStreamStdioClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type GRPCStdio_StreamStdioClient interface { + Recv() (*StdioData, error) + grpc.ClientStream +} + +type gRPCStdioStreamStdioClient struct { + grpc.ClientStream +} + +func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { + m := new(StdioData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCStdioServer is the server API for GRPCStdio service. +type GRPCStdioServer interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. 
+ StreamStdio(*empty.Empty, GRPCStdio_StreamStdioServer) error +} + +func RegisterGRPCStdioServer(s *grpc.Server, srv GRPCStdioServer) { + s.RegisterService(&_GRPCStdio_serviceDesc, srv) +} + +func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(empty.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) +} + +type GRPCStdio_StreamStdioServer interface { + Send(*StdioData) error + grpc.ServerStream +} + +type gRPCStdioStreamStdioServer struct { + grpc.ServerStream +} + +func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { + return x.ServerStream.SendMsg(m) +} + +var _GRPCStdio_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCStdio", + HandlerType: (*GRPCStdioServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamStdio", + Handler: _GRPCStdio_StreamStdio_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc_stdio.proto", +} + +func init() { proto.RegisterFile("grpc_stdio.proto", fileDescriptor_grpc_stdio_db2934322ca63bd5) } + +var fileDescriptor_grpc_stdio_db2934322ca63bd5 = []byte{ + // 221 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x2f, 0x2a, 0x48, + 0x8e, 0x2f, 0x2e, 0x49, 0xc9, 0xcc, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0xc8, + 0x29, 0x4d, 0xcf, 0xcc, 0x93, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, 0x8b, 0x26, + 0x95, 0xa6, 0xe9, 0xa7, 0xe6, 0x16, 0x94, 0x54, 0x42, 0x14, 0x29, 0xb5, 0x30, 0x72, 0x71, 0x06, + 0x83, 0x34, 0xb9, 0x24, 0x96, 0x24, 0x0a, 0x19, 0x73, 0xb1, 0x27, 0x67, 0x24, 0xe6, 0xe5, 0xa5, + 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x19, 0x49, 0xea, 0x41, 0x0c, 0xd1, 0x83, 0xab, 0xd1, + 0x73, 0x86, 0x28, 0x08, 0x82, 0xa9, 0x14, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, 0x49, 0x94, 0x60, + 0x52, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xd8, 0xa1, 0xea, 0x84, 0xb8, 0xb9, + 0xd8, 0x3d, 0xfd, 0xc2, 0x1c, 0x7d, 0x3c, 0x5d, 0x04, 0x18, 0x84, 0xb8, 0xb8, 0xd8, 0x82, 0x43, + 0x5c, 0xfc, 0x43, 0x43, 0x04, 0x18, 0xa1, 0x6c, 0xd7, 0xa0, 0x20, 0x01, 0x26, 0x23, 0x77, 0x2e, + 0x4e, 0xf7, 0xa0, 0x00, 0x67, 0xb0, 0x2d, 0x42, 0x56, 0x5c, 0xdc, 0xc1, 0x25, 0x45, 0xa9, 0x89, + 0xb9, 0x10, 0xae, 0x98, 0x1e, 0xc4, 0x03, 0x7a, 0x30, 0x0f, 0xe8, 0xb9, 0x82, 0x3c, 0x20, 0x25, + 0x88, 0xe1, 0x36, 0x03, 0x46, 0x27, 0x8e, 0x28, 0xa8, 0xb7, 0x93, 0xd8, 0xc0, 0xca, 0x8d, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xbb, 0xe0, 0x69, 0x19, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto new file mode 100644 index 00000000..ce1a1223 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +import "google/protobuf/empty.proto"; + +// GRPCStdio is a service that is automatically run by the plugin process +// to stream any stdout/err data so that it can be mirrored on the plugin +// host side. +service GRPCStdio { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. 
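+  //
+  // (Editor's sketch, not upstream: the host side consumes this via the
+  // generated Go client, roughly
+  //
+  //   stream, err := plugin.NewGRPCStdioClient(conn).StreamStdio(ctx, &empty.Empty{})
+  //   data, err := stream.Recv() // then inspect data.Channel and data.Data
+  //
+  // which is what grpcStdioClient in grpc_stdio.go does above.)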
+ rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData); +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +message StdioData { + enum Channel { + INVALID = 0; + STDOUT = 1; + STDERR = 2; + } + + Channel channel = 1; + bytes data = 2; +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index 4c230e3a..e8b8c977 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -15,10 +15,8 @@ import ( "sort" "strconv" "strings" - "sync/atomic" - - "github.com/hashicorp/go-hclog" + hclog "github.com/hashicorp/go-hclog" "google.golang.org/grpc" ) @@ -337,11 +335,11 @@ func Serve(opts *ServeConfig) { ch := make(chan os.Signal, 1) signal.Notify(ch, os.Interrupt) go func() { - var count int32 = 0 + count := 0 for { <-ch - newCount := atomic.AddInt32(&count, 1) - logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) + count++ + logger.Trace("plugin received interrupt signal, ignoring", "count", count) } }() diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go index 2cf2c26c..e36f2eb2 100644 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -7,9 +7,9 @@ import ( "net" "net/rpc" - "github.com/mitchellh/go-testing-interface" hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin/internal/plugin" + "github.com/mitchellh/go-testing-interface" "google.golang.org/grpc" ) diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore deleted file mode 100644 index 15586a2b..00000000 --- a/vendor/github.com/hashicorp/hcl/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -y.output - -# ignore intellij files -.idea -*.iml -*.ipr -*.iws - -*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml deleted file mode 100644 index 3f83d902..00000000 --- a/vendor/github.com/hashicorp/hcl/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.8 - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile deleted file mode 100644 index 84fd743f..00000000 --- a/vendor/github.com/hashicorp/hcl/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go get -t ./... - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md deleted file mode 100644 index c8223326..00000000 --- a/vendor/github.com/hashicorp/hcl/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# HCL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) - -HCL (HashiCorp Configuration Language) is a configuration language built -by HashiCorp. 
The goal of HCL is to build a structured configuration language -that is both human and machine friendly for use with command-line tools, but -specifically targeted towards DevOps tools, servers, etc. - -HCL is also fully JSON compatible. That is, JSON can be used as completely -valid input to a system expecting HCL. This helps makes systems -interoperable with other systems. - -HCL is heavily inspired by -[libucl](https://github.com/vstakhov/libucl), -nginx configuration, and others similar. - -## Why? - -A common question when viewing HCL is to ask the question: why not -JSON, YAML, etc.? - -Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) -used a variety of configuration languages from full programming languages -such as Ruby to complete data structure languages such as JSON. What we -learned is that some people wanted human-friendly configuration languages -and some people wanted machine-friendly languages. - -JSON fits a nice balance in this, but is fairly verbose and most -importantly doesn't support comments. With YAML, we found that beginners -had a really hard time determining what the actual structure was, and -ended up guessing more often than not whether to use a hyphen, colon, etc. -in order to represent some configuration key. - -Full programming languages such as Ruby enable complex behavior -a configuration language shouldn't usually allow, and also forces -people to learn some set of Ruby. - -Because of this, we decided to create our own configuration language -that is JSON-compatible. Our configuration language (HCL) is designed -to be written and modified by humans. The API for HCL allows JSON -as an input so that it is also machine-friendly (machines can generate -JSON instead of trying to generate HCL). - -Our goal with HCL is not to alienate other configuration languages. -It is instead to provide HCL as a specialized language for our tools, -and JSON as the interoperability layer. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammar is listed here. - - * Single line comments start with `#` or `//` - - * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments - are not allowed. A multi-line comment (also known as a block comment) - terminates at the first `*/` found. - - * Values are assigned with the syntax `key = value` (whitespace doesn't - matter). The value can be any primitive: a string, number, boolean, - object, or list. - - * Strings are double-quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Multi-line strings start with `<- - echo %Path% - - go version - - go env - - go get -t ./... - -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go deleted file mode 100644 index 0b39c1b9..00000000 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ /dev/null @@ -1,724 +0,0 @@ -package hcl - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/hcl/token" -) - -// This is the tag to use with structures to have settings for HCL -const tagName = "hcl" - -var ( - // nodeType holds a reference to the type of ast.Node - nodeType reflect.Type = findNodeType() -) - -// Unmarshal accepts a byte slice as input and writes the -// data to the value pointed to by v. 
-func Unmarshal(bs []byte, v interface{}) error { - root, err := parse(bs) - if err != nil { - return err - } - - return DecodeObject(v, root) -} - -// Decode reads the given input and decodes it into the structure -// given by `out`. -func Decode(out interface{}, in string) error { - obj, err := Parse(in) - if err != nil { - return err - } - - return DecodeObject(out, obj) -} - -// DecodeObject is a lower-level version of Decode. It decodes a -// raw Object into the given output. -func DecodeObject(out interface{}, n ast.Node) error { - val := reflect.ValueOf(out) - if val.Kind() != reflect.Ptr { - return errors.New("result must be a pointer") - } - - // If we have the file, we really decode the root node - if f, ok := n.(*ast.File); ok { - n = f.Node - } - - var d decoder - return d.decode("root", n, val.Elem()) -} - -type decoder struct { - stack []reflect.Kind -} - -func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { - k := result - - // If we have an interface with a valid value, we use that - // for the check. - if result.Kind() == reflect.Interface { - elem := result.Elem() - if elem.IsValid() { - k = elem - } - } - - // Push current onto stack unless it is an interface. - if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int, reflect.Int32, reflect.Int64: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - case token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - 
if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. - testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what its supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // thing and populate it. 
- if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. - done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - - // if item is an object that was decoded from ambiguous JSON and - // flattened, make sure it's expanded if it needs to decode into a - // defined structure. - item := expandObject(item, val) - - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -// expandObject detects if an ambiguous JSON object was flattened to a List which -// should be decoded into a struct, and expands the ast to properly deocode. -func expandObject(node ast.Node, result reflect.Value) ast.Node { - item, ok := node.(*ast.ObjectItem) - if !ok { - return node - } - - elemType := result.Type() - - // our target type must be a struct - switch elemType.Kind() { - case reflect.Ptr: - switch elemType.Elem().Kind() { - case reflect.Struct: - //OK - default: - return node - } - case reflect.Struct: - //OK - default: - return node - } - - // A list value will have a key and field name. If it had more fields, - // it wouldn't have been flattened. 
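-	// (Illustrative note: an item flattened from nested JSON objects, e.g.
-	// from input shaped like {"parent": {"child": { ... }}}, can carry
-	// Keys ["parent", "child"]; the two-key check below detects exactly
-	// that shape so it can be expanded back into a nested object.)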
- if len(item.Keys) != 2 { - return node - } - - keyToken := item.Keys[0].Token - item.Keys = item.Keys[1:] - - // we need to un-flatten the ast enough to decode - newNode := &ast.ObjectItem{ - Keys: []*ast.ObjectKey{ - &ast.ObjectKey{ - Token: keyToken, - }, - }, - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - - return newNode -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok && item != nil { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - fields := make(map[*reflect.StructField]reflect.Value) - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - - // Ignore fields with tag name "-" - if tagParts[0] == "-" { - continue - } - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. - squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - for fieldType, field := range fields { - if !field.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. 
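-		// (Note: Go's reflect package cannot set unexported struct fields,
-		// which is the usual reason CanSet reports false here.)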
- if !field.CanSet() { - continue - } - - fieldName := fieldType.Name - - tagValue := fieldType.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, field) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - field.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, field) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. - filter := list.Filter(fieldName) - - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used key - usedKeys[fieldName] = struct{}{} - - // Create the field name and decode. We range over the elements - // because we actually want the value. - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, field); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, field); err != nil { - return err - } - } - - decodedFields = append(decodedFields, fieldType.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b5..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. -// -// If you choose to parse HCL into a raw AST, the benefit is that you -// can write custom visitor implementations to implement custom -// semantic checks. By default, HCL does not perform any semantic -// checks. -package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go deleted file mode 100644 index 6e5ef654..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package ast declares the types used to represent syntax trees for HCL -// (HashiCorp Configuration Language) -package ast - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/token" -) - -// Node is an element in the abstract syntax tree. 
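-// (Each concrete AST type below satisfies Node via the unexported node()
-// marker method, which keeps the interface closed to this package.)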
-type Node interface {
-	node()
-	Pos() token.Pos
-}
-
-func (File) node()         {}
-func (ObjectList) node()   {}
-func (ObjectKey) node()    {}
-func (ObjectItem) node()   {}
-func (Comment) node()      {}
-func (CommentGroup) node() {}
-func (ObjectType) node()   {}
-func (LiteralType) node()  {}
-func (ListType) node()     {}
-
-// File represents a single HCL file.
-type File struct {
-	Node     Node            // usually a *ObjectList
-	Comments []*CommentGroup // list of all comments in the source
-}
-
-func (f *File) Pos() token.Pos {
-	return f.Node.Pos()
-}
-
-// ObjectList represents a list of ObjectItems. An HCL file itself is an
-// ObjectList.
-type ObjectList struct {
-	Items []*ObjectItem
-}
-
-func (o *ObjectList) Add(item *ObjectItem) {
-	o.Items = append(o.Items, item)
-}
-
-// Filter filters out the objects with the given key list as a prefix.
-//
-// The returned list of objects contains ObjectItems where the keys have
-// this prefix already stripped off. This might result in objects with
-// zero-length key lists if they have no children.
-//
-// If no matches are found, an empty ObjectList (non-nil) is returned.
-func (o *ObjectList) Filter(keys ...string) *ObjectList {
-	var result ObjectList
-	for _, item := range o.Items {
-		// If there aren't enough keys, then ignore this item
-		if len(item.Keys) < len(keys) {
-			continue
-		}
-
-		match := true
-		for i, key := range item.Keys[:len(keys)] {
-			key := key.Token.Value().(string)
-			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
-				match = false
-				break
-			}
-		}
-		if !match {
-			continue
-		}
-
-		// Strip off the prefix from the children
-		newItem := *item
-		newItem.Keys = newItem.Keys[len(keys):]
-		result.Add(&newItem)
-	}
-
-	return &result
-}
-
-// Children returns further nested objects (key length > 0) within this
-// ObjectList. This should be used with Filter to get at child items.
-func (o *ObjectList) Children() *ObjectList {
-	var result ObjectList
-	for _, item := range o.Items {
-		if len(item.Keys) > 0 {
-			result.Add(item)
-		}
-	}
-
-	return &result
-}
-
-// Elem returns items in the list that are direct element assignments
-// (key length == 0). This should be used with Filter to get at elements.
-func (o *ObjectList) Elem() *ObjectList {
-	var result ObjectList
-	for _, item := range o.Items {
-		if len(item.Keys) == 0 {
-			result.Add(item)
-		}
-	}
-
-	return &result
-}
-
-func (o *ObjectList) Pos() token.Pos {
-	// returns the position of the first item
-	return o.Items[0].Pos()
-}
-
-// ObjectItem represents an HCL Object Item. An item is represented with a key
-// (or keys). It can be an assignment or an object (both normal and nested).
-type ObjectItem struct {
-	// Keys has length one if the item is an assignment. If it's a
-	// nested object it can be longer than one. In that case "assign" is
-	// invalid, as there are no assignments for a nested object.
-	Keys []*ObjectKey
-
-	// Assign contains the position of "=", if any
-	Assign token.Pos
-
-	// Val is the item's value. It can be an object, list, number, bool or a
-	// string. If the key length is larger than one, Val can only be of type
-	// Object.
-	Val Node
-
-	LeadComment *CommentGroup // associated lead comment
-	LineComment *CommentGroup // associated line comment
-}
-
-func (o *ObjectItem) Pos() token.Pos {
-	// I'm not entirely sure what causes this, but removing this causes
-	// a test failure. We should investigate at some point.
-	if len(o.Keys) == 0 {
-		return token.Pos{}
-	}
-
-	return o.Keys[0].Pos()
-}
-
-// An ObjectKey is either a bare identifier or a quoted string.
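-// (For example, both the bare identifier foo and the quoted string "foo"
-// are accepted as object keys.)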
-type ObjectKey struct {
-	Token token.Token
-}
-
-func (o *ObjectKey) Pos() token.Pos {
-	return o.Token.Pos
-}
-
-// LiteralType represents a literal of basic type. Valid types are:
-// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
-type LiteralType struct {
-	Token token.Token
-
-	// comment types, only used when in a list
-	LeadComment *CommentGroup
-	LineComment *CommentGroup
-}
-
-func (l *LiteralType) Pos() token.Pos {
-	return l.Token.Pos
-}
-
-// ListType represents an HCL List type
-type ListType struct {
-	Lbrack token.Pos // position of "["
-	Rbrack token.Pos // position of "]"
-	List   []Node    // the elements in lexical order
-}
-
-func (l *ListType) Pos() token.Pos {
-	return l.Lbrack
-}
-
-func (l *ListType) Add(node Node) {
-	l.List = append(l.List, node)
-}
-
-// ObjectType represents an HCL Object Type
-type ObjectType struct {
-	Lbrace token.Pos   // position of "{"
-	Rbrace token.Pos   // position of "}"
-	List   *ObjectList // the nodes in lexical order
-}
-
-func (o *ObjectType) Pos() token.Pos {
-	return o.Lbrace
-}
-
-// Comment node represents a single //, # style or /* */ style comment
-type Comment struct {
-	Start token.Pos // position of / or #
-	Text  string
-}
-
-func (c *Comment) Pos() token.Pos {
-	return c.Start
-}
-
-// CommentGroup node represents a sequence of comments with no other tokens and
-// no empty lines between.
-type CommentGroup struct {
-	List []*Comment // len(List) > 0
-}
-
-func (c *CommentGroup) Pos() token.Pos {
-	return c.List[0].Pos()
-}
-
-//-------------------------------------------------------------------
-// GoStringer
-//-------------------------------------------------------------------
-
-func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
-func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
deleted file mode 100644
index ba07ad42..00000000
--- a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package ast
-
-import "fmt"
-
-// WalkFunc describes a function to be called for each node during a Walk. The
-// returned node can be used to rewrite the AST. Walking stops if the returned
-// bool is false.
-type WalkFunc func(Node) (Node, bool)
-
-// Walk traverses an AST in depth-first order: It starts by calling fn(node);
-// node must not be nil. If fn returns true, Walk invokes fn recursively for
-// each of the non-nil children of node, followed by a call of fn(nil). The
-// node returned by fn can be used to rewrite the node that was passed to it.
-func Walk(node Node, fn WalkFunc) Node {
-	rewritten, ok := fn(node)
-	if !ok {
-		return rewritten
-	}
-
-	switch n := node.(type) {
-	case *File:
-		n.Node = Walk(n.Node, fn)
-	case *ObjectList:
-		for i, item := range n.Items {
-			n.Items[i] = Walk(item, fn).(*ObjectItem)
-		}
-	case *ObjectKey:
-		// nothing to do
-	case *ObjectItem:
-		for i, k := range n.Keys {
-			n.Keys[i] = Walk(k, fn).(*ObjectKey)
-		}
-
-		if n.Val != nil {
-			n.Val = Walk(n.Val, fn)
-		}
-	case *LiteralType:
-		// nothing to do
-	case *ListType:
-		for i, l := range n.List {
-			n.List[i] = Walk(l, fn)
-		}
-	case *ObjectType:
-		n.List = Walk(n.List, fn).(*ObjectList)
-	default:
-		// should we panic here?
- fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381d..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. -type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index b4881806..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,520 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - // normalize all line endings - // since the scanner and output only work with "\n" line endings, we may - // end up with dangling "\r" characters in the parsed data. - src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList(false) - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -// objectList parses a list of items within an object (generally k/v pairs). -// The parameter" obj" tells this whether to we are within an object (braces: -// '{', '}') or just at the top level. If we're within an object, we end -// at an RBRACE. -func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - if obj { - tok := p.scan() - p.unscan() - if tok.Type == token.RBRACE { - break - } - } - - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // object lists can be optionally comma-delimited e.g. 
when a list of maps - // is being expressed, so a comma is allowed here - it's simply consumed - tok := p.scan() - if tok.Type != token.COMMA { - p.unscan() - } - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. - err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. - p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")) - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. - return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. 
this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - var err error - - // If we have no keys, then it is a syntax error. i.e. {{}} is not - // allowed. - if len(keys) == 0 { - err = &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), - } - } - - // object - return keys, err - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("illegal character"), - } - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList(true) - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. 
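-	// (Put differently: an error that arrives together with a closing
-	// RBRACE means the object body was still terminated properly, so it
-	// is tolerated here.)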
- if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // No error, scan and expect the ending to be a brace - if tok := p.scan(); tok.Type != token.RBRACE { - return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type) - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - if needComma { - switch tok.Type { - case token.COMMA, token.RBRACK: - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error parsing list, expected comma or list end, got: %s", - tok.Type), - } - } - } - switch tok.Type { - case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - node, err := p.literalType() - if err != nil { - return nil, err - } - - // If there is a lead comment, apply it - if p.leadComment != nil { - node.LeadComment = p.leadComment - p.leadComment = nil - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil && len(l.List) > 0 { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.LBRACE: - // Looks like a nested object, so parse it out - node, err := p.objectType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse object within list: %s", err), - } - } - l.Add(node) - needComma = true - case token.LBRACK: - node, err := p.listType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse list within list: %s", err), - } - } - l.Add(node) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. - prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. 
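-			// (Terminology: a line comment trails a token on the same line,
-			// e.g. `foo = 1 # note`; a lead comment occupies the lines
-			// immediately before the item it documents.)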
- comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. - p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index 69662367..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,651 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. 
- b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // If we see a null character with data left, then that is an error - if ch == '\x00' && s.buf.Len() > 0 { - s.err("unexpected null character (0x00)") - return eof - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = 
string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -func (s *Scanner) scanComment(ch rune) { - // single line comments - if ch == '#' || (ch == '/' && s.peek() != '*') { - if ch == '/' && s.peek() != '/' { - s.err("expected '/' for comment") - return - } - - ch = s.next() - for ch != '\n' && ch >= 0 && ch != eof { - ch = s.next() - } - if ch != eof && ch >= 0 { - s.unread() - } - return - } - - // be sure we get the character after /* This allows us to find comment's - // that are not erminated - if ch == '/' { - s.next() - ch = s.next() // read character after "/*" - } - - // look for /* - style comments - for { - if ch < 0 || ch == eof { - s.err("comment not terminated") - break - } - - ch0 := ch - ch = s.next() - if ch0 == '*' && ch == '/' { - break - } - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - if ch == '0' { - // check for hexadecimal, octal or float - ch = s.next() - if ch == 'x' || ch == 'X' { - // hexadecimal - ch = s.next() - found := false - for isHexadecimal(ch) { - ch = s.next() - found = true - } - - if !found { - s.err("illegal hexadecimal number") - } - - if ch != eof { - s.unread() - } - - return token.NUMBER - } - - // now it's either something like: 0421(octal) or 0.1231(float) - illegalOctal := false - for isDecimal(ch) { - ch = s.next() - if ch == '8' || ch == '9' { - // this is just a possibility. For example 0159 is illegal, but - // 0159.23 is valid. So we mark a possible illegal octal. If - // the next character is not a period, we'll print the error. - illegalOctal = true - } - } - - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if illegalOctal { - s.err("illegal octal number") - } - - if ch != eof { - s.unread() - } - return token.NUMBER - } - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - return token.NUMBER -} - -// scanMantissa scans the mantissa begining from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
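-// (For example, while scanning `12e-4` this is entered at 'e' and consumes
-// the optional sign and the exponent digits that follow.)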
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanHeredoc scans a heredoc string -func (s *Scanner) scanHeredoc() { - // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { - break - } - - // Not an anchor match, record the start of a new line - lineStart = s.srcPos.Offset - } - - if ch == eof { - s.err("heredoc not terminated") - return - } - } - - return -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - start := n - for n > 0 && digitVal(ch) < base { - ch = s.next() - if ch == eof { - // If we see an EOF, we halt any more scanning of digits - // immediately. - break - } - - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - if n != start { - // we scanned all digits, put the last non digit char back, - // only if we read anything at all - s.unread() - } - - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. 
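-// (err below uses this to attach a best-effort position to scanner errors.)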
-func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go deleted file mode 100644 index 5f981eaa..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ /dev/null @@ -1,241 +0,0 @@ -package strconv - -import ( - "errors" - "unicode/utf8" -) - -// ErrSyntax indicates that a value does not have the right syntax for the target type. -var ErrSyntax = errors.New("invalid syntax") - -// Unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -func Unquote(s string) (t string, err error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' { - return "", ErrSyntax - } - if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. 
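-	// (Fast path: a string containing no escapes, no quotes and no '$'
-	// needs no rewriting, so it is returned without allocating a buffer.)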
- if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - if s[0] == '\n' { - return "", ErrSyntax - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return 
- } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb72..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index e37c0664..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! 
Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list is empty, keep the original list - if len(ot.List) == 0 { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 125a5f07..00000000 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,313 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. 
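-// (Illustrative sketch, not from the original sources: JSON input such as
-//   {"service": {"name": "web"}}
-// parses into an ObjectType that, after flattening, decodes like the HCL
-// form `service { name = "web" }`.)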
-func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - pos := p.tok.Pos - o.Assign = hcltoken.Pos{ - Filename: pos.Filename, - Offset: pos.Offset, - Line: pos.Line, - Column: pos.Column, - } - - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // If we have a zero keycount it means that we never got - // an object key, i.e. `{ :`. This is a syntax error. - if keyCount == 0 { - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - - // Done - return keys, nil - case token.ILLEGAL: - return nil, errors.New("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
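-// (Despite the generic wording above, this method accepts only LBRACE:
-// the root of a JSON document must be an object.)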
-func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go deleted file mode 100644 index dd5c72bb..00000000 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ /dev/null @@ -1,451 +0,0 @@ -package scanner - -import ( - "bytes" - "fmt" - "os" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/json/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. 
-func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - zero := ch == '0' - pos := s.srcPos - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - - // If we have a larger number and this is zero, error - if zero && pos != s.srcPos { - s.err("numbers cannot start with 0") - } - - return token.NUMBER -} - -// scanMantissa scans the mantissa begining from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch == '\n' || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - // we scanned all digits, put the last non digit char back - s.unread() - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. 
If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isLetter returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal digit -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is a hexadecimal digit -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal, decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go deleted file mode 100644 index 59c1bb72..00000000 --- a/vendor/github.com/hashicorp/hcl/json/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u.
-func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3ee..00000000 --- a/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. 
-func (t Token) HCLToken() hcltoken.Token { - switch t.Type { - case BOOL: - return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} - case FLOAT: - return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} - case NULL: - return hcltoken.Token{Type: hcltoken.STRING, Text: ""} - case NUMBER: - return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} - case STRING: - return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} - default: - panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) - } -} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go deleted file mode 100644 index d9993c29..00000000 --- a/vendor/github.com/hashicorp/hcl/lex.go +++ /dev/null @@ -1,38 +0,0 @@ -package hcl - -import ( - "unicode" - "unicode/utf8" -) - -type lexModeValue byte - -const ( - lexModeUnknown lexModeValue = iota - lexModeHcl - lexModeJson -) - -// lexMode returns whether we're going to be parsing in JSON -// mode or HCL mode. -func lexMode(v []byte) lexModeValue { - var ( - r rune - w int - offset int - ) - - for { - r, w = utf8.DecodeRune(v[offset:]) - offset += w - if unicode.IsSpace(r) { - continue - } - if r == '{' { - return lexModeJson - } - break - } - - return lexModeHcl -} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go deleted file mode 100644 index 1fca53c4..00000000 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hclParser "github.com/hashicorp/hcl/hcl/parser" - jsonParser "github.com/hashicorp/hcl/json/parser" -) - -// ParseBytes accepts as input byte slice and returns ast tree. -// -// Input can be either JSON or HCL -func ParseBytes(in []byte) (*ast.File, error) { - return parse(in) -} - -// ParseString accepts input as a string and returns ast tree. -func ParseString(input string) (*ast.File, error) { - return parse([]byte(input)) -} - -func parse(in []byte) (*ast.File, error) { - switch lexMode(in) { - case lexModeHcl: - return hclParser.Parse(in) - case lexModeJson: - return jsonParser.Parse(in) - } - - return nil, fmt.Errorf("unknown config format") -} - -// Parse parses the given input and returns the root object. -// -// The input format can be either HCL or JSON. -func Parse(input string) (*ast.File, error) { - return parse([]byte(input)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md deleted file mode 100644 index f59ce92e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md +++ /dev/null @@ -1,184 +0,0 @@ -# HCL Dynamic Blocks Extension - -This HCL extension implements a special block type named "dynamic" that can -be used to dynamically generate blocks of other types by iterating over -collection values. - -Normally the block structure in an HCL configuration file is rigid, even -though dynamic expressions can be used within attribute values. This is -convenient for most applications since it allows the overall structure of -the document to be decoded easily, but in some applications it is desirable -to allow dynamic block generation within certain portions of the configuration. 
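As a quick orientation before the syntax details, here is a minimal sketch of how an application might apply the extension. The names `configBody`, `evalCtx`, and `schema` are placeholders for values the calling application already has; only `dynblock.Expand` and the standard `hcl.Body` methods come from this package.

```go
// A minimal sketch, assuming the application has already parsed its
// configuration into an hcl.Body (configBody), built an *hcl.EvalContext
// (evalCtx) for the for_each/labels expressions, and defined an
// *hcl.BodySchema (schema). These three names are placeholders.
expanded := dynblock.Expand(configBody, evalCtx)

// Expansion is lazy: nothing happens until the body is decoded, and
// Content then evaluates and expands the "dynamic" blocks selected by
// the schema.
content, diags := expanded.Content(schema)
if diags.HasErrors() {
	// handle or report the diagnostics
}
_ = content
```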
- -Dynamic block generation is performed using the `dynamic` block type: - -```hcl -toplevel { - nested { - foo = "static block 1" - } - - dynamic "nested" { - for_each = ["a", "b", "c"] - iterator = nested - content { - foo = "dynamic block ${nested.value}" - } - } - - nested { - foo = "static block 2" - } -} -``` - -The above is interpreted as if it were written as follows: - -```hcl -toplevel { - nested { - foo = "static block 1" - } - - nested { - foo = "dynamic block a" - } - - nested { - foo = "dynamic block b" - } - - nested { - foo = "dynamic block c" - } - - nested { - foo = "static block 2" - } -} -``` - -Since HCL block syntax is not normally exposed to the possibility of unknown -values, this extension must make some compromises when asked to iterate over -an unknown collection. If the length of the collection cannot be statically -recognized (because it is an unknown value of list, map, or set type) then -the `dynamic` construct will generate a _single_ dynamic block whose iterator -key and value are both unknown values of the dynamic pseudo-type, thus causing -any attribute values derived from iteration to appear as unknown values. There -is no explicit representation of the fact that the length of the collection may -eventually be different from one. - -## Usage - -Pass a body to the `Expand` function to obtain a new body that will, on access -to its content, evaluate and expand any nested `dynamic` blocks. -Dynamic block processing is also automatically propagated into any nested -blocks that are returned, allowing users to nest dynamic blocks inside -one another and to nest dynamic blocks inside other static blocks. - -HCL structural decoding does not normally have access to an `EvalContext`, so -any variables and functions that should be available to the `for_each` -and `labels` expressions must be passed in when calling `Expand`. Expressions -within the `content` block are evaluated separately and so can be passed a -separate `EvalContext` if desired, during normal attribute expression -evaluation. - -## Detecting Variables - -Some applications dynamically generate an `EvalContext` by analyzing which -variables are referenced by an expression before evaluating it. - -This unfortunately requires some extra effort when this analysis is required -for the context passed to `Expand`: the HCL API requires a schema to be -provided in order to do any analysis of the blocks in a body, but the low-level -schema model provides a description of only one level of nested blocks at -a time, and thus a new schema must be provided for each additional level of -nesting. - -To make this arduous process as convenient as possible, this package provides -a helper function `WalkExpandVariables`, which returns a `WalkVariablesNode` -instance that can be used to find variables directly in a given body and also -determine which nested blocks require recursive calls. Using this mechanism -requires that the caller be able to look up a schema given a nested block type.
-For _simple_ formats where a specific block type name always has the same schema -regardless of context, a walk can be implemented as follows: - -```go -func walkVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema) []hcl.Traversal { - vars, children := node.Visit(schema) - - for _, child := range children { - var childSchema *hcl.BodySchema - switch child.BlockTypeName { - case "a": - childSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "b", - LabelNames: []string{"key"}, - }, - }, - } - case "b": - childSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "val", - Required: true, - }, - }, - } - default: - // Should never happen, because the above cases should be exhaustive - // for the application's configuration format. - panic(fmt.Errorf("can't find schema for unknown block type %q", child.BlockTypeName)) - } - - vars = append(vars, walkVariables(child.Node, childSchema)...) - } - - return vars -} -``` - -### Detecting Variables with `hcldec` Specifications - -For applications that use the higher-level `hcldec` package to decode nested -configuration structures into `cty` values, the same specification can be used -to automatically drive the recursive variable-detection walk described above. - -The helper function `ExpandVariablesHCLDec` allows an entire recursive -configuration structure to be analyzed in a single call given a `hcldec.Spec` -that describes the nested block structure. This means a `hcldec`-based -application can support dynamic blocks with only a little additional effort: - -```go -func decodeBody(body hcl.Body, spec hcldec.Spec) (cty.Value, hcl.Diagnostics) { - // Determine which variables are needed to expand dynamic blocks - neededForDynamic := dynblock.ExpandVariablesHCLDec(body, spec) - - // Build a suitable EvalContext and expand dynamic blocks - dynCtx := buildEvalContext(neededForDynamic) - dynBody := dynblock.Expand(body, dynCtx) - - // Determine which variables are needed to fully decode the expanded body - // This will analyze expressions that came both from static blocks in the - // original body and from blocks that were dynamically added by Expand. - neededForDecode := hcldec.Variables(dynBody, spec) - - // Build a suitable EvalContext and then fully decode the body as per the - // hcldec specification. - decCtx := buildEvalContext(neededForDecode) - return hcldec.Decode(dynBody, spec, decCtx) -} - -func buildEvalContext(needed []hcl.Traversal) *hcl.EvalContext { - // (to be implemented by your application) -} -``` - -## Performance - -This extension goes quite harshly against the grain of the HCL API, and -so it uses lots of wrapping objects and temporary data structures to get its -work done. HCL in general is not suitable for use in high-performance situations -or situations sensitive to memory pressure, but that is _especially_ true for -this extension. diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go deleted file mode 100644 index 65a9eab2..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go +++ /dev/null @@ -1,262 +0,0 @@ -package dynblock - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// expandBody wraps another hcl.Body and expands any "dynamic" blocks found -// inside whenever Content or PartialContent is called.
-type expandBody struct { - original hcl.Body - forEachCtx *hcl.EvalContext - iteration *iteration // non-nil if we're nested inside another "dynamic" block - - // These are used with PartialContent to produce a "remaining items" - // body to return. They are nil on all bodies fresh out of the transformer. - // - // Note that this is re-implemented here rather than delegating to the - // existing support required by the underlying body because we need to - // retain access to the entire original body on subsequent decode operations - // so we can retain any "dynamic" blocks for types we didn't take consume - // on the first pass. - hiddenAttrs map[string]struct{} - hiddenBlocks map[string]hcl.BlockHeaderSchema -} - -func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - extSchema := b.extendSchema(schema) - rawContent, diags := b.original.Content(extSchema) - - blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false) - diags = append(diags, blockDiags...) - attrs := b.prepareAttributes(rawContent.Attributes) - - content := &hcl.BodyContent{ - Attributes: attrs, - Blocks: blocks, - MissingItemRange: b.original.MissingItemRange(), - } - - return content, diags -} - -func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - extSchema := b.extendSchema(schema) - rawContent, _, diags := b.original.PartialContent(extSchema) - // We discard the "remain" argument above because we're going to construct - // our own remain that also takes into account remaining "dynamic" blocks. - - blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true) - diags = append(diags, blockDiags...) - attrs := b.prepareAttributes(rawContent.Attributes) - - content := &hcl.BodyContent{ - Attributes: attrs, - Blocks: blocks, - MissingItemRange: b.original.MissingItemRange(), - } - - remain := &expandBody{ - original: b.original, - forEachCtx: b.forEachCtx, - iteration: b.iteration, - hiddenAttrs: make(map[string]struct{}), - hiddenBlocks: make(map[string]hcl.BlockHeaderSchema), - } - for name := range b.hiddenAttrs { - remain.hiddenAttrs[name] = struct{}{} - } - for typeName, blockS := range b.hiddenBlocks { - remain.hiddenBlocks[typeName] = blockS - } - for _, attrS := range schema.Attributes { - remain.hiddenAttrs[attrS.Name] = struct{}{} - } - for _, blockS := range schema.Blocks { - remain.hiddenBlocks[blockS.Type] = blockS - } - - return content, remain, diags -} - -func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { - // We augment the requested schema to also include our special "dynamic" - // block type, since then we'll get instances of it interleaved with - // all of the literal child blocks we must also include. - extSchema := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1), - } - copy(extSchema.Blocks, schema.Blocks) - extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) - - // If we have any hiddenBlocks then we also need to register those here - // so that a call to "Content" on the underlying body won't fail. - // (We'll filter these out again once we process the result of either - // Content or PartialContent.) - for _, blockS := range b.hiddenBlocks { - extSchema.Blocks = append(extSchema.Blocks, blockS) - } - - // If we have any hiddenAttrs then we also need to register these, for - // the same reason as we deal with hiddenBlocks above. 
- if len(b.hiddenAttrs) != 0 { - newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs)) - copy(newAttrs, extSchema.Attributes) - for name := range b.hiddenAttrs { - newAttrs = append(newAttrs, hcl.AttributeSchema{ - Name: name, - Required: false, - }) - } - extSchema.Attributes = newAttrs - } - - return extSchema -} - -func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes { - if len(b.hiddenAttrs) == 0 && b.iteration == nil { - // Easy path: just pass through the attrs from the original body verbatim - return rawAttrs - } - - // Otherwise we have some work to do: we must filter out any attributes - // that are hidden (since a previous PartialContent call already saw these) - // and wrap the expressions of the inner attributes so that they will - // have access to our iteration variables. - attrs := make(hcl.Attributes, len(rawAttrs)) - for name, rawAttr := range rawAttrs { - if _, hidden := b.hiddenAttrs[name]; hidden { - continue - } - if b.iteration != nil { - attr := *rawAttr // shallow copy so we can mutate it - attr.Expr = exprWrap{ - Expression: attr.Expr, - i: b.iteration, - } - attrs[name] = &attr - } else { - // If we have no active iteration then no wrapping is required. - attrs[name] = rawAttr - } - } - return attrs -} - -func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) { - var blocks hcl.Blocks - var diags hcl.Diagnostics - - for _, rawBlock := range rawBlocks { - switch rawBlock.Type { - case "dynamic": - realBlockType := rawBlock.Labels[0] - if _, hidden := b.hiddenBlocks[realBlockType]; hidden { - continue - } - - var blockS *hcl.BlockHeaderSchema - for _, candidate := range schema.Blocks { - if candidate.Type == realBlockType { - blockS = &candidate - break - } - } - if blockS == nil { - // Not a block type that the caller requested. - if !partial { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported block type", - Detail: fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType), - Subject: &rawBlock.LabelRanges[0], - }) - } - continue - } - - spec, specDiags := b.decodeSpec(blockS, rawBlock) - diags = append(diags, specDiags...) - if specDiags.HasErrors() { - continue - } - - if spec.forEachVal.IsKnown() { - for it := spec.forEachVal.ElementIterator(); it.Next(); { - key, value := it.Element() - i := b.iteration.MakeChild(spec.iteratorName, key, value) - - block, blockDiags := spec.newBlock(i, b.forEachCtx) - diags = append(diags, blockDiags...) - if block != nil { - // Attach our new iteration context so that attributes - // and other nested blocks can refer to our iterator. - block.Body = b.expandChild(block.Body, i) - blocks = append(blocks, block) - } - } - } else { - // If our top-level iteration value isn't known then we're forced - // to compromise since HCL doesn't have any concept of an - // "unknown block". In this case then, we'll produce a single - // dynamic block with the iterator values set to DynamicVal, - // which at least makes the potential for a block visible - // in our result, even though it's not represented in a fully-accurate - // way. - i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal) - block, blockDiags := spec.newBlock(i, b.forEachCtx) - diags = append(diags, blockDiags...) 
- if block != nil { - block.Body = b.expandChild(block.Body, i) - - // We additionally force all of the leaf attribute values - // in the result to be unknown so the calling application - // can, if necessary, use that as a heuristic to detect - // when a single nested block might be standing in for - // multiple blocks yet to be expanded. This retains the - // structure of the generated body but forces all of its - // leaf attribute values to be unknown. - block.Body = unknownBody{block.Body} - - blocks = append(blocks, block) - } - } - - default: - if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden { - // A static block doesn't create a new iteration context, but - // it does need to inherit _our own_ iteration context in - // case it contains expressions that refer to our inherited - // iterators, or nested "dynamic" blocks. - expandedBlock := *rawBlock // shallow copy - expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration) - blocks = append(blocks, &expandedBlock) - } - } - } - - return blocks, diags -} - -func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body { - chiCtx := i.EvalContext(b.forEachCtx) - ret := Expand(child, chiCtx) - ret.(*expandBody).iteration = i - return ret -} - -func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - // blocks aren't allowed in JustAttributes mode and this body can - // only produce blocks, so we'll just pass straight through to our - // underlying body here. - return b.original.JustAttributes() -} - -func (b *expandBody) MissingItemRange() hcl.Range { - return b.original.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go deleted file mode 100644 index 98a51ead..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go +++ /dev/null @@ -1,215 +0,0 @@ -package dynblock - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -type expandSpec struct { - blockType string - blockTypeRange hcl.Range - defRange hcl.Range - forEachVal cty.Value - iteratorName string - labelExprs []hcl.Expression - contentBody hcl.Body - inherited map[string]*iteration -} - -func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var schema *hcl.BodySchema - if len(blockS.LabelNames) != 0 { - schema = dynamicBlockBodySchemaLabels - } else { - schema = dynamicBlockBodySchemaNoLabels - } - - specContent, specDiags := rawSpec.Body.Content(schema) - diags = append(diags, specDiags...) - if specDiags.HasErrors() { - return nil, diags - } - - //// for_each attribute - - eachAttr := specContent.Attributes["for_each"] - eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx) - diags = append(diags, eachDiags...) - - if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType { - // We skip this error for DynamicPseudoType because that means we either - // have a null (which is checked immediately below) or an unknown - // (which is handled in the expandBody Content methods). - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic for_each value", - Detail: fmt.Sprintf("Cannot use a %s value in for_each. 
An iterable collection is required.", eachVal.Type().FriendlyName()), - Subject: eachAttr.Expr.Range().Ptr(), - Expression: eachAttr.Expr, - EvalContext: b.forEachCtx, - }) - return nil, diags - } - if eachVal.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic for_each value", - Detail: "Cannot use a null value in for_each.", - Subject: eachAttr.Expr.Range().Ptr(), - Expression: eachAttr.Expr, - EvalContext: b.forEachCtx, - }) - return nil, diags - } - - //// iterator attribute - - iteratorName := blockS.Type - if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil { - itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr) - diags = append(diags, itDiags...) - if itDiags.HasErrors() { - return nil, diags - } - - if len(itTraversal) != 1 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic iterator name", - Detail: "Dynamic iterator must be a single variable name.", - Subject: itTraversal.SourceRange().Ptr(), - }) - return nil, diags - } - - iteratorName = itTraversal.RootName() - } - - var labelExprs []hcl.Expression - if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil { - var labelDiags hcl.Diagnostics - labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr) - diags = append(diags, labelDiags...) - if labelDiags.HasErrors() { - return nil, diags - } - - if len(labelExprs) > len(blockS.LabelNames) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous dynamic block label", - Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), - Subject: labelExprs[len(blockS.LabelNames)].Range().Ptr(), - }) - return nil, diags - } else if len(labelExprs) < len(blockS.LabelNames) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Insufficient dynamic block labels", - Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), - Subject: labelsAttr.Expr.Range().Ptr(), - }) - return nil, diags - } - } - - // Since our schema requests only blocks of type "content", we can assume - // that all entries in specContent.Blocks are content blocks. - if len(specContent.Blocks) == 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing dynamic content block", - Detail: "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.", - Subject: &specContent.MissingItemRange, - }) - return nil, diags - } - if len(specContent.Blocks) > 1 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous dynamic content block", - Detail: "Only one nested content block is allowed for each dynamic block.", - Subject: &specContent.Blocks[1].DefRange, - }) - return nil, diags - } - - return &expandSpec{ - blockType: blockS.Type, - blockTypeRange: rawSpec.LabelRanges[0], - defRange: rawSpec.DefRange, - forEachVal: eachVal, - iteratorName: iteratorName, - labelExprs: labelExprs, - contentBody: specContent.Blocks[0].Body, - }, diags -} - -func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) { - var diags hcl.Diagnostics - var labels []string - var labelRanges []hcl.Range - lCtx := i.EvalContext(ctx) - for _, labelExpr := range s.labelExprs { - labelVal, labelDiags := labelExpr.Value(lCtx) - diags = append(diags, labelDiags...) 
- if labelDiags.HasErrors() { - return nil, diags - } - - var convErr error - labelVal, convErr = convert.Convert(labelVal, cty.String) - if convErr != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr), - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - if labelVal.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: "Cannot use a null value as a dynamic block label.", - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - if !labelVal.IsKnown() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: "This value is not yet known. Dynamic block labels must be immediately-known values.", - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - - labels = append(labels, labelVal.AsString()) - labelRanges = append(labelRanges, labelExpr.Range()) - } - - block := &hcl.Block{ - Type: s.blockType, - TypeRange: s.blockTypeRange, - Labels: labels, - LabelRanges: labelRanges, - DefRange: s.defRange, - Body: s.contentBody, - } - - return block, diags -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go deleted file mode 100644 index 460a1d2a..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go +++ /dev/null @@ -1,42 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -type exprWrap struct { - hcl.Expression - i *iteration -} - -func (e exprWrap) Variables() []hcl.Traversal { - raw := e.Expression.Variables() - ret := make([]hcl.Traversal, 0, len(raw)) - - // Filter out traversals that refer to our iterator name or any - // iterator we've inherited; we're going to provide those in - // our Value wrapper, so the caller doesn't need to know about them. - for _, traversal := range raw { - rootName := traversal.RootName() - if rootName == e.i.IteratorName { - continue - } - if _, inherited := e.i.Inherited[rootName]; inherited { - continue - } - ret = append(ret, traversal) - } - return ret -} - -func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - extCtx := e.i.EvalContext(ctx) - return e.Expression.Value(extCtx) -} - -// UnwrapExpression returns the expression being wrapped by this instance. -// This allows the original expression to be recovered by hcl.UnwrapExpression. 
-func (e exprWrap) UnwrapExpression() hcl.Expression { - return e.Expression -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go deleted file mode 100644 index c5663886..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go +++ /dev/null @@ -1,66 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -type iteration struct { - IteratorName string - Key cty.Value - Value cty.Value - Inherited map[string]*iteration -} - -func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration { - return &iteration{ - IteratorName: s.iteratorName, - Key: key, - Value: value, - Inherited: s.inherited, - } -} - -func (i *iteration) Object() cty.Value { - return cty.ObjectVal(map[string]cty.Value{ - "key": i.Key, - "value": i.Value, - }) -} - -func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext { - new := base.NewChild() - - if i != nil { - new.Variables = map[string]cty.Value{} - for name, otherIt := range i.Inherited { - new.Variables[name] = otherIt.Object() - } - new.Variables[i.IteratorName] = i.Object() - } - - return new -} - -func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration { - if i == nil { - // Create entirely new root iteration, then - return &iteration{ - IteratorName: iteratorName, - Key: key, - Value: value, - } - } - - inherited := map[string]*iteration{} - for name, otherIt := range i.Inherited { - inherited[name] = otherIt - } - inherited[i.IteratorName] = i - return &iteration{ - IteratorName: iteratorName, - Key: key, - Value: value, - Inherited: inherited, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go deleted file mode 100644 index a5bfd94e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go +++ /dev/null @@ -1,47 +0,0 @@ -// Package dynblock provides an extension to HCL that allows dynamic -// declaration of nested blocks in certain contexts via a special block type -// named "dynamic". -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" -) - -// Expand "dynamic" blocks in the given body, returning a new body that -// has those blocks expanded. -// -// The given EvalContext is used when evaluating "for_each" and "labels" -// attributes within dynamic blocks, allowing those expressions access to -// variables and functions beyond the iterator variable created by the -// iteration. -// -// Expand returns no diagnostics because no blocks are actually expanded -// until a call to Content or PartialContent on the returned body, which -// will then expand only the blocks selected by the schema. -// -// "dynamic" blocks are also expanded automatically within nested blocks -// in the given body, including within other dynamic blocks, thus allowing -// multi-dimensional iteration. However, it is not possible to -// dynamically-generate the "dynamic" blocks themselves except through nesting. 
-// -// parent { -// dynamic "child" { -// for_each = child_objs -// content { -// dynamic "grandchild" { -// for_each = child.value.children -// labels = [grandchild.key] -// content { -// parent_key = child.key -// value = grandchild.value -// } -// } -// } -// } -// } -func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body { - return &expandBody{ - original: body, - forEachCtx: ctx, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go deleted file mode 100644 index b3907d6e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go +++ /dev/null @@ -1,50 +0,0 @@ -package dynblock - -import "github.com/hashicorp/hcl/v2" - -var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{ - Type: "dynamic", - LabelNames: []string{"type"}, -} - -var dynamicBlockBodySchemaLabels = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: true, - }, - { - Name: "iterator", - Required: false, - }, - { - Name: "labels", - Required: true, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - LabelNames: nil, - }, - }, -} - -var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: true, - }, - { - Name: "iterator", - Required: false, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - LabelNames: nil, - }, - }, -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go deleted file mode 100644 index ce98259a..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go +++ /dev/null @@ -1,84 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// unknownBody is a funny body that just reports everything inside it as -// unknown. It uses a given other body as a sort of template for what attributes -// and blocks are inside -- including source location information -- but -// subsitutes unknown values of unknown type for all attributes. -// -// This rather odd process is used to handle expansion of dynamic blocks whose -// for_each expression is unknown. Since a block cannot itself be unknown, -// we instead arrange for everything _inside_ the block to be unknown instead, -// to give the best possible approximation. -type unknownBody struct { - template hcl.Body -} - -var _ hcl.Body = unknownBody{} - -func (b unknownBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, diags := b.template.Content(schema) - content = b.fixupContent(content) - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. - return content, diags -} - -func (b unknownBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - content, remain, diags := b.template.PartialContent(schema) - content = b.fixupContent(content) - remain = unknownBody{remain} // remaining content must also be wrapped - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. 
- return content, remain, diags -} - -func (b unknownBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - attrs, diags := b.template.JustAttributes() - attrs = b.fixupAttrs(attrs) - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. - return attrs, diags -} - -func (b unknownBody) MissingItemRange() hcl.Range { - return b.template.MissingItemRange() -} - -func (b unknownBody) fixupContent(got *hcl.BodyContent) *hcl.BodyContent { - ret := &hcl.BodyContent{} - ret.Attributes = b.fixupAttrs(got.Attributes) - if len(got.Blocks) > 0 { - ret.Blocks = make(hcl.Blocks, 0, len(got.Blocks)) - for _, gotBlock := range got.Blocks { - new := *gotBlock // shallow copy - new.Body = unknownBody{gotBlock.Body} // nested content must also be marked unknown - ret.Blocks = append(ret.Blocks, &new) - } - } - - return ret -} - -func (b unknownBody) fixupAttrs(got hcl.Attributes) hcl.Attributes { - if len(got) == 0 { - return nil - } - ret := make(hcl.Attributes, len(got)) - for name, gotAttr := range got { - new := *gotAttr // shallow copy - new.Expr = hcl.StaticExpr(cty.DynamicVal, gotAttr.Expr.Range()) - ret[name] = &new - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go deleted file mode 100644 index 19233929..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go +++ /dev/null @@ -1,209 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// WalkVariables begins the recursive process of walking all expressions and -// nested blocks in the given body and its child bodies while taking into -// account any "dynamic" blocks. -// -// This function requires that the caller walk through the nested block -// structure in the given body level-by-level so that an appropriate schema -// can be provided at each level to inform further processing. This workflow -// is thus easiest to use for calling applications that have some higher-level -// schema representation available with which to drive this multi-step -// process. If your application uses the hcldec package, you may be able to -// use VariablesHCLDec instead for a more automatic approach. -func WalkVariables(body hcl.Body) WalkVariablesNode { - return WalkVariablesNode{ - body: body, - includeContent: true, - } -} - -// WalkExpandVariables is like Variables but it includes only the variables -// required for successful block expansion, ignoring any variables referenced -// inside block contents. The result is the minimal set of all variables -// required for a call to Expand, excluding variables that would only be -// needed to subsequently call Content or PartialContent on the expanded -// body. -func WalkExpandVariables(body hcl.Body) WalkVariablesNode { - return WalkVariablesNode{ - body: body, - } -} - -type WalkVariablesNode struct { - body hcl.Body - it *iteration - - includeContent bool -} - -type WalkVariablesChild struct { - BlockTypeName string - Node WalkVariablesNode -} - -// Body returns the HCL Body associated with the child node, in case the caller -// wants to do some sort of inspection of it in order to decide what schema -// to pass to Visit. -// -// Most implementations should just fetch a fixed schema based on the -// BlockTypeName field and not access this. 
Deciding on a schema dynamically -// based on the body is a strange thing to do and generally necessary only if -// your caller is already doing other bizarre things with HCL bodies. -func (c WalkVariablesChild) Body() hcl.Body { - return c.Node.body -} - -// Visit returns the variable traversals required for any "dynamic" blocks -// directly in the body associated with this node, and also returns any child -// nodes that must be visited in order to continue the walk. -// -// Each child node has its associated block type name given in its BlockTypeName -// field, which the calling application should use to determine the appropriate -// schema for the content of each child node and pass it to the child node's -// own Visit method to continue the walk recursively. -func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) { - extSchema := n.extendSchema(schema) - container, _, _ := n.body.PartialContent(extSchema) - if container == nil { - return vars, children - } - - children = make([]WalkVariablesChild, 0, len(container.Blocks)) - - if n.includeContent { - for _, attr := range container.Attributes { - for _, traversal := range attr.Expr.Variables() { - var ours, inherited bool - if n.it != nil { - ours = traversal.RootName() == n.it.IteratorName - _, inherited = n.it.Inherited[traversal.RootName()] - } - - if !(ours || inherited) { - vars = append(vars, traversal) - } - } - } - } - - for _, block := range container.Blocks { - switch block.Type { - - case "dynamic": - blockTypeName := block.Labels[0] - inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema) - if inner == nil { - continue - } - - iteratorName := blockTypeName - if attr, exists := inner.Attributes["iterator"]; exists { - iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr) - if len(iterTraversal) == 0 { - // Ignore this invalid dynamic block, since it'll produce - // an error if someone tries to extract content from it - // later anyway. - continue - } - iteratorName = iterTraversal.RootName() - } - blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal) - - if attr, exists := inner.Attributes["for_each"]; exists { - // Filter out iterator names inherited from parent blocks - for _, traversal := range attr.Expr.Variables() { - if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited { - vars = append(vars, traversal) - } - } - } - if attr, exists := inner.Attributes["labels"]; exists { - // Filter out both our own iterator name _and_ those inherited - // from parent blocks, since we provide _both_ of these to the - // label expressions. - for _, traversal := range attr.Expr.Variables() { - ours := traversal.RootName() == iteratorName - _, inherited := blockIt.Inherited[traversal.RootName()] - - if !(ours || inherited) { - vars = append(vars, traversal) - } - } - } - - for _, contentBlock := range inner.Blocks { - // We only request "content" blocks in our schema, so we know - // any blocks we find here will be content blocks. We require - // exactly one content block for actual expansion, but we'll - // be more liberal here so that callers can still collect - // variables from erroneous "dynamic" blocks. 
- children = append(children, WalkVariablesChild{ - BlockTypeName: blockTypeName, - Node: WalkVariablesNode{ - body: contentBlock.Body, - it: blockIt, - includeContent: n.includeContent, - }, - }) - } - - default: - children = append(children, WalkVariablesChild{ - BlockTypeName: block.Type, - Node: WalkVariablesNode{ - body: block.Body, - it: n.it, - includeContent: n.includeContent, - }, - }) - - } - } - - return vars, children -} - -func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { - // We augment the requested schema to also include our special "dynamic" - // block type, since then we'll get instances of it interleaved with - // all of the literal child blocks we must also include. - extSchema := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), - } - copy(extSchema.Blocks, schema.Blocks) - extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) - - return extSchema -} - -// This is a more relaxed schema than what's in schema.go, since we -// want to maximize the amount of variables we can find even if there -// are erroneous blocks. -var variableDetectionInnerSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: false, - }, - { - Name: "labels", - Required: false, - }, - { - Name: "iterator", - Required: false, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - }, - }, -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go deleted file mode 100644 index 907ef3eb..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go +++ /dev/null @@ -1,43 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" -) - -// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec -// specification to automatically drive the recursive walk through nested -// blocks in the given body. -// -// This is a drop-in replacement for hcldec.Variables which is able to treat -// blocks of type "dynamic" in the same special way that dynblock.Expand would, -// exposing both the variables referenced in the "for_each" and "labels" -// arguments and variables used in the nested "content" block. -func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { - rootNode := WalkVariables(body) - return walkVariablesWithHCLDec(rootNode, spec) -} - -// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the -// minimal set of variables required to call Expand, ignoring variables that -// are referenced only inside normal block contents. See WalkExpandVariables -// for more information. -func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { - rootNode := WalkExpandVariables(body) - return walkVariablesWithHCLDec(rootNode, spec) -} - -func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal { - vars, children := node.Visit(hcldec.ImpliedSchema(spec)) - - if len(children) > 0 { - childSpecs := hcldec.ChildBlockTypes(spec) - for _, child := range children { - if childSpec, exists := childSpecs[child.BlockTypeName]; exists { - vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...) 
- }
- }
- }
-
- return vars
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md
deleted file mode 100644
index ec709470..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# HCL Type Expressions Extension
-
-This HCL extension defines a convention for describing HCL types using function
-call and variable reference syntax, allowing configuration formats to include
-type information provided by users.
-
-The type syntax is processed statically from a hcl.Expression, so it cannot
-use any of the usual language operators. This is similar to type expressions
-in statically-typed programming languages.
-
-```hcl
-variable "example" {
- type = list(string)
-}
-```
-
-The extension is built using the `hcl.ExprAsKeyword` and `hcl.ExprCall`
-functions, and so it relies on the underlying syntax to define how "keyword"
-and "call" are interpreted. The above shows how they are interpreted in
-the HCL native syntax, while the following shows the same information
-expressed in JSON:
-
-```json
-{
- "variable": {
- "example": {
- "type": "list(string)"
- }
- }
-}
-```
-
-Notice that since we have additional contextual information that we intend
-to allow only calls and keywords the JSON syntax is able to parse the given
-string directly as an expression, rather than as a template as would be
-the case for normal expression evaluation.
-
-For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl/v2/ext/typeexpr).
-
-## Type Expression Syntax
-
-When expressed in the native syntax, the following expressions are permitted
-in a type expression:
-
-* `string` - string
-* `bool` - boolean
-* `number` - number
-* `any` - `cty.DynamicPseudoType` (in function `TypeConstraint` only)
-* `list(<type>)` - list of the type given as an argument
-* `set(<type>)` - set of the type given as an argument
-* `map(<type>)` - map of the type given as an argument
-* `tuple([<type>, ...])` - tuple with the element types given in the single list argument
-* `object({<attr name>=<type>, ...})` - object with the attributes and corresponding types given in the single map argument
-
-For example:
-
-* `list(string)`
-* `object({name=string,age=number})`
-* `map(object({name=string,age=number}))`
-
-Note that the object constructor syntax is not fully-general for all possible
-object types because it requires the attribute names to be valid identifiers.
-In practice it is expected that any time an object type is being fixed for
-type checking it will be one that has identifiers as its attributes; object
-types with weird attributes generally show up only from arbitrary object
-constructors in configuration files, which are usually treated either as maps
-or as the dynamic pseudo-type.
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go
deleted file mode 100644
index c4b37957..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Package typeexpr extends HCL with a convention for describing HCL types
-// within configuration files.
-//
-// The type syntax is processed statically from a hcl.Expression, so it cannot
-// use any of the usual language operators. This is similar to type expressions
-// in statically-typed programming languages.
-// -// variable "example" { -// type = list(string) -// } -package typeexpr diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go deleted file mode 100644 index 11b06897..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go +++ /dev/null @@ -1,196 +0,0 @@ -package typeexpr - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -const invalidTypeSummary = "Invalid type specification" - -// getType is the internal implementation of both Type and TypeConstraint, -// using the passed flag to distinguish. When constraint is false, the "any" -// keyword will produce an error. -func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) { - // First we'll try for one of our keywords - kw := hcl.ExprAsKeyword(expr) - switch kw { - case "bool": - return cty.Bool, nil - case "string": - return cty.String, nil - case "number": - return cty.Number, nil - case "any": - if constraint { - return cty.DynamicPseudoType, nil - } - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw), - Subject: expr.Range().Ptr(), - }} - case "list", "map", "set": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw), - Subject: expr.Range().Ptr(), - }} - case "object": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", - Subject: expr.Range().Ptr(), - }} - case "tuple": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The tuple type constructor requires one argument specifying the element types as a list.", - Subject: expr.Range().Ptr(), - }} - case "": - // okay! we'll fall through and try processing as a call, then. - default: - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The keyword %q is not a valid type specification.", kw), - Subject: expr.Range().Ptr(), - }} - } - - // If we get down here then our expression isn't just a keyword, so we'll - // try to process it as a call instead. - call, diags := hcl.ExprCall(expr) - if diags.HasErrors() { - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).", - Subject: expr.Range().Ptr(), - }} - } - - switch call.Name { - case "bool", "string", "number", "any": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name), - Subject: &call.ArgsRange, - }} - } - - if len(call.Arguments) != 1 { - contextRange := call.ArgsRange - subjectRange := call.ArgsRange - if len(call.Arguments) > 1 { - // If we have too many arguments (as opposed to too _few_) then - // we'll highlight the extraneous arguments as the diagnostic - // subject. 
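As a rough illustration of the keyword/call handling above, a caller might parse a type constraint from source text and round-trip it back through TypeString (defined later in this same deleted file). This is a sketch, not from the vendored code; the file name and type string are invented:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	src := []byte(`map(object({name=string,age=number}))`)
	expr, diags := hclsyntax.ParseExpression(src, "types.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// TypeConstraint additionally allows the "any" keyword; Type rejects it.
	ty, diags := typeexpr.TypeConstraint(expr)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// TypeString renders the type back into the same syntax, with object
	// attribute names sorted: map(object({age=number,name=string}))
	fmt.Println(typeexpr.TypeString(ty))
}
```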
- subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range()) - } - - switch call.Name { - case "list", "set", "map": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name), - Subject: &subjectRange, - Context: &contextRange, - }} - case "object": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", - Subject: &subjectRange, - Context: &contextRange, - }} - case "tuple": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The tuple type constructor requires one argument specifying the element types as a list.", - Subject: &subjectRange, - Context: &contextRange, - }} - } - } - - switch call.Name { - - case "list": - ety, diags := getType(call.Arguments[0], constraint) - return cty.List(ety), diags - case "set": - ety, diags := getType(call.Arguments[0], constraint) - return cty.Set(ety), diags - case "map": - ety, diags := getType(call.Arguments[0], constraint) - return cty.Map(ety), diags - case "object": - attrDefs, diags := hcl.ExprMap(call.Arguments[0]) - if diags.HasErrors() { - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.", - Subject: call.Arguments[0].Range().Ptr(), - Context: expr.Range().Ptr(), - }} - } - - atys := make(map[string]cty.Type) - for _, attrDef := range attrDefs { - attrName := hcl.ExprAsKeyword(attrDef.Key) - if attrName == "" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Object constructor map keys must be attribute names.", - Subject: attrDef.Key.Range().Ptr(), - Context: expr.Range().Ptr(), - }) - continue - } - aty, attrDiags := getType(attrDef.Value, constraint) - diags = append(diags, attrDiags...) - atys[attrName] = aty - } - return cty.Object(atys), diags - case "tuple": - elemDefs, diags := hcl.ExprList(call.Arguments[0]) - if diags.HasErrors() { - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Tuple type constructor requires a list of element types.", - Subject: call.Arguments[0].Range().Ptr(), - Context: expr.Range().Ptr(), - }} - } - etys := make([]cty.Type, len(elemDefs)) - for i, defExpr := range elemDefs { - ety, elemDiags := getType(defExpr, constraint) - diags = append(diags, elemDiags...) - etys[i] = ety - } - return cty.Tuple(etys), diags - default: - // Can't access call.Arguments in this path because we've not validated - // that it contains exactly one expression here. 
- return cty.DynamicPseudoType, hcl.Diagnostics{{
- Severity: hcl.DiagError,
- Summary: invalidTypeSummary,
- Detail: fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name),
- Subject: expr.Range().Ptr(),
- }}
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go
deleted file mode 100644
index 3b8f618f..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package typeexpr
-
-import (
- "bytes"
- "fmt"
- "sort"
-
- "github.com/hashicorp/hcl/v2/hclsyntax"
-
- "github.com/hashicorp/hcl/v2"
- "github.com/zclconf/go-cty/cty"
-)
-
-// Type attempts to process the given expression as a type expression and, if
-// successful, returns the resulting type. If unsuccessful, error diagnostics
-// are returned.
-func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
- return getType(expr, false)
-}
-
-// TypeConstraint attempts to parse the given expression as a type constraint
-// and, if successful, returns the resulting type. If unsuccessful, error
-// diagnostics are returned.
-//
-// A type constraint has the same structure as a type, but it additionally
-// allows the keyword "any" to represent cty.DynamicPseudoType, which is often
-// used as a wildcard in type checking and type conversion operations.
-func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
- return getType(expr, true)
-}
-
-// TypeString returns a string rendering of the given type as it would be
-// expected to appear in the HCL native syntax.
-//
-// This is primarily intended for showing types to the user in an application
-// that uses typeexpr, where the user can be assumed to be familiar with the
-// type expression syntax. In applications that do not use typeexpr these
-// results may be confusing to the user and so type.FriendlyName may be
-// preferable, even though it's less precise.
-//
-// TypeString produces reasonable results only for types like what would be
-// produced by the Type and TypeConstraint functions. In particular, it cannot
-// support capsule types.
-func TypeString(ty cty.Type) string {
- // Easy cases first
- switch ty {
- case cty.String:
- return "string"
- case cty.Bool:
- return "bool"
- case cty.Number:
- return "number"
- case cty.DynamicPseudoType:
- return "any"
- }
-
- if ty.IsCapsuleType() {
- panic("TypeString does not support capsule types")
- }
-
- if ty.IsCollectionType() {
- ety := ty.ElementType()
- etyString := TypeString(ety)
- switch {
- case ty.IsListType():
- return fmt.Sprintf("list(%s)", etyString)
- case ty.IsSetType():
- return fmt.Sprintf("set(%s)", etyString)
- case ty.IsMapType():
- return fmt.Sprintf("map(%s)", etyString)
- default:
- // Should never happen because the above is exhaustive
- panic("unsupported collection type")
- }
- }
-
- if ty.IsObjectType() {
- var buf bytes.Buffer
- buf.WriteString("object({")
- atys := ty.AttributeTypes()
- names := make([]string, 0, len(atys))
- for name := range atys {
- names = append(names, name)
- }
- sort.Strings(names)
- first := true
- for _, name := range names {
- aty := atys[name]
- if !first {
- buf.WriteByte(',')
- }
- if !hclsyntax.ValidIdentifier(name) {
- // Should never happen for any type produced by this package,
- // but we'll do something reasonable here just so we don't
- // produce garbage if someone gives us a hand-assembled object
- // type that has weird attribute names.
- // Using Go-style quoting here isn't perfect, since it doesn't - // exactly match HCL syntax, but it's fine for an edge-case. - buf.WriteString(fmt.Sprintf("%q", name)) - } else { - buf.WriteString(name) - } - buf.WriteByte('=') - buf.WriteString(TypeString(aty)) - first = false - } - buf.WriteString("})") - return buf.String() - } - - if ty.IsTupleType() { - var buf bytes.Buffer - buf.WriteString("tuple([") - etys := ty.TupleElementTypes() - first := true - for _, ety := range etys { - if !first { - buf.WriteByte(',') - } - buf.WriteString(TypeString(ety)) - first = false - } - buf.WriteString("])") - return buf.String() - } - - // Should never happen because we covered all cases above. - panic(fmt.Errorf("unsupported type %#v", ty)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go deleted file mode 100644 index 7ba08eee..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go +++ /dev/null @@ -1,304 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/gocty" -) - -// DecodeBody extracts the configuration within the given body into the given -// value. This value must be a non-nil pointer to either a struct or -// a map, where in the former case the configuration will be decoded using -// struct tags and in the latter case only attributes are allowed and their -// values are decoded into the map. -// -// The given EvalContext is used to resolve any variables or functions in -// expressions encountered while decoding. This may be nil to require only -// constant values, for simple applications that do not support variables or -// functions. -// -// The returned diagnostics should be inspected with its HasErrors method to -// determine if the populated value is valid and complete. If error diagnostics -// are returned then the given value may have been partially-populated but -// may still be accessed by a careful caller for static analysis and editor -// integration use-cases. 
-func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
- rv := reflect.ValueOf(val)
- if rv.Kind() != reflect.Ptr {
- panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
- }
-
- return decodeBodyToValue(body, ctx, rv.Elem())
-}
-
-func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
- et := val.Type()
- switch et.Kind() {
- case reflect.Struct:
- return decodeBodyToStruct(body, ctx, val)
- case reflect.Map:
- return decodeBodyToMap(body, ctx, val)
- default:
- panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
- }
-}
-
-func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
- schema, partial := ImpliedBodySchema(val.Interface())
-
- var content *hcl.BodyContent
- var leftovers hcl.Body
- var diags hcl.Diagnostics
- if partial {
- content, leftovers, diags = body.PartialContent(schema)
- } else {
- content, diags = body.Content(schema)
- }
- if content == nil {
- return diags
- }
-
- tags := getFieldTags(val.Type())
-
- if tags.Remain != nil {
- fieldIdx := *tags.Remain
- field := val.Type().Field(fieldIdx)
- fieldV := val.Field(fieldIdx)
- switch {
- case bodyType.AssignableTo(field.Type):
- fieldV.Set(reflect.ValueOf(leftovers))
- case attrsType.AssignableTo(field.Type):
- attrs, attrsDiags := leftovers.JustAttributes()
- if len(attrsDiags) > 0 {
- diags = append(diags, attrsDiags...)
- }
- fieldV.Set(reflect.ValueOf(attrs))
- default:
- diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...)
- }
- }
-
- for name, fieldIdx := range tags.Attributes {
- attr := content.Attributes[name]
- field := val.Type().Field(fieldIdx)
- fieldV := val.Field(fieldIdx)
-
- if attr == nil {
- if !exprType.AssignableTo(field.Type) {
- continue
- }
-
- // As a special case, if the target is of type hcl.Expression then
- // we'll assign an actual expression that evaluates to a cty null,
- // so the caller can deal with it within the cty realm rather
- // than within the Go realm.
- synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
- fieldV.Set(reflect.ValueOf(synthExpr))
- continue
- }
-
- switch {
- case attrType.AssignableTo(field.Type):
- fieldV.Set(reflect.ValueOf(attr))
- case exprType.AssignableTo(field.Type):
- fieldV.Set(reflect.ValueOf(attr.Expr))
- default:
- diags = append(diags, DecodeExpression(
- attr.Expr, ctx, fieldV.Addr().Interface(),
- )...)
- }
- }
-
- blocksByType := content.Blocks.ByType()
-
- for typeName, fieldIdx := range tags.Blocks {
- blocks := blocksByType[typeName]
- field := val.Type().Field(fieldIdx)
-
- ty := field.Type
- isSlice := false
- isPtr := false
- if ty.Kind() == reflect.Slice {
- isSlice = true
- ty = ty.Elem()
- }
- if ty.Kind() == reflect.Ptr {
- isPtr = true
- ty = ty.Elem()
- }
-
- if len(blocks) > 1 && !isSlice {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Duplicate %s block", typeName),
- Detail: fmt.Sprintf(
- "Only one %s block is allowed.
Another was defined at %s.", - typeName, blocks[0].DefRange.String(), - ), - Subject: &blocks[1].DefRange, - }) - continue - } - - if len(blocks) == 0 { - if isSlice || isPtr { - val.Field(fieldIdx).Set(reflect.Zero(field.Type)) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", typeName), - Detail: fmt.Sprintf("A %s block is required.", typeName), - Subject: body.MissingItemRange().Ptr(), - }) - } - continue - } - - switch { - - case isSlice: - elemType := ty - if isPtr { - elemType = reflect.PtrTo(ty) - } - sli := reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks)) - - for i, block := range blocks { - if isPtr { - v := reflect.New(ty) - diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) - sli.Index(i).Set(v) - } else { - diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...) - } - } - - val.Field(fieldIdx).Set(sli) - - default: - block := blocks[0] - if isPtr { - v := reflect.New(ty) - diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) - val.Field(fieldIdx).Set(v) - } else { - diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) - } - - } - - } - - return diags -} - -func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { - attrs, diags := body.JustAttributes() - if attrs == nil { - return diags - } - - mv := reflect.MakeMap(v.Type()) - - for k, attr := range attrs { - switch { - case attrType.AssignableTo(v.Type().Elem()): - mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr)) - case exprType.AssignableTo(v.Type().Elem()): - mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr)) - default: - ev := reflect.New(v.Type().Elem()) - diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...) - mv.SetMapIndex(reflect.ValueOf(k), ev.Elem()) - } - } - - v.Set(mv) - - return diags -} - -func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { - var diags hcl.Diagnostics - - ty := v.Type() - - switch { - case blockType.AssignableTo(ty): - v.Elem().Set(reflect.ValueOf(block)) - case bodyType.AssignableTo(ty): - v.Elem().Set(reflect.ValueOf(block.Body)) - case attrsType.AssignableTo(ty): - attrs, attrsDiags := block.Body.JustAttributes() - if len(attrsDiags) > 0 { - diags = append(diags, attrsDiags...) - } - v.Elem().Set(reflect.ValueOf(attrs)) - default: - diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...) - - if len(block.Labels) > 0 { - blockTags := getFieldTags(ty) - for li, lv := range block.Labels { - lfieldIdx := blockTags.Labels[li].FieldIndex - v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) - } - } - - } - - return diags -} - -// DecodeExpression extracts the value of the given expression into the given -// value. This value must be something that gocty is able to decode into, -// since the final decoding is delegated to that package. -// -// The given EvalContext is used to resolve any variables or functions in -// expressions encountered while decoding. This may be nil to require only -// constant values, for simple applications that do not support variables or -// functions. -// -// The returned diagnostics should be inspected with its HasErrors method to -// determine if the populated value is valid and complete. If error diagnostics -// are returned then the given value may have been partially-populated but -// may still be accessed by a careful caller for static analysis and editor -// integration use-cases. 
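Putting the decoder together, here is a compact sketch of DecodeBody against a tagged struct. It is not part of the patch; the configuration source and field names are invented:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclparse"
)

type Service struct {
	Name    string   `hcl:"name,label"`
	Command []string `hcl:"command"`
}

type Config struct {
	IOMode   string    `hcl:"io_mode"`
	Services []Service `hcl:"service,block"`
}

const src = `
io_mode = "async"

service "http" {
  command = ["hcl-echo"]
}
`

func main() {
	f, diags := hclparse.NewParser().ParseHCL([]byte(src), "config.hcl")
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	var cfg Config
	// nil EvalContext: only constant expressions are allowed in the config.
	if diags := gohcl.DecodeBody(f.Body, nil, &cfg); diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(cfg.IOMode, cfg.Services[0].Name) // async http
}
```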
-func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { - srcVal, diags := expr.Value(ctx) - - convTy, err := gocty.ImpliedType(val) - if err != nil { - panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err)) - } - - srcVal, err = convert.Convert(srcVal, convTy) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), - Subject: expr.StartRange().Ptr(), - Context: expr.Range().Ptr(), - }) - return diags - } - - err = gocty.FromCtyValue(srcVal, val) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), - Subject: expr.StartRange().Ptr(), - Context: expr.Range().Ptr(), - }) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go deleted file mode 100644 index aa3c6ea9..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go +++ /dev/null @@ -1,53 +0,0 @@ -// Package gohcl allows decoding HCL configurations into Go data structures. -// -// It provides a convenient and concise way of describing the schema for -// configuration and then accessing the resulting data via native Go -// types. -// -// A struct field tag scheme is used, similar to other decoding and -// unmarshalling libraries. The tags are formatted as in the following example: -// -// ThingType string `hcl:"thing_type,attr"` -// -// Within each tag there are two comma-separated tokens. The first is the -// name of the corresponding construct in configuration, while the second -// is a keyword giving the kind of construct expected. The following -// kind keywords are supported: -// -// attr (the default) indicates that the value is to be populated from an attribute -// block indicates that the value is to populated from a block -// label indicates that the value is to populated from a block label -// remain indicates that the value is to be populated from the remaining body after populating other fields -// -// "attr" fields may either be of type *hcl.Expression, in which case the raw -// expression is assigned, or of any type accepted by gocty, in which case -// gocty will be used to assign the value to a native Go type. -// -// "block" fields may be of type *hcl.Block or hcl.Body, in which case the -// corresponding raw value is assigned, or may be a struct that recursively -// uses the same tags. Block fields may also be slices of any of these types, -// in which case multiple blocks of the corresponding type are decoded into -// the slice. -// -// "label" fields are considered only in a struct used as the type of a field -// marked as "block", and are used sequentially to capture the labels of -// the blocks being decoded. In this case, the name token is used only as -// an identifier for the label in diagnostic messages. -// -// "remain" can be placed on a single field that may be either of type -// hcl.Body or hcl.Attributes, in which case any remaining body content is -// placed into this field for delayed processing. If no "remain" field is -// present then any attributes or blocks not matched by another valid tag -// will cause an error diagnostic. -// -// Only a subset of this tagging/typing vocabulary is supported for the -// "Encode" family of functions. 
See the EncodeIntoBody docs for full details
-// on the constraints there.
-//
-// Broadly-speaking this package deals with two types of error. The first is
-// errors in the configuration itself, which are returned as diagnostics
-// written with the configuration author as the target audience. The second
-// is bugs in the calling program, such as invalid struct tags, which are
-// surfaced via panics since there can be no useful runtime handling of such
-// errors and they should certainly not be returned to the user as diagnostics.
-package gohcl
diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go
deleted file mode 100644
index d612e09c..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package gohcl
-
-import (
- "fmt"
- "reflect"
- "sort"
-
- "github.com/hashicorp/hcl/v2/hclwrite"
- "github.com/zclconf/go-cty/cty/gocty"
-)
-
-// EncodeIntoBody replaces the contents of the given hclwrite Body with
-// attributes and blocks derived from the given value, which must be a
-// struct value or a pointer to a struct value with the struct tags defined
-// in this package.
-//
-// This function can work only with fully-decoded data. It will ignore any
-// fields tagged as "remain", any fields that decode attributes into either
-// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
-// into hcl.Attributes values. This function does not have enough information
-// to complete the decoding of these types.
-//
-// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
-// to produce a whole hclwrite.Block including block labels.
-//
-// As long as a suitable value is given to encode and the destination body
-// is non-nil, this function will always complete. It will panic in case of
-// any errors in the calling program, such as passing an inappropriate type
-// or a nil body.
-//
-// The layout of the resulting HCL source is derived from the ordering of
-// the struct fields, with blank lines around nested blocks of different types.
-// Fields representing attributes should usually precede those representing
-// blocks so that the attributes can group together in the result. For more
-// control, use the hclwrite API directly.
-func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
- rv := reflect.ValueOf(val)
- ty := rv.Type()
- if ty.Kind() == reflect.Ptr {
- rv = rv.Elem()
- ty = rv.Type()
- }
- if ty.Kind() != reflect.Struct {
- panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
- }
-
- tags := getFieldTags(ty)
- populateBody(rv, ty, tags, dst)
-}
-
-// EncodeAsBlock creates a new hclwrite.Block populated with the data from
-// the given value, which must be a struct or pointer to struct with the
-// struct tags defined in this package.
-//
-// If the given struct type has fields tagged with "label" tags then they
-// will be used in order to annotate the created block with labels.
-//
-// This function has the same constraints as EncodeIntoBody and will panic
-// if they are violated.
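A corresponding sketch for the encode direction, writing a tagged struct out through hclwrite; the struct and expected output here are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

type App struct {
	Name     string `hcl:"name"`
	Replicas int    `hcl:"replicas"`
}

func main() {
	f := hclwrite.NewEmptyFile()
	// Attributes are emitted in struct-field order, as documented above.
	gohcl.EncodeIntoBody(&App{Name: "echo", Replicas: 2}, f.Body())
	fmt.Printf("%s", f.Bytes())
	// name     = "echo"
	// replicas = 2
}
```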
-func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block { - rv := reflect.ValueOf(val) - ty := rv.Type() - if ty.Kind() == reflect.Ptr { - rv = rv.Elem() - ty = rv.Type() - } - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) - } - - tags := getFieldTags(ty) - labels := make([]string, len(tags.Labels)) - for i, lf := range tags.Labels { - lv := rv.Field(lf.FieldIndex) - // We just stringify whatever we find. It should always be a string - // but if not then we'll still do something reasonable. - labels[i] = fmt.Sprintf("%s", lv.Interface()) - } - - block := hclwrite.NewBlock(blockType, labels) - populateBody(rv, ty, tags, block.Body()) - return block -} - -func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) { - nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks)) - namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks)) - for n, i := range tags.Attributes { - nameIdxs[n] = i - namesOrder = append(namesOrder, n) - } - for n, i := range tags.Blocks { - nameIdxs[n] = i - namesOrder = append(namesOrder, n) - } - sort.SliceStable(namesOrder, func(i, j int) bool { - ni, nj := namesOrder[i], namesOrder[j] - return nameIdxs[ni] < nameIdxs[nj] - }) - - dst.Clear() - - prevWasBlock := false - for _, name := range namesOrder { - fieldIdx := nameIdxs[name] - field := ty.Field(fieldIdx) - fieldTy := field.Type - fieldVal := rv.Field(fieldIdx) - - if fieldTy.Kind() == reflect.Ptr { - fieldTy = fieldTy.Elem() - fieldVal = fieldVal.Elem() - } - - if _, isAttr := tags.Attributes[name]; isAttr { - - if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) { - continue // ignore undecoded fields - } - if !fieldVal.IsValid() { - continue // ignore (field value is nil pointer) - } - if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() { - continue // ignore - } - if prevWasBlock { - dst.AppendNewline() - prevWasBlock = false - } - - valTy, err := gocty.ImpliedType(fieldVal.Interface()) - if err != nil { - panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err)) - } - - val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy) - if err != nil { - // This should never happen, since we should always be able - // to decode into the implied type. 
- panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err)) - } - - dst.SetAttributeValue(name, val) - - } else { // must be a block, then - elemTy := fieldTy - isSeq := false - if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array { - isSeq = true - elemTy = elemTy.Elem() - } - - if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) { - continue // ignore undecoded fields - } - prevWasBlock = false - - if isSeq { - l := fieldVal.Len() - for i := 0; i < l; i++ { - elemVal := fieldVal.Index(i) - if !elemVal.IsValid() { - continue // ignore (elem value is nil pointer) - } - if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() { - continue // ignore - } - block := EncodeAsBlock(elemVal.Interface(), name) - if !prevWasBlock { - dst.AppendNewline() - prevWasBlock = true - } - dst.AppendBlock(block) - } - } else { - if !fieldVal.IsValid() { - continue // ignore (field value is nil pointer) - } - if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() { - continue // ignore - } - block := EncodeAsBlock(fieldVal.Interface(), name) - if !prevWasBlock { - dst.AppendNewline() - prevWasBlock = true - } - dst.AppendBlock(block) - } - } - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go deleted file mode 100644 index ecf6ddba..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go +++ /dev/null @@ -1,174 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "github.com/hashicorp/hcl/v2" -) - -// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the -// given value, which must be a struct value or a pointer to one. If an -// inappropriate value is passed, this function will panic. -// -// The second return argument indicates whether the given struct includes -// a "remain" field, and thus the returned schema is non-exhaustive. -// -// This uses the tags on the fields of the struct to discover how each -// field's value should be expressed within configuration. If an invalid -// mapping is attempted, this function will panic. -func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) { - ty := reflect.TypeOf(val) - - if ty.Kind() == reflect.Ptr { - ty = ty.Elem() - } - - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("given value must be struct, not %T", val)) - } - - var attrSchemas []hcl.AttributeSchema - var blockSchemas []hcl.BlockHeaderSchema - - tags := getFieldTags(ty) - - attrNames := make([]string, 0, len(tags.Attributes)) - for n := range tags.Attributes { - attrNames = append(attrNames, n) - } - sort.Strings(attrNames) - for _, n := range attrNames { - idx := tags.Attributes[n] - optional := tags.Optional[n] - field := ty.Field(idx) - - var required bool - - switch { - case field.Type.AssignableTo(exprType): - // If we're decoding to hcl.Expression then absense can be - // indicated via a null value, so we don't specify that - // the field is required during decoding. 
- required = false - case field.Type.Kind() != reflect.Ptr && !optional: - required = true - default: - required = false - } - - attrSchemas = append(attrSchemas, hcl.AttributeSchema{ - Name: n, - Required: required, - }) - } - - blockNames := make([]string, 0, len(tags.Blocks)) - for n := range tags.Blocks { - blockNames = append(blockNames, n) - } - sort.Strings(blockNames) - for _, n := range blockNames { - idx := tags.Blocks[n] - field := ty.Field(idx) - fty := field.Type - if fty.Kind() == reflect.Slice { - fty = fty.Elem() - } - if fty.Kind() == reflect.Ptr { - fty = fty.Elem() - } - if fty.Kind() != reflect.Struct { - panic(fmt.Sprintf( - "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, - )) - } - ftags := getFieldTags(fty) - var labelNames []string - if len(ftags.Labels) > 0 { - labelNames = make([]string, len(ftags.Labels)) - for i, l := range ftags.Labels { - labelNames[i] = l.Name - } - } - - blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ - Type: n, - LabelNames: labelNames, - }) - } - - partial = tags.Remain != nil - schema = &hcl.BodySchema{ - Attributes: attrSchemas, - Blocks: blockSchemas, - } - return schema, partial -} - -type fieldTags struct { - Attributes map[string]int - Blocks map[string]int - Labels []labelField - Remain *int - Optional map[string]bool -} - -type labelField struct { - FieldIndex int - Name string -} - -func getFieldTags(ty reflect.Type) *fieldTags { - ret := &fieldTags{ - Attributes: map[string]int{}, - Blocks: map[string]int{}, - Optional: map[string]bool{}, - } - - ct := ty.NumField() - for i := 0; i < ct; i++ { - field := ty.Field(i) - tag := field.Tag.Get("hcl") - if tag == "" { - continue - } - - comma := strings.Index(tag, ",") - var name, kind string - if comma != -1 { - name = tag[:comma] - kind = tag[comma+1:] - } else { - name = tag - kind = "attr" - } - - switch kind { - case "attr": - ret.Attributes[name] = i - case "block": - ret.Blocks[name] = i - case "label": - ret.Labels = append(ret.Labels, labelField{ - FieldIndex: i, - Name: name, - }) - case "remain": - if ret.Remain != nil { - panic("only one 'remain' tag is permitted") - } - idx := i // copy, because this loop will continue assigning to i - ret.Remain = &idx - case "optional": - ret.Attributes[name] = i - ret.Optional[name] = true - default: - panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) - } - } - - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go deleted file mode 100644 index a8d00f8f..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go +++ /dev/null @@ -1,16 +0,0 @@ -package gohcl - -import ( - "reflect" - - "github.com/hashicorp/hcl/v2" -) - -var victimExpr hcl.Expression -var victimBody hcl.Body - -var exprType = reflect.TypeOf(&victimExpr).Elem() -var bodyType = reflect.TypeOf(&victimBody).Elem() -var blockType = reflect.TypeOf((*hcl.Block)(nil)) -var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) -var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go deleted file mode 100644 index 71de4519..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go +++ /dev/null @@ -1,21 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -type blockLabel struct { - Value 
string - Range hcl.Range -} - -func labelsForBlock(block *hcl.Block) []blockLabel { - ret := make([]blockLabel, len(block.Labels)) - for i := range block.Labels { - ret[i] = blockLabel{ - Value: block.Labels[i], - Range: block.LabelRanges[i], - } - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go deleted file mode 100644 index c6e42236..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) { - schema := ImpliedSchema(spec) - - var content *hcl.BodyContent - var diags hcl.Diagnostics - var leftovers hcl.Body - - if partial { - content, leftovers, diags = body.PartialContent(schema) - } else { - content, diags = body.Content(schema) - } - - val, valDiags := spec.decode(content, blockLabels, ctx) - diags = append(diags, valDiags...) - - return val, leftovers, diags -} - -func impliedType(spec Spec) cty.Type { - return spec.impliedType() -} - -func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range { - schema := ImpliedSchema(spec) - content, _, _ := body.PartialContent(schema) - - return spec.sourceRange(content, blockLabels) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go deleted file mode 100644 index 23bfe542..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package hcldec provides a higher-level API for unpacking the content of -// HCL bodies, implemented in terms of the low-level "Content" API exposed -// by the bodies themselves. -// -// It allows decoding an entire nested configuration in a single operation -// by providing a description of the intended structure. -// -// For some applications it may be more convenient to use the "gohcl" -// package, which has a similar purpose but decodes directly into native -// Go data types. hcldec instead targets the cty type system, and thus allows -// a cty-driven application to remain within that type system. -package hcldec diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go deleted file mode 100644 index e2027cfd..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go +++ /dev/null @@ -1,23 +0,0 @@ -package hcldec - -import ( - "encoding/gob" -) - -func init() { - // Every Spec implementation should be registered with gob, so that - // specs can be sent over gob channels, such as using - // github.com/hashicorp/go-plugin with plugins that need to describe - // what shape of configuration they are expecting. 
- gob.Register(ObjectSpec(nil)) - gob.Register(TupleSpec(nil)) - gob.Register((*AttrSpec)(nil)) - gob.Register((*LiteralSpec)(nil)) - gob.Register((*ExprSpec)(nil)) - gob.Register((*BlockSpec)(nil)) - gob.Register((*BlockListSpec)(nil)) - gob.Register((*BlockSetSpec)(nil)) - gob.Register((*BlockMapSpec)(nil)) - gob.Register((*BlockLabelSpec)(nil)) - gob.Register((*DefaultSpec)(nil)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go deleted file mode 100644 index 1fa548d0..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go +++ /dev/null @@ -1,81 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// Decode interprets the given body using the given specification and returns -// the resulting value. If the given body is not valid per the spec, error -// diagnostics are returned and the returned value is likely to be incomplete. -// -// The ctx argument may be nil, in which case any references to variables or -// functions will produce error diagnostics. -func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - val, _, diags := decode(body, nil, ctx, spec, false) - return val, diags -} - -// PartialDecode is like Decode except that it permits "leftover" items in -// the top-level body, which are returned as a new body to allow for -// further processing. -// -// Any descendent block bodies are _not_ decoded partially and thus must -// be fully described by the given specification. -func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) { - return decode(body, nil, ctx, spec, true) -} - -// ImpliedType returns the value type that should result from decoding the -// given spec. -func ImpliedType(spec Spec) cty.Type { - return impliedType(spec) -} - -// SourceRange interprets the given body using the given specification and -// then returns the source range of the value that would be used to -// fulfill the spec. -// -// This can be used if application-level validation detects value errors, to -// obtain a reasonable SourceRange to use for generated diagnostics. It works -// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...) -// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will -// be less useful the broader the specification, so e.g. a spec that returns -// the entirety of all of the blocks of a given type is likely to be -// _particularly_ arbitrary and useless. -// -// If the given body is not valid per the given spec, the result is best-effort -// and may not actually be something ideal. It's expected that an application -// will already have used Decode or PartialDecode earlier and thus had an -// opportunity to detect and report spec violations. -func SourceRange(body hcl.Body, spec Spec) hcl.Range { - return sourceRange(body, nil, spec) -} - -// ChildBlockTypes returns a map of all of the child block types declared -// by the given spec, with block type names as keys and the associated -// nested body specs as values. -func ChildBlockTypes(spec Spec) map[string]Spec { - ret := map[string]Spec{} - - // visitSameBodyChildren walks through the spec structure, calling - // the given callback for each descendent spec encountered. We are - // interested in the specs that reference attributes and blocks. 
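As a concrete anchor for the Decode API above, a sketch decoding a body against an ObjectSpec of AttrSpecs; the source text and names are invented:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclparse"
	"github.com/zclconf/go-cty/cty"
)

const src = `
io_mode = "async"
ports   = [8080, 8443]
`

func main() {
	f, diags := hclparse.NewParser().ParseHCL([]byte(src), "config.hcl")
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	spec := hcldec.ObjectSpec{
		"io_mode": &hcldec.AttrSpec{Name: "io_mode", Type: cty.String},
		"ports":   &hcldec.AttrSpec{Name: "ports", Type: cty.List(cty.Number), Required: true},
	}

	// nil EvalContext: variable references would produce error diagnostics.
	val, diags := hcldec.Decode(f.Body, spec, nil)
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(val.GetAttr("io_mode").AsString()) // async
}
```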
- var visit visitFunc - visit = func(s Spec) { - if bs, ok := s.(blockSpec); ok { - for _, blockS := range bs.blockHeaderSchemata() { - nested := bs.nestedSpec() - if nested != nil { // nil can be returned to dynamically opt out of this interface - ret[blockS.Type] = nested - } - } - } - - s.visitSameBodyChildren(visit) - } - - visit(spec) - - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go deleted file mode 100644 index ddbe7fa4..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -// ImpliedSchema returns the *hcl.BodySchema implied by the given specification. -// This is the schema that the Decode function will use internally to -// access the content of a given body. -func ImpliedSchema(spec Spec) *hcl.BodySchema { - var attrs []hcl.AttributeSchema - var blocks []hcl.BlockHeaderSchema - - // visitSameBodyChildren walks through the spec structure, calling - // the given callback for each descendent spec encountered. We are - // interested in the specs that reference attributes and blocks. - var visit visitFunc - visit = func(s Spec) { - if as, ok := s.(attrSpec); ok { - attrs = append(attrs, as.attrSchemata()...) - } - - if bs, ok := s.(blockSpec); ok { - blocks = append(blocks, bs.blockHeaderSchemata()...) - } - - s.visitSameBodyChildren(visit) - } - - visit(spec) - - return &hcl.BodySchema{ - Attributes: attrs, - Blocks: blocks, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go deleted file mode 100644 index 6f2d9732..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go +++ /dev/null @@ -1,1567 +0,0 @@ -package hcldec - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// A Spec is a description of how to decode a hcl.Body to a cty.Value. -// -// The various other types in this package whose names end in "Spec" are -// the spec implementations. The most common top-level spec is ObjectSpec, -// which decodes body content into a cty.Value of an object type. -type Spec interface { - // Perform the decode operation on the given body, in the context of - // the given block (which might be null), using the given eval context. - // - // "block" is provided only by the nested calls performed by the spec - // types that work on block bodies. - decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) - - // Return the cty.Type that should be returned when decoding a body with - // this spec. - impliedType() cty.Type - - // Call the given callback once for each of the nested specs that would - // get decoded with the same body and block as the receiver. This should - // not descend into the nested specs used when decoding blocks. - visitSameBodyChildren(cb visitFunc) - - // Determine the source range of the value that would be returned for the - // spec in the given content, in the context of the given block - // (which might be null). If the corresponding item is missing, return - // a place where it might be inserted. 
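ImpliedSchema is what Decode uses internally, but callers can also invoke it to inspect the flattened schema a spec demands. A short sketch (spec and output are illustrative; note that it reports only the top-level body, not specs nested inside blocks):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	spec := hcldec.ObjectSpec{
		"io_mode": &hcldec.AttrSpec{Name: "io_mode", Type: cty.String},
		"service": &hcldec.BlockSpec{
			TypeName: "service",
			Nested: hcldec.ObjectSpec{
				"command": &hcldec.AttrSpec{Name: "command", Type: cty.List(cty.String), Required: true},
			},
		},
	}

	schema := hcldec.ImpliedSchema(spec)
	for _, a := range schema.Attributes {
		fmt.Println("attr:", a.Name, a.Required) // attr: io_mode false
	}
	for _, b := range schema.Blocks {
		fmt.Println("block:", b.Type) // block: service
	}
}
```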
- sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range -} - -type visitFunc func(spec Spec) - -// An ObjectSpec is a Spec that produces a cty.Value of an object type whose -// attributes correspond to the keys of the spec map. -type ObjectSpec map[string]Spec - -// attrSpec is implemented by specs that require attributes from the body. -type attrSpec interface { - attrSchemata() []hcl.AttributeSchema -} - -// blockSpec is implemented by specs that require blocks from the body. -type blockSpec interface { - blockHeaderSchemata() []hcl.BlockHeaderSchema - nestedSpec() Spec -} - -// specNeedingVariables is implemented by specs that can use variables -// from the EvalContext, to declare which variables they need. -type specNeedingVariables interface { - variablesNeeded(content *hcl.BodyContent) []hcl.Traversal -} - -func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) { - for _, c := range s { - cb(c) - } -} - -func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - vals := make(map[string]cty.Value, len(s)) - var diags hcl.Diagnostics - - for k, spec := range s { - var kd hcl.Diagnostics - vals[k], kd = spec.decode(content, blockLabels, ctx) - diags = append(diags, kd...) - } - - return cty.ObjectVal(vals), diags -} - -func (s ObjectSpec) impliedType() cty.Type { - if len(s) == 0 { - return cty.EmptyObject - } - - attrTypes := make(map[string]cty.Type) - for k, childSpec := range s { - attrTypes[k] = childSpec.impliedType() - } - return cty.Object(attrTypes) -} - -func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // This is not great, but the best we can do. In practice, it's rather - // strange to ask for the source range of an entire top-level body, since - // that's already readily available to the caller. - return content.MissingItemRange -} - -// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose -// elements correspond to the elements of the spec slice. -type TupleSpec []Spec - -func (s TupleSpec) visitSameBodyChildren(cb visitFunc) { - for _, c := range s { - cb(c) - } -} - -func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - vals := make([]cty.Value, len(s)) - var diags hcl.Diagnostics - - for i, spec := range s { - var ed hcl.Diagnostics - vals[i], ed = spec.decode(content, blockLabels, ctx) - diags = append(diags, ed...) - } - - return cty.TupleVal(vals), diags -} - -func (s TupleSpec) impliedType() cty.Type { - if len(s) == 0 { - return cty.EmptyTuple - } - - attrTypes := make([]cty.Type, len(s)) - for i, childSpec := range s { - attrTypes[i] = childSpec.impliedType() - } - return cty.Tuple(attrTypes) -} - -func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // This is not great, but the best we can do. In practice, it's rather - // strange to ask for the source range of an entire top-level body, since - // that's already readily available to the caller. - return content.MissingItemRange -} - -// An AttrSpec is a Spec that evaluates a particular attribute expression in -// the body and returns its resulting value converted to the requested type, -// or produces a diagnostic if the type is incorrect. 
-type AttrSpec struct { - Name string - Type cty.Type - Required bool -} - -func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// specNeedingVariables implementation -func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - attr, exists := content.Attributes[s.Name] - if !exists { - return nil - } - - return attr.Expr.Variables() -} - -// attrSpec implementation -func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema { - return []hcl.AttributeSchema{ - { - Name: s.Name, - Required: s.Required, - }, - } -} - -func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - attr, exists := content.Attributes[s.Name] - if !exists { - return content.MissingItemRange - } - - return attr.Expr.Range() -} - -func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - attr, exists := content.Attributes[s.Name] - if !exists { - // We don't need to check required and emit a diagnostic here, because - // that would already have happened when building "content". - return cty.NullVal(s.Type), nil - } - - val, diags := attr.Expr.Value(ctx) - - convVal, err := convert.Convert(val, s.Type) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect attribute value type", - Detail: fmt.Sprintf( - "Inappropriate value for attribute %q: %s.", - s.Name, err.Error(), - ), - Subject: attr.Expr.StartRange().Ptr(), - Context: hcl.RangeBetween(attr.NameRange, attr.Expr.StartRange()).Ptr(), - }) - // We'll return an unknown value of the _correct_ type so that the - // incomplete result can still be used for some analysis use-cases. - val = cty.UnknownVal(s.Type) - } else { - val = convVal - } - - return val, diags -} - -func (s *AttrSpec) impliedType() cty.Type { - return s.Type -} - -// A LiteralSpec is a Spec that produces the given literal value, ignoring -// the given body. -type LiteralSpec struct { - Value cty.Value -} - -func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return s.Value, nil -} - -func (s *LiteralSpec) impliedType() cty.Type { - return s.Value.Type() -} - -func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // No sensible range to return for a literal, so the caller had better - // ensure it doesn't cause any diagnostics. - return hcl.Range{ - Filename: "", - } -} - -// An ExprSpec is a Spec that evaluates the given expression, ignoring the -// given body. 
-type ExprSpec struct { - Expr hcl.Expression -} - -func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// specNeedingVariables implementation -func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - return s.Expr.Variables() -} - -func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return s.Expr.Value(ctx) -} - -func (s *ExprSpec) impliedType() cty.Type { - // We can't know the type of our expression until we evaluate it - return cty.DynamicPseudoType -} - -func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - return s.Expr.Range() -} - -// A BlockSpec is a Spec that produces a cty.Value by decoding the contents -// of a single nested block of a given type, using a nested spec. -// -// If the Required flag is not set, the nested block may be omitted, in which -// case a null value is produced. If it _is_ set, an error diagnostic is -// produced if there are no nested blocks of the given type. -type BlockSpec struct { - TypeName string - Nested Spec - Required bool -} - -func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return nil - } - - return Variables(childBlock.Body, s.Nested) -} - -func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - if childBlock != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "Only one block of type %q is allowed. Previous definition was at %s.", - s.TypeName, childBlock.DefRange.String(), - ), - Subject: &candidate.DefRange, - }) - break - } - - childBlock = candidate - } - - if childBlock == nil { - if s.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block of type %q is required here.", s.TypeName, - ), - Subject: &content.MissingItemRange, - }) - } - return cty.NullVal(s.Nested.impliedType()), diags - } - - if s.Nested == nil { - panic("BlockSpec with no Nested Spec") - } - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) 
- return val, diags -} - -func (s *BlockSpec) impliedType() cty.Type { - return s.Nested.impliedType() -} - -func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockListSpec is a Spec that produces a cty list of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -type BlockListSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockListSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockListSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) - elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - var ret cty.Value - - if len(elems) == 0 { - ret = cty.ListValEmpty(s.Nested.impliedType()) - } else { - // Since our target is a list, all of the decoded elements must have the - // same type or cty.ListVal will panic below. Different types can arise - // if there is an attribute spec of type cty.DynamicPseudoType in the - // nested spec; all given values must be convertible to a single type - // in order for the result to be considered valid. - etys := make([]cty.Type, len(elems)) - for i, v := range elems { - etys[i] = v.Type() - } - ety, convs := convert.UnifyUnsafe(etys) - if ety == cty.NilType { - // FIXME: This is a pretty terrible error message.
- diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName), - Detail: "Corresponding attributes in all blocks of this type must be the same.", - Subject: &sourceRanges[0], - }) - return cty.DynamicVal, diags - } - for i, v := range elems { - if convs[i] != nil { - newV, err := convs[i](v) - if err != nil { - // FIXME: This is a pretty terrible error message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName), - Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), - Subject: &sourceRanges[i], - }) - // Bail early here so we won't panic below in cty.ListVal - return cty.DynamicVal, diags - } - elems[i] = newV - } - } - - ret = cty.ListVal(elems) - } - - return ret, diags -} - -func (s *BlockListSpec) impliedType() cty.Type { - return cty.List(s.Nested.impliedType()) -} - -func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockTupleSpec is a Spec that produces a cty tuple of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -// -// This is similar to BlockListSpec, but it permits the nested blocks to have -// different result types in situations where cty.DynamicPseudoType attributes -// are present. -type BlockTupleSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockTupleSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockTupleSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockTupleSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockTupleSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockTupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockTupleSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...)
- elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - var ret cty.Value - - if len(elems) == 0 { - ret = cty.EmptyTupleVal - } else { - ret = cty.TupleVal(elems) - } - - return ret, diags -} - -func (s *BlockTupleSpec) impliedType() cty.Type { - // We can't predict our type, because we don't know how many blocks - // there will be until we decode. - return cty.DynamicPseudoType -} - -func (s *BlockTupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockSetSpec is a Spec that produces a cty set of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -type BlockSetSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockSetSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockSetSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) 
- elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - var ret cty.Value - - if len(elems) == 0 { - ret = cty.SetValEmpty(s.Nested.impliedType()) - } else { - // Since our target is a set, all of the decoded elements must have the - // same type or cty.SetVal will panic below. Different types can arise - // if there is an attribute spec of type cty.DynamicPseudoType in the - // nested spec; all given values must be convertible to a single type - // in order for the result to be considered valid. - etys := make([]cty.Type, len(elems)) - for i, v := range elems { - etys[i] = v.Type() - } - ety, convs := convert.UnifyUnsafe(etys) - if ety == cty.NilType { - // FIXME: This is a pretty terrible error message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName), - Detail: "Corresponding attributes in all blocks of this type must be the same.", - Subject: &sourceRanges[0], - }) - return cty.DynamicVal, diags - } - for i, v := range elems { - if convs[i] != nil { - newV, err := convs[i](v) - if err != nil { - // FIXME: This is a pretty terrible error message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName), - Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), - Subject: &sourceRanges[i], - }) - // Bail early here so we won't panic below in cty.SetVal - return cty.DynamicVal, diags - } - elems[i] = newV - } - } - - ret = cty.SetVal(elems) - } - - return ret, diags -} - -func (s *BlockSetSpec) impliedType() cty.Type { - return cty.Set(s.Nested.impliedType()) -} - -func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockMapSpec is a Spec that produces a cty map of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -// -// One level of map structure is created for each of the given label names. -// There must be at least one given label name.
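[Editorial aside; the BlockMapSpec type declaration continues below. First, a short sketch, not part of this patch, of the repeated-block decoding BlockListSpec implements. The "endpoint" block type and its attribute are invented, and f is a parsed file as in the earlier sketch.]

// Each `endpoint { url = ... }` block becomes one element of a cty list;
// MinItems: 1 makes at least one such block mandatory.
spec := &hcldec.BlockListSpec{
	TypeName: "endpoint",
	Nested: hcldec.ObjectSpec{
		"url": &hcldec.AttrSpec{Name: "url", Type: cty.String, Required: true},
	},
	MinItems: 1,
}

val, diags := hcldec.Decode(f.Body, spec, nil) // a cty list of objects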
-type BlockMapSpec struct { - TypeName string - LabelNames []string - Nested Spec -} - -func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), - }, - } -} - -// blockSpec implementation -func (s *BlockMapSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockMapSpec with no Nested Spec") - } - if ImpliedType(s).HasDynamicTypes() { - panic("cty.DynamicPseudoType attributes may not be used inside a BlockMapSpec") - } - - elems := map[string]interface{}{} - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - childLabels := labelsForBlock(childBlock) - val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) - targetMap := elems - for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { - if _, exists := targetMap[key]; !exists { - targetMap[key] = make(map[string]interface{}) - } - targetMap = targetMap[key].(map[string]interface{}) - } - - diags = append(diags, childDiags...) - - key := childBlock.Labels[len(s.LabelNames)-1] - if _, exists := targetMap[key]; exists { - labelsBuf := bytes.Buffer{} - for _, label := range childBlock.Labels { - fmt.Fprintf(&labelsBuf, " %q", label) - } - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block for %s%s was already defined. The %s labels must be unique.", - s.TypeName, labelsBuf.String(), s.TypeName, - ), - Subject: &childBlock.DefRange, - }) - continue - } - - targetMap[key] = val - } - - if len(elems) == 0 { - return cty.MapValEmpty(s.Nested.impliedType()), diags - } - - var ctyMap func(map[string]interface{}, int) cty.Value - ctyMap = func(raw map[string]interface{}, depth int) cty.Value { - vals := make(map[string]cty.Value, len(raw)) - if depth == 1 { - for k, v := range raw { - vals[k] = v.(cty.Value) - } - } else { - for k, v := range raw { - vals[k] = ctyMap(v.(map[string]interface{}), depth-1) - } - } - return cty.MapVal(vals) - } - - return ctyMap(elems, len(s.LabelNames)), diags -} - -func (s *BlockMapSpec) impliedType() cty.Type { - ret := s.Nested.impliedType() - for _ = range s.LabelNames { - ret = cty.Map(ret) - } - return ret -} - -func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. 
- - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockObjectSpec is a Spec that produces a cty object of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -// -// One level of object structure is created for each of the given label names. -// There must be at least one given label name. -// -// This is similar to BlockMapSpec, but it permits the nested blocks to have -// different result types in situations where cty.DynamicPseudoType attributes -// are present. -type BlockObjectSpec struct { - TypeName string - LabelNames []string - Nested Spec -} - -func (s *BlockObjectSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockObjectSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), - }, - } -} - -// blockSpec implementation -func (s *BlockObjectSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockObjectSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockObjectSpec with no Nested Spec") - } - - elems := map[string]interface{}{} - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - childLabels := labelsForBlock(childBlock) - val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) - targetMap := elems - for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { - if _, exists := targetMap[key]; !exists { - targetMap[key] = make(map[string]interface{}) - } - targetMap = targetMap[key].(map[string]interface{}) - } - - diags = append(diags, childDiags...) - - key := childBlock.Labels[len(s.LabelNames)-1] - if _, exists := targetMap[key]; exists { - labelsBuf := bytes.Buffer{} - for _, label := range childBlock.Labels { - fmt.Fprintf(&labelsBuf, " %q", label) - } - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block for %s%s was already defined. 
The %s labels must be unique.", - s.TypeName, labelsBuf.String(), s.TypeName, - ), - Subject: &childBlock.DefRange, - }) - continue - } - - targetMap[key] = val - } - - if len(elems) == 0 { - return cty.EmptyObjectVal, diags - } - - var ctyObj func(map[string]interface{}, int) cty.Value - ctyObj = func(raw map[string]interface{}, depth int) cty.Value { - vals := make(map[string]cty.Value, len(raw)) - if depth == 1 { - for k, v := range raw { - vals[k] = v.(cty.Value) - } - } else { - for k, v := range raw { - vals[k] = ctyObj(v.(map[string]interface{}), depth-1) - } - } - return cty.ObjectVal(vals) - } - - return ctyObj(elems, len(s.LabelNames)), diags -} - -func (s *BlockObjectSpec) impliedType() cty.Type { - // We can't predict our type, since we don't know how many blocks are - // present and what labels they have until we decode. - return cty.DynamicPseudoType -} - -func (s *BlockObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockAttrsSpec is a Spec that interprets a single block as if it were -// a map of some element type. That is, each attribute within the block -// becomes a key in the resulting map and the attribute's value becomes the -// element value, after conversion to the given element type. The resulting -// value is a cty.Map of the given element type. -// -// This spec imposes a validation constraint that there be exactly one block -// of the given type name and that this block may contain only attributes. The -// block does not accept any labels. -// -// This is an alternative to an AttrSpec of a map type for situations where -// block syntax is desired. Note that block syntax does not permit dynamic -// keys, construction of the result via a "for" expression, etc. In most cases -// an AttrSpec is preferred if the desired result is a map whose keys are -// chosen by the user rather than by schema. -type BlockAttrsSpec struct { - TypeName string - ElementType cty.Type - Required bool -} - -func (s *BlockAttrsSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// blockSpec implementation -func (s *BlockAttrsSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: nil, - }, - } -} - -// blockSpec implementation -func (s *BlockAttrsSpec) nestedSpec() Spec { - // This is an odd case: we aren't actually going to apply a nested spec - // in this case, since we're going to interpret the body directly as - // attributes, but we need to return something non-nil so that the - // decoder will recognize this as a block spec. We won't actually be - // using this for anything at decode time. - return noopSpec{} -} - -// specNeedingVariables implementation -func (s *BlockAttrsSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - - block, _ := s.findBlock(content) - if block == nil { - return nil - } - - var vars []hcl.Traversal - - attrs, diags := block.Body.JustAttributes() - if diags.HasErrors() { - return nil - } - - for _, attr := range attrs { - vars = append(vars, attr.Expr.Variables()...) 
- } - -// We'll return the variable references in source order so that any -// error messages that result are also in source order. - sort.Slice(vars, func(i, j int) bool { - return vars[i].SourceRange().Start.Byte < vars[j].SourceRange().Start.Byte - }) - - return vars -} - -func (s *BlockAttrsSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - block, other := s.findBlock(content) - if block == nil { - if s.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block of type %q is required here.", s.TypeName, - ), - Subject: &content.MissingItemRange, - }) - } - return cty.NullVal(cty.Map(s.ElementType)), diags - } - if other != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "Only one block of type %q is allowed. Previous definition was at %s.", - s.TypeName, block.DefRange.String(), - ), - Subject: &other.DefRange, - }) - } - - attrs, attrDiags := block.Body.JustAttributes() - diags = append(diags, attrDiags...) - - if len(attrs) == 0 { - return cty.MapValEmpty(s.ElementType), diags - } - - vals := make(map[string]cty.Value, len(attrs)) - for name, attr := range attrs { - attrVal, attrDiags := attr.Expr.Value(ctx) - diags = append(diags, attrDiags...) - - attrVal, err := convert.Convert(attrVal, s.ElementType) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid attribute value", - Detail: fmt.Sprintf("Invalid value for attribute of %q block: %s.", s.TypeName, err), - Subject: attr.Expr.Range().Ptr(), - }) - attrVal = cty.UnknownVal(s.ElementType) - } - - vals[name] = attrVal - } - - return cty.MapVal(vals), diags -} - -func (s *BlockAttrsSpec) impliedType() cty.Type { - return cty.Map(s.ElementType) -} - -func (s *BlockAttrsSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - block, _ := s.findBlock(content) - if block == nil { - return content.MissingItemRange - } - return block.DefRange -} - -func (s *BlockAttrsSpec) findBlock(content *hcl.BodyContent) (block *hcl.Block, other *hcl.Block) { - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - if block != nil { - return block, candidate - } - block = candidate - } - - return block, nil -} - -// A BlockLabelSpec is a Spec that returns a cty.String representing the -// label of the block its given body belongs to, if indeed its given body -// belongs to a block. It is a programming error to use this in a non-block -// context, so this spec will panic in that case. -// -// This spec only works in the nested spec within a BlockSpec, BlockListSpec, -// BlockSetSpec or BlockMapSpec. -// -// The full set of label specs used against a particular block must have a -// consecutive set of indices starting at zero. The maximum index found -// defines how many labels the corresponding blocks must have in cty source.
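[Editorial aside; the BlockLabelSpec declaration continues below. Here is a hedged sketch, not part of this patch, of the map-style decoding BlockAttrsSpec provides. The "tags" block is invented, and f is as in the earlier sketches.]

// A single `tags { env = "prod" }` block is read as a map: each attribute
// name becomes a key, and each value is converted to the element type.
spec := &hcldec.BlockAttrsSpec{
	TypeName:    "tags",
	ElementType: cty.String,
}

val, diags := hcldec.Decode(f.Body, spec, nil) // cty.Map(cty.String)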
-type BlockLabelSpec struct { - Index int - Name string -} - -func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - if s.Index >= len(blockLabels) { - panic("BlockLabelSpec used in non-block context") - } - - return cty.StringVal(blockLabels[s.Index].Value), nil -} - -func (s *BlockLabelSpec) impliedType() cty.Type { - return cty.String // labels are always strings -} - -func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - if s.Index >= len(blockLabels) { - panic("BlockLabelSpec used in non-block context") - } - - return blockLabels[s.Index].Range -} - -func findLabelSpecs(spec Spec) []string { - maxIdx := -1 - var names map[int]string - - var visit visitFunc - visit = func(s Spec) { - if ls, ok := s.(*BlockLabelSpec); ok { - if maxIdx < ls.Index { - maxIdx = ls.Index - } - if names == nil { - names = make(map[int]string) - } - names[ls.Index] = ls.Name - } - s.visitSameBodyChildren(visit) - } - - visit(spec) - - if maxIdx < 0 { - return nil // no labels at all - } - - ret := make([]string, maxIdx+1) - for i := range ret { - name := names[i] - if name == "" { - // Should never happen if the spec is conformant, since we require - // consecutive indices starting at zero. - name = fmt.Sprintf("missing%02d", i) - } - ret[i] = name - } - - return ret -} - -// DefaultSpec is a spec that wraps two specs, evaluating the primary first -// and then evaluating the default if the primary returns a null value. -// -// The two specifications must have the same implied result type for correct -// operation. If not, the result is undefined. -// -// Any requirements imposed by the "Default" spec apply even if "Primary" does -// not return null. For example, if the "Default" spec is for a required -// attribute then that attribute is always required, regardless of the result -// of the "Primary" spec. -// -// The "Default" spec must not describe a nested block, since otherwise the -// result of ChildBlockTypes would not be decidable without evaluation. If -// the default spec _does_ describe a nested block then the result is -// undefined. -type DefaultSpec struct { - Primary Spec - Default Spec -} - -func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Primary) - cb(s.Default) -} - -func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - val, diags := s.Primary.decode(content, blockLabels, ctx) - if val.IsNull() { - var moreDiags hcl.Diagnostics - val, moreDiags = s.Default.decode(content, blockLabels, ctx) - diags = append(diags, moreDiags...) - } - return val, diags -} - -func (s *DefaultSpec) impliedType() cty.Type { - return s.Primary.impliedType() -} - -// attrSpec implementation -func (s *DefaultSpec) attrSchemata() []hcl.AttributeSchema { - // We must pass through the union of both of our nested specs so that - // we'll have both values available in the result. - var ret []hcl.AttributeSchema - if as, ok := s.Primary.(attrSpec); ok { - ret = append(ret, as.attrSchemata()...) - } - if as, ok := s.Default.(attrSpec); ok { - ret = append(ret, as.attrSchemata()...)
- } - return ret -} - -// blockSpec implementation -func (s *DefaultSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - // Only the primary spec may describe a block, since otherwise - // our nestedSpec method below can't know which to return. - if bs, ok := s.Primary.(blockSpec); ok { - return bs.blockHeaderSchemata() - } - return nil -} - -// blockSpec implementation -func (s *DefaultSpec) nestedSpec() Spec { - if bs, ok := s.Primary.(blockSpec); ok { - return bs.nestedSpec() - } - return nil -} - -func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We can't tell from here which of the two specs will ultimately be used - // in our result, so we'll just assume the first. This is usually the right - // choice because the default is often a literal spec that doesn't have a - // reasonable source range to return anyway. - return s.Primary.sourceRange(content, blockLabels) -} - -// TransformExprSpec is a spec that wraps another and then evaluates a given -// hcl.Expression on the result. -// -// The implied type of this spec is determined by evaluating the expression -// with an unknown value of the nested spec's implied type, which may cause -// the result to be imprecise. This spec should not be used in situations where -// precise result type information is needed. -type TransformExprSpec struct { - Wrapped Spec - Expr hcl.Expression - TransformCtx *hcl.EvalContext - VarName string -} - -func (s *TransformExprSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Wrapped) -} - -func (s *TransformExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) - if diags.HasErrors() { - // We won't try to run our function in this case, because it'll probably - // generate confusing additional errors that will distract from the - // root cause. - return cty.UnknownVal(s.impliedType()), diags - } - - chiCtx := s.TransformCtx.NewChild() - chiCtx.Variables = map[string]cty.Value{ - s.VarName: wrappedVal, - } - resultVal, resultDiags := s.Expr.Value(chiCtx) - diags = append(diags, resultDiags...) - return resultVal, diags -} - -func (s *TransformExprSpec) impliedType() cty.Type { - wrappedTy := s.Wrapped.impliedType() - chiCtx := s.TransformCtx.NewChild() - chiCtx.Variables = map[string]cty.Value{ - s.VarName: cty.UnknownVal(wrappedTy), - } - resultVal, _ := s.Expr.Value(chiCtx) - return resultVal.Type() -} - -func (s *TransformExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We'll just pass through our wrapped range here, even though that's - // not super-accurate, because there's nothing better to return. - return s.Wrapped.sourceRange(content, blockLabels) -} - -// TransformFuncSpec is a spec that wraps another and then evaluates a given -// cty function with the result. The given function must expect exactly one -// argument, where the result of the wrapped spec will be passed. -// -// The implied type of this spec is determined by type-checking the function -// with an unknown value of the nested spec's implied type, which may cause -// the result to be imprecise. This spec should not be used in situations where -// precise result type information is needed. -// -// If the given function produces an error when run, this spec will produce -// a non-user-actionable diagnostic message. 
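[Editorial aside; the TransformFuncSpec documentation resumes below. A sketch, not part of this patch, of the fallback behaviour DefaultSpec (defined above) provides; the attribute name and default value are invented.]

// Yields the "region" attribute from the body when set, or the literal
// default otherwise, since DefaultSpec falls back on a null primary result.
spec := &hcldec.DefaultSpec{
	Primary: &hcldec.AttrSpec{Name: "region", Type: cty.String},
	Default: &hcldec.LiteralSpec{Value: cty.StringVal("us-east-1")},
}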
It's the caller's responsibility -// to ensure that the given function cannot fail for any non-error result -// of the wrapped spec. -type TransformFuncSpec struct { - Wrapped Spec - Func function.Function -} - -func (s *TransformFuncSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Wrapped) -} - -func (s *TransformFuncSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) - if diags.HasErrors() { - // We won't try to run our function in this case, because it'll probably - // generate confusing additional errors that will distract from the - // root cause. - return cty.UnknownVal(s.impliedType()), diags - } - - resultVal, err := s.Func.Call([]cty.Value{wrappedVal}) - if err != nil { - // This is not a good example of a diagnostic because it is reporting - // a programming error in the calling application, rather than something - // an end-user could act on. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Transform function failed", - Detail: fmt.Sprintf("Decoder transform returned an error: %s", err), - Subject: s.sourceRange(content, blockLabels).Ptr(), - }) - return cty.UnknownVal(s.impliedType()), diags - } - - return resultVal, diags -} - -func (s *TransformFuncSpec) impliedType() cty.Type { - wrappedTy := s.Wrapped.impliedType() - resultTy, err := s.Func.ReturnType([]cty.Type{wrappedTy}) - if err != nil { - // Should never happen with a correctly-configured spec - return cty.DynamicPseudoType - } - - return resultTy -} - -func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We'll just pass through our wrapped range here, even though that's - // not super-accurate, because there's nothing better to return. - return s.Wrapped.sourceRange(content, blockLabels) -} - -// noopSpec is a placeholder spec that does nothing, used in situations where -// a non-nil placeholder spec is required. It is not exported because there is -// no reason to use it directly; it is always an implementation detail only. -type noopSpec struct { -} - -func (s noopSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return cty.NullVal(cty.DynamicPseudoType), nil -} - -func (s noopSpec) impliedType() cty.Type { - return cty.DynamicPseudoType -} - -func (s noopSpec) visitSameBodyChildren(cb visitFunc) { - // nothing to do -} - -func (s noopSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // No useful range for a noopSpec, and nobody should be calling this anyway. - return hcl.Range{ - Filename: "noopSpec", - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go deleted file mode 100644 index f8440eb6..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -// Variables processes the given body with the given spec and returns a -// list of the variable traversals that would be required to decode -// the same pairing of body and spec. -// -// This can be used to conditionally populate the variables in the EvalContext -// passed to Decode, for applications where a static scope is insufficient. 
-// -// If the given body is not compliant with the given schema, the result may -// be incomplete, but that's assumed to be okay because the eventual call -// to Decode will produce error diagnostics anyway. -func Variables(body hcl.Body, spec Spec) []hcl.Traversal { - var vars []hcl.Traversal - schema := ImpliedSchema(spec) - content, _, _ := body.PartialContent(schema) - - if vs, ok := spec.(specNeedingVariables); ok { - vars = append(vars, vs.variablesNeeded(content)...) - } - - var visitFn visitFunc - visitFn = func(s Spec) { - if vs, ok := s.(specNeedingVariables); ok { - vars = append(vars, vs.variablesNeeded(content)...) - } - s.visitSameBodyChildren(visitFn) - } - spec.visitSameBodyChildren(visitFn) - - return vars -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go b/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go deleted file mode 100644 index 1a801448..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package hcled provides functionality intended to help an application -// that embeds HCL to deliver relevant information to a text editor or IDE -// for navigating around and analyzing configuration files. -package hcled diff --git a/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go b/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go deleted file mode 100644 index 050ad758..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go +++ /dev/null @@ -1,34 +0,0 @@ -package hcled - -import ( - "github.com/hashicorp/hcl/v2" -) - -type contextStringer interface { - ContextString(offset int) string -} - -// ContextString returns a string describing the context of the given byte -// offset, if available. An empty string is returned if no such information -// is available, or otherwise the returned string is in a form that depends -// on the language used to write the referenced file. -func ContextString(file *hcl.File, offset int) string { - if cser, ok := file.Nav.(contextStringer); ok { - return cser.ContextString(offset) - } - return "" -} - -type contextDefRanger interface { - ContextDefRange(offset int) hcl.Range -} - -func ContextDefRange(file *hcl.File, offset int) hcl.Range { - if cser, ok := file.Nav.(contextDefRanger); ok { - defRange := cser.ContextDefRange(offset) - if !defRange.Empty() { - return defRange - } - } - return file.Body.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go deleted file mode 100644 index 1dc2eccd..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go +++ /dev/null @@ -1,135 +0,0 @@ -// Package hclparse has the main API entry point for parsing both HCL native -// syntax and HCL JSON. -// -// The main HCL package also includes SimpleParse and SimpleParseFile which -// can be a simpler interface for the common case where an application just -// needs to parse a single file. The gohcl package simplifies that further -// in its SimpleDecode function, which combines hcl.SimpleParse with decoding -// into Go struct values. -// -// Package hclparse, then, is useful for applications that require finer -// control over parsing or which need to load many separate files and keep -// track of them for possible error reporting or other analysis.
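[Editorial aside: a sketch, not part of this patch, of the intended use of the Variables helper deleted above: discover which traversals a body needs, then populate only those in the EvalContext. The stub value is invented, and f and spec are as in the earlier sketches.]

needed := hcldec.Variables(f.Body, spec)

vars := map[string]cty.Value{}
for _, traversal := range needed {
	// A real application would resolve each root symbol; here we stub them.
	vars[traversal.RootName()] = cty.StringVal("example")
}

ctx := &hcl.EvalContext{Variables: vars}
val, diags := hcldec.Decode(f.Body, spec, ctx)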
-package hclparse - -import ( - "fmt" - "io/ioutil" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/hcl/v2/json" -) - -// NOTE: This is the public interface for parsing. The actual parsers are -// in other packages alongside this one, with this package just wrapping them -// to provide a unified interface for the caller across all supported formats. - -// Parser is the main interface for parsing configuration files. As well as -// parsing files, a parser also retains a registry of all of the files it -// has parsed so that multiple attempts to parse the same file will return -// the same object and so the collected files can be used when printing -// diagnostics. -// -// Any diagnostics for parsing a file are only returned once on the first -// call to parse that file. Callers are expected to collect up diagnostics -// and present them together, so returning diagnostics for the same file -// multiple times would create a confusing result. -type Parser struct { - files map[string]*hcl.File -} - -// NewParser creates a new parser, ready to parse configuration files. -func NewParser() *Parser { - return &Parser{ - files: map[string]*hcl.File{}, - } -} - -// ParseHCL parses the given buffer (which is assumed to have been loaded from -// the given filename) as a native-syntax configuration file and returns the -// hcl.File object representing it. -func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) - p.files[filename] = file - return file, diags -} - -// ParseHCLFile reads the given filename and parses it as a native-syntax HCL -// configuration file. An error diagnostic is returned if the given file -// cannot be read. -func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - src, err := ioutil.ReadFile(filename) - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to read file", - Detail: fmt.Sprintf("The configuration file %q could not be read.", filename), - }, - } - } - - return p.ParseHCL(src, filename) -} - -// ParseJSON parses the given JSON buffer (which is assumed to have been loaded -// from the given filename) and returns the hcl.File object representing it. -func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := json.Parse(src, filename) - p.files[filename] = file - return file, diags -} - -// ParseJSONFile reads the given filename and parses it as JSON, similarly to -// ParseJSON. An error diagnostic is returned if the given file cannot be read. -func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := json.ParseFile(filename) - p.files[filename] = file - return file, diags -} - -// AddFile allows a caller to record in a parser a file that was parsed some -// other way, thus allowing it to be included in the registry of sources. -func (p *Parser) AddFile(filename string, file *hcl.File) { - p.files[filename] = file -} - -// Sources returns a map from filenames to the raw source code that was -// read from them. 
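[Editorial aside: a minimal sketch, not part of this patch, of the Parser workflow documented above, including the retained file registry feeding diagnostic output. The filename is invented, and the usual hcl, hclparse, and os imports are assumed.]

p := hclparse.NewParser()
f, diags := p.ParseHCLFile("main.hcl")

// The parser's registry of files supplies source context for error output.
wr := hcl.NewDiagnosticTextWriter(os.Stderr, p.Files(), 78, true)
if diags.HasErrors() {
	wr.WriteDiagnostics(diags)
	os.Exit(1)
}
_ = f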
This is intended to be used, for example, to print -// diagnostics with contextual information. -// -// The arrays underlying the returned slices should not be modified. -func (p *Parser) Sources() map[string][]byte { - ret := make(map[string][]byte) - for fn, f := range p.files { - ret[fn] = f.Bytes - } - return ret -} - -// Files returns a map from filenames to the File objects produced from them. -// This is intended to be used, for example, to print diagnostics with -// contextual information. -// -// The returned map and all of the objects it refers to directly or indirectly -// must not be modified. -func (p *Parser) Files() map[string]*hcl.File { - return p.files -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go deleted file mode 100644 index 09041652..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go +++ /dev/null @@ -1,121 +0,0 @@ -package hclwrite - -import ( - "bytes" - "io" -) - -type File struct { - inTree - - srcBytes []byte - body *node -} - -// NewEmptyFile constructs a new file with no content, ready to be mutated -// by other calls that append to its body. -func NewEmptyFile() *File { - f := &File{ - inTree: newInTree(), - } - body := newBody() - f.body = f.children.Append(body) - return f -} - -// Body returns the root body of the file, which contains the top-level -// attributes and blocks. -func (f *File) Body() *Body { - return f.body.content.(*Body) -} - -// WriteTo writes the tokens underlying the receiving file to the given writer. -// -// The tokens first have a simple formatting pass applied that adjusts only -// the spaces between them. -func (f *File) WriteTo(wr io.Writer) (int64, error) { - tokens := f.inTree.children.BuildTokens(nil) - format(tokens) - return tokens.WriteTo(wr) -} - -// Bytes returns a buffer containing the source code resulting from the -// tokens underlying the receiving file. If any updates have been made via -// the AST API, these will be reflected in the result. 
-func (f *File) Bytes() []byte { - buf := &bytes.Buffer{} - f.WriteTo(buf) - return buf.Bytes() -} - -type comments struct { - leafNode - - parent *node - tokens Tokens -} - -func newComments(tokens Tokens) *comments { - return &comments{ - tokens: tokens, - } -} - -func (c *comments) BuildTokens(to Tokens) Tokens { - return c.tokens.BuildTokens(to) -} - -type identifier struct { - leafNode - - parent *node - token *Token -} - -func newIdentifier(token *Token) *identifier { - return &identifier{ - token: token, - } -} - -func (i *identifier) BuildTokens(to Tokens) Tokens { - return append(to, i.token) -} - -func (i *identifier) hasName(name string) bool { - return name == string(i.token.Bytes) -} - -type number struct { - leafNode - - parent *node - token *Token -} - -func newNumber(token *Token) *number { - return &number{ - token: token, - } -} - -func (n *number) BuildTokens(to Tokens) Tokens { - return append(to, n.token) -} - -type quoted struct { - leafNode - - parent *node - tokens Tokens -} - -func newQuoted(tokens Tokens) *quoted { - return &quoted{ - tokens: tokens, - } -} - -func (q *quoted) BuildTokens(to Tokens) Tokens { - return q.tokens.BuildTokens(to) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go deleted file mode 100644 index 609419ff..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go +++ /dev/null @@ -1,48 +0,0 @@ -package hclwrite - -import ( - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -type Attribute struct { - inTree - - leadComments *node - name *node - expr *node - lineComments *node -} - -func newAttribute() *Attribute { - return &Attribute{ - inTree: newInTree(), - } -} - -func (a *Attribute) init(name string, expr *Expression) { - expr.assertUnattached() - - nameTok := newIdentToken(name) - nameObj := newIdentifier(nameTok) - a.leadComments = a.children.Append(newComments(nil)) - a.name = a.children.Append(nameObj) - a.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenEqual, - Bytes: []byte{'='}, - }, - }) - a.expr = a.children.Append(expr) - a.expr.list = a.children - a.lineComments = a.children.Append(newComments(nil)) - a.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }, - }) -} - -func (a *Attribute) Expr() *Expression { - return a.expr.content.(*Expression) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go deleted file mode 100644 index f7d3921b..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go +++ /dev/null @@ -1,118 +0,0 @@ -package hclwrite - -import ( - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -type Block struct { - inTree - - leadComments *node - typeName *node - labels nodeSet - open *node - body *node - close *node -} - -func newBlock() *Block { - return &Block{ - inTree: newInTree(), - labels: newNodeSet(), - } -} - -// NewBlock constructs a new, empty block with the given type name and labels.
-func NewBlock(typeName string, labels []string) *Block { - block := newBlock() - block.init(typeName, labels) - return block -} - -func (b *Block) init(typeName string, labels []string) { - nameTok := newIdentToken(typeName) - nameObj := newIdentifier(nameTok) - b.leadComments = b.children.Append(newComments(nil)) - b.typeName = b.children.Append(nameObj) - for _, label := range labels { - labelToks := TokensForValue(cty.StringVal(label)) - labelObj := newQuoted(labelToks) - labelNode := b.children.Append(labelObj) - b.labels.Add(labelNode) - } - b.open = b.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenOBrace, - Bytes: []byte{'{'}, - }, - { - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }, - }) - body := newBody() // initially totally empty; caller can append to it subsequently - b.body = b.children.Append(body) - b.close = b.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenCBrace, - Bytes: []byte{'}'}, - }, - { - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }, - }) -} - -// Body returns the body that represents the content of the receiving block. -// -// Appending to or otherwise modifying this body will make changes to the -// tokens that are generated between the block's open and close braces. -func (b *Block) Body() *Body { - return b.body.content.(*Body) -} - -// Type returns the type name of the block. -func (b *Block) Type() string { - typeNameObj := b.typeName.content.(*identifier) - return string(typeNameObj.token.Bytes) -} - -// Labels returns the labels of the block. -func (b *Block) Labels() []string { - labelNames := make([]string, 0, len(b.labels)) - list := b.labels.List() - - for _, label := range list { - switch labelObj := label.content.(type) { - case *identifier: - if labelObj.token.Type == hclsyntax.TokenIdent { - labelString := string(labelObj.token.Bytes) - labelNames = append(labelNames, labelString) - } - - case *quoted: - tokens := labelObj.tokens - if len(tokens) == 3 && - tokens[0].Type == hclsyntax.TokenOQuote && - tokens[1].Type == hclsyntax.TokenQuotedLit && - tokens[2].Type == hclsyntax.TokenCQuote { - // Note that TokenQuotedLit may contain escape sequences. - labelString, diags := hclsyntax.ParseStringLiteralToken(tokens[1].asHCLSyntax()) - - // If parsing the string literal returns error diagnostics - // then we can just assume the label doesn't match, because it's invalid in some way. - if !diags.HasErrors() { - labelNames = append(labelNames, labelString) - } - } - - default: - // If neither of the previous cases is true (should be impossible) - // then we can just ignore it, because it's invalid too.
- } - } - - return labelNames -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go deleted file mode 100644 index c16d13e3..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go +++ /dev/null @@ -1,219 +0,0 @@ -package hclwrite - -import ( - "reflect" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -type Body struct { - inTree - - items nodeSet -} - -func newBody() *Body { - return &Body{ - inTree: newInTree(), - items: newNodeSet(), - } -} - -func (b *Body) appendItem(c nodeContent) *node { - nn := b.children.Append(c) - b.items.Add(nn) - return nn -} - -func (b *Body) appendItemNode(nn *node) *node { - nn.assertUnattached() - b.children.AppendNode(nn) - b.items.Add(nn) - return nn -} - -// Clear removes all of the items from the body, making it empty. -func (b *Body) Clear() { - b.children.Clear() -} - -func (b *Body) AppendUnstructuredTokens(ts Tokens) { - b.inTree.children.Append(ts) -} - -// Attributes returns a new map of all of the attributes in the body, with -// the attribute names as the keys. -func (b *Body) Attributes() map[string]*Attribute { - ret := make(map[string]*Attribute) - for n := range b.items { - if attr, isAttr := n.content.(*Attribute); isAttr { - nameObj := attr.name.content.(*identifier) - name := string(nameObj.token.Bytes) - ret[name] = attr - } - } - return ret -} - -// Blocks returns a new slice of all the blocks in the body. -func (b *Body) Blocks() []*Block { - ret := make([]*Block, 0, len(b.items)) - for n := range b.items { - if block, isBlock := n.content.(*Block); isBlock { - ret = append(ret, block) - } - } - return ret -} - -// GetAttribute returns the attribute from the body that has the given name, -// or returns nil if there is currently no matching attribute. -func (b *Body) GetAttribute(name string) *Attribute { - for n := range b.items { - if attr, isAttr := n.content.(*Attribute); isAttr { - nameObj := attr.name.content.(*identifier) - if nameObj.hasName(name) { - // We've found it! - return attr - } - } - } - - return nil -} - -// getAttributeNode is like GetAttribute but it returns the node containing -// the selected attribute (if one is found) rather than the attribute itself. -func (b *Body) getAttributeNode(name string) *node { - for n := range b.items { - if attr, isAttr := n.content.(*Attribute); isAttr { - nameObj := attr.name.content.(*identifier) - if nameObj.hasName(name) { - // We've found it! - return n - } - } - } - - return nil -} - -// FirstMatchingBlock returns a first matching block from the body that has the -// given name and labels or returns nil if there is currently no matching -// block. -func (b *Body) FirstMatchingBlock(typeName string, labels []string) *Block { - for _, block := range b.Blocks() { - if typeName == block.Type() { - labelNames := block.Labels() - if len(labels) == 0 && len(labelNames) == 0 { - return block - } - if reflect.DeepEqual(labels, labelNames) { - return block - } - } - } - - return nil -} - -// RemoveBlock removes the given block from the body, if it's in that body. -// If it isn't present, this is a no-op. -// -// Returns true if it removed something, or false otherwise. 
-func (b *Body) RemoveBlock(block *Block) bool { - for n := range b.items { - if n.content == block { - n.Detach() - b.items.Remove(n) - return true - } - } - return false -} - -// SetAttributeValue either replaces the expression of an existing attribute -// of the given name or adds a new attribute definition to the end of the body. -// -// The value is given as a cty.Value, and must therefore be a literal. To set -// a variable reference or other traversal, use SetAttributeTraversal. -// -// The return value is the attribute that was either modified in-place or -// created. -func (b *Body) SetAttributeValue(name string, val cty.Value) *Attribute { - attr := b.GetAttribute(name) - expr := NewExpressionLiteral(val) - if attr != nil { - attr.expr = attr.expr.ReplaceWith(expr) - } else { - attr = newAttribute() // assign (not ":=") so the new attribute is returned below, rather than nil - attr.init(name, expr) - b.appendItem(attr) - } - return attr -} - -// SetAttributeTraversal either replaces the expression of an existing attribute -// of the given name or adds a new attribute definition to the end of the body. -// -// The new expression is given as a hcl.Traversal, which must be an absolute -// traversal. To set a literal value, use SetAttributeValue. -// -// The return value is the attribute that was either modified in-place or -// created. -func (b *Body) SetAttributeTraversal(name string, traversal hcl.Traversal) *Attribute { - attr := b.GetAttribute(name) - expr := NewExpressionAbsTraversal(traversal) - if attr != nil { - attr.expr = attr.expr.ReplaceWith(expr) - } else { - attr = newAttribute() // assign (not ":=") so the new attribute is returned below, rather than nil - attr.init(name, expr) - b.appendItem(attr) - } - return attr -} - -// RemoveAttribute removes the attribute with the given name from the body. -// -// The return value is the attribute that was removed, or nil if there was -// no such attribute (in which case the call was a no-op). -func (b *Body) RemoveAttribute(name string) *Attribute { - node := b.getAttributeNode(name) - if node == nil { - return nil - } - node.Detach() - b.items.Remove(node) - return node.content.(*Attribute) -} - -// AppendBlock appends an existing block (which must not be already attached -// to a body) to the end of the receiving body. -func (b *Body) AppendBlock(block *Block) *Block { - b.appendItem(block) - return block -} - -// AppendNewBlock appends a new nested block to the end of the receiving body -// with the given type name and labels. -func (b *Body) AppendNewBlock(typeName string, labels []string) *Block { - block := newBlock() - block.init(typeName, labels) - b.appendItem(block) - return block -} - -// AppendNewline appends a newline token to the end of the receiving body, -// which generally serves as a separator between different sets of body -// contents.
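The two setter variants above differ only in how the right-hand side is expressed: SetAttributeValue renders a cty.Value literal, while SetAttributeTraversal renders a variable reference. A rough sketch, assuming this vendored API (the attribute and symbol names are invented for illustration):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewFile()
	body := f.Body()
	body.SetAttributeValue("ami", cty.StringVal("ami-123456")) // literal: ami = "ami-123456"
	body.SetAttributeTraversal("subnet_id", hcl.Traversal{ // reference: subnet_id = aws_subnet.main.id
		hcl.TraverseRoot{Name: "aws_subnet"},
		hcl.TraverseAttr{Name: "main"},
		hcl.TraverseAttr{Name: "id"},
	})
	fmt.Printf("%s", f.Bytes())
}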
-func (b *Body) AppendNewline() { - b.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }, - }) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go deleted file mode 100644 index 854e7169..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go +++ /dev/null @@ -1,201 +0,0 @@ -package hclwrite - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -type Expression struct { - inTree - - absTraversals nodeSet -} - -func newExpression() *Expression { - return &Expression{ - inTree: newInTree(), - absTraversals: newNodeSet(), - } -} - -// NewExpressionLiteral constructs an expression that represents the given -// literal value. -// -// Since an unknown value cannot be represented in source code, this function -// will panic if the given value is unknown or contains a nested unknown value. -// Use val.IsWhollyKnown before calling to be sure. -// -// HCL native syntax does not directly represent lists, maps, and sets, and -// instead relies on the automatic conversions to those collection types from -// either list or tuple constructor syntax. Therefore converting collection -// values to source code and re-reading them will lose type information, and -// the reader must provide a suitable type at decode time to recover the -// original value. -func NewExpressionLiteral(val cty.Value) *Expression { - toks := TokensForValue(val) - expr := newExpression() - expr.children.AppendUnstructuredTokens(toks) - return expr -} - -// NewExpressionAbsTraversal constructs an expression that represents the -// given traversal, which must be absolute or this function will panic. -func NewExpressionAbsTraversal(traversal hcl.Traversal) *Expression { - if traversal.IsRelative() { - panic("can't construct expression from relative traversal") - } - - physT := newTraversal() - rootName := traversal.RootName() - steps := traversal[1:] - - { - tn := newTraverseName() - tn.name = tn.children.Append(newIdentifier(&Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(rootName), - })) - physT.steps.Add(physT.children.Append(tn)) - } - - for _, step := range steps { - switch ts := step.(type) { - case hcl.TraverseAttr: - tn := newTraverseName() - tn.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenDot, - Bytes: []byte{'.'}, - }, - }) - tn.name = tn.children.Append(newIdentifier(&Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(ts.Name), - })) - physT.steps.Add(physT.children.Append(tn)) - case hcl.TraverseIndex: - ti := newTraverseIndex() - ti.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenOBrack, - Bytes: []byte{'['}, - }, - }) - indexExpr := NewExpressionLiteral(ts.Key) - ti.key = ti.children.Append(indexExpr) - ti.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenCBrack, - Bytes: []byte{']'}, - }, - }) - physT.steps.Add(physT.children.Append(ti)) - } - } - - expr := newExpression() - expr.absTraversals.Add(expr.children.Append(physT)) - return expr -} - -// Variables returns the absolute traversals that exist within the receiving -// expression.
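Since NewExpressionLiteral panics on unknown values, callers are expected to guard with IsWhollyKnown, as the doc comment above says. A small illustrative sketch, not taken from this codebase:

package main

import (
	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	if val.IsWhollyKnown() { // guard: unknown values cannot be rendered as source
		_ = hclwrite.NewExpressionLiteral(val) // renders as the tuple constructor ["a", "b"]
	}
}

Note the round-trip caveat from the comment above: the tuple constructor loses the fact that this was a list, so a reader must supply a list type at decode time to recover the original value.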
-func (e *Expression) Variables() []*Traversal { - nodes := e.absTraversals.List() - ret := make([]*Traversal, len(nodes)) - for i, node := range nodes { - ret[i] = node.content.(*Traversal) - } - return ret -} - -// RenameVariablePrefix examines each of the absolute traversals in the -// receiving expression to see if they have the given sequence of names as -// a prefix. If so, they are updated in place to have the given -// replacement names instead of that prefix. -// -// This can be used to implement symbol renaming. The calling application can -// visit all relevant expressions in its input and apply the same renaming -// to implement a global symbol rename. -// -// The search and replacement traversals must be the same length, or this -// method will panic. Only attribute access operations can be matched and -// replaced. Index steps never match the prefix. -func (e *Expression) RenameVariablePrefix(search, replacement []string) { - if len(search) != len(replacement) { - panic(fmt.Sprintf("search and replacement length mismatch (%d and %d)", len(search), len(replacement))) - } -Traversals: - for node := range e.absTraversals { - traversal := node.content.(*Traversal) - if len(traversal.steps) < len(search) { - // If it's shorter, then it can't have our prefix - continue - } - - stepNodes := traversal.steps.List() - for i, name := range search { - step, isName := stepNodes[i].content.(*TraverseName) - if !isName { - continue Traversals // only name nodes can match - } - foundNameBytes := step.name.content.(*identifier).token.Bytes - if len(foundNameBytes) != len(name) { - continue Traversals - } - if string(foundNameBytes) != name { - continue Traversals - } - } - - // If we get here then the prefix matched, so now we'll swap in - // the replacement strings. - for i, name := range replacement { - step := stepNodes[i].content.(*TraverseName) - token := step.name.content.(*identifier).token - token.Bytes = []byte(name) - } - } -} - -// Traversal represents a sequence of variable, attribute, and/or index -// operations. -type Traversal struct { - inTree - - steps nodeSet -} - -func newTraversal() *Traversal { - return &Traversal{ - inTree: newInTree(), - steps: newNodeSet(), - } -} - -type TraverseName struct { - inTree - - name *node -} - -func newTraverseName() *TraverseName { - return &TraverseName{ - inTree: newInTree(), - } -} - -type TraverseIndex struct { - inTree - - key *node -} - -func newTraverseIndex() *TraverseIndex { - return &TraverseIndex{ - inTree: newInTree(), - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go deleted file mode 100644 index 56d5b775..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hclwrite deals with the problem of generating HCL configuration -// and of making specific surgical changes to existing HCL configurations. -// -// It operates at a different level of abstraction than the main HCL parser -// and AST, since details such as the placement of comments and newlines -// are preserved when unchanged. -// -// The hclwrite API follows a similar principle to XML/HTML DOM, allowing nodes -// to be read out, created and inserted, etc. Nodes represent syntax constructs -// rather than semantic concepts.
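The DOM-like model this package doc describes is easiest to see with a small generation example. A sketch against the public API vendored in this diff (the block and attribute names are illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewFile()
	block := f.Body().AppendNewBlock("resource", []string{"aws_instance", "web"})
	block.Body().SetAttributeValue("ami", cty.StringVal("ami-123456"))
	// Prints:
	// resource "aws_instance" "web" {
	//   ami = "ami-123456"
	// }
	fmt.Printf("%s", f.Bytes())
}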
-package hclwrite diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go deleted file mode 100644 index b94bee38..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go +++ /dev/null @@ -1,463 +0,0 @@ -package hclwrite - -import ( - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -var inKeyword = hclsyntax.Keyword([]byte{'i', 'n'}) - -// placeholder token used when we don't have a token but we don't want -// to pass a real "nil" and complicate things with nil pointer checks -var nilToken = &Token{ - Type: hclsyntax.TokenNil, - Bytes: []byte{}, - SpacesBefore: 0, -} - -// format rewrites tokens within the given sequence, in-place, to adjust the -// whitespace around their content to achieve canonical formatting. -func format(tokens Tokens) { - // Formatting is a multi-pass process. More details on the passes below, - // but this is the overview: - // - adjust the leading space on each line to create appropriate - // indentation - // - adjust spaces between tokens in a single cell using a set of rules - // - adjust the leading space in the "assign" and "comment" cells on each - // line to vertically align with neighboring lines. - // All of these steps operate in-place on the given tokens, so a caller - // may collect a flat sequence of all of the tokens underlying an AST - // and pass it here and we will then indirectly modify the AST itself. - // Formatting must change only whitespace. Specifically, that means - // changing the SpacesBefore attribute on a token while leaving the - // other token attributes unchanged. - - lines := linesForFormat(tokens) - formatIndent(lines) - formatSpaces(lines) - formatCells(lines) -} - -func formatIndent(lines []formatLine) { - // Our methodology for indents is to take the input one line at a time - // and count the bracketing delimiters on each line. If a line has a net - // increase in open brackets, we increase the indent level by one and - // remember how many new openers we had. If the line has a net _decrease_, - // we'll compare it to the most recent number of openers and decrease the - // dedent level by one each time we pass an indent level remembered - // earlier. - // The "indent stack" used here allows for us to recognize degenerate - // input where brackets are not symmetrical within lines and avoid - // pushing things too far left or right, creating confusion. - - // We'll start our indent stack at a reasonable capacity to minimize the - // chance of us needing to grow it; 10 here means 10 levels of indent, - // which should be more than enough for reasonable HCL uses. 
- indents := make([]int, 0, 10) - - for i := range lines { - line := &lines[i] - if len(line.lead) == 0 { - continue - } - - if line.lead[0].Type == hclsyntax.TokenNewline { - // Never place spaces before a newline - line.lead[0].SpacesBefore = 0 - continue - } - - netBrackets := 0 - for _, token := range line.lead { - netBrackets += tokenBracketChange(token) - if token.Type == hclsyntax.TokenOHeredoc { - break - } - } - - for _, token := range line.assign { - netBrackets += tokenBracketChange(token) - } - - switch { - case netBrackets > 0: - line.lead[0].SpacesBefore = 2 * len(indents) - indents = append(indents, netBrackets) - case netBrackets < 0: - closed := -netBrackets - for closed > 0 && len(indents) > 0 { - switch { - - case closed > indents[len(indents)-1]: - closed -= indents[len(indents)-1] - indents = indents[:len(indents)-1] - - case closed < indents[len(indents)-1]: - indents[len(indents)-1] -= closed - closed = 0 - - default: - indents = indents[:len(indents)-1] - closed = 0 - } - } - line.lead[0].SpacesBefore = 2 * len(indents) - default: - line.lead[0].SpacesBefore = 2 * len(indents) - } - } -} - -func formatSpaces(lines []formatLine) { - for _, line := range lines { - for i, token := range line.lead { - var before, after *Token - if i > 0 { - before = line.lead[i-1] - } else { - before = nilToken - } - if i < (len(line.lead) - 1) { - after = line.lead[i+1] - } else { - after = nilToken - } - if spaceAfterToken(token, before, after) { - after.SpacesBefore = 1 - } else { - after.SpacesBefore = 0 - } - } - for i, token := range line.assign { - if i == 0 { - // first token in "assign" always has one space before to - // separate the equals sign from what it's assigning. - token.SpacesBefore = 1 - } - - var before, after *Token - if i > 0 { - before = line.assign[i-1] - } else { - before = nilToken - } - if i < (len(line.assign) - 1) { - after = line.assign[i+1] - } else { - after = nilToken - } - if spaceAfterToken(token, before, after) { - after.SpacesBefore = 1 - } else { - after.SpacesBefore = 0 - } - } - - } -} - -func formatCells(lines []formatLine) { - - chainStart := -1 - maxColumns := 0 - - // We'll deal with the "assign" cell first, since moving that will - // also impact the "comment" cell. 
- closeAssignChain := func(i int) { - for _, chainLine := range lines[chainStart:i] { - columns := chainLine.lead.Columns() - spaces := (maxColumns - columns) + 1 - chainLine.assign[0].SpacesBefore = spaces - } - chainStart = -1 - maxColumns = 0 - } - for i, line := range lines { - if line.assign == nil { - if chainStart != -1 { - closeAssignChain(i) - } - } else { - if chainStart == -1 { - chainStart = i - } - columns := line.lead.Columns() - if columns > maxColumns { - maxColumns = columns - } - } - } - if chainStart != -1 { - closeAssignChain(len(lines)) - } - - // Now we'll deal with the comments - closeCommentChain := func(i int) { - for _, chainLine := range lines[chainStart:i] { - columns := chainLine.lead.Columns() + chainLine.assign.Columns() - spaces := (maxColumns - columns) + 1 - chainLine.comment[0].SpacesBefore = spaces - } - chainStart = -1 - maxColumns = 0 - } - for i, line := range lines { - if line.comment == nil { - if chainStart != -1 { - closeCommentChain(i) - } - } else { - if chainStart == -1 { - chainStart = i - } - columns := line.lead.Columns() + line.assign.Columns() - if columns > maxColumns { - maxColumns = columns - } - } - } - if chainStart != -1 { - closeCommentChain(len(lines)) - } - -} - -// spaceAfterToken decides whether a particular subject token should have a -// space after it when surrounded by the given before and after tokens. -// "before" can be TokenNil, if the subject token is at the start of a sequence. -func spaceAfterToken(subject, before, after *Token) bool { - switch { - - case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil: - // Never add spaces before a newline - return false - - case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen: - // Don't split a function name from open paren in a call - return false - - case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot: - // Don't use spaces around attribute access dots - return false - - case after.Type == hclsyntax.TokenComma || after.Type == hclsyntax.TokenEllipsis: - // No space right before a comma or ... in an argument list - return false - - case subject.Type == hclsyntax.TokenComma: - // Always a space after a comma - return true - - case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc: - // No extra spaces within templates - return false - - case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent: - // This is a special case for inside for expressions where a user - // might want to use a literal tuple constructor: - // [for x in [foo]: x] - // ... in that case, we would normally produce in[foo] thinking that - // in is a reference, but we'll recognize it as a keyword here instead - // to make the result less confusing. - return true - - case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0): - return false - - case subject.Type == hclsyntax.TokenMinus: - // Since a minus can either be subtraction or negation, and the latter - // should _not_ have a space after it, we need to use some heuristics - // to decide which case this is. 
- // We guess that we have a negation if the token before doesn't look - // like it could be the end of an expression. - - switch before.Type { - - case hclsyntax.TokenNil: - // Minus at the start of input must be a negation - return false - - case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion: - // Minus immediately after an opening bracket or separator must be a negation. - return false - - case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus: - // Minus immediately after another arithmetic operator must be negation. - return false - - case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq: - // Minus immediately after another comparison operator must be negation. - return false - - case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang: - // Minus immediately after logical operator doesn't make sense but probably intended as negation. - return false - - default: - return true - } - - case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace: - // Unlike other bracket types, braces have spaces on both sides of them, - // both in single-line nested blocks foo { bar = baz } and in object - // constructor expressions foo = { bar = baz }. - if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace { - // An open brace followed by a close brace is an exception, however. - // e.g. foo {} rather than foo { } - return false - } - return true - - // In the unlikely event that an interpolation expression is just - // a single object constructor, we'll put a space between the ${ and - // the following { to make this more obvious, and then the same - // thing for the two braces at the end. - case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace: - return true - case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd: - return true - - // Don't add spaces between interpolated items - case subject.Type == hclsyntax.TokenTemplateSeqEnd && (after.Type == hclsyntax.TokenTemplateInterp || after.Type == hclsyntax.TokenTemplateControl): - return false - - case tokenBracketChange(subject) > 0: - // No spaces after open brackets - return false - - case tokenBracketChange(after) < 0: - // No spaces before close brackets - return false - - default: - // Most tokens are space-separated - return true - - } -} - -func linesForFormat(tokens Tokens) []formatLine { - if len(tokens) == 0 { - return make([]formatLine, 0) - } - - // first we'll count our lines, so we can allocate the array for them in - // a single block. (We want to minimize memory pressure in this codepath, - // so it can be run somewhat-frequently by editor integrations.) - lineCount := 1 // if there are zero newlines then there is one line - for _, tok := range tokens { - if tokenIsNewline(tok) { - lineCount++ - } - } - - // To start, we'll just put everything in the "lead" cell on each line, - // and then do another pass over the lines afterwards to adjust. - lines := make([]formatLine, lineCount) - li := 0 - lineStart := 0 - for i, tok := range tokens { - if tok.Type == hclsyntax.TokenEOF { - // The EOF token doesn't belong to any line, and terminates the - // token sequence. 
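Before the line-splitting code continues below, the minus-sign heuristics above are worth illustrating. A sketch of the behavior they should produce (again my reading, not a captured run):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	fmt.Printf("%s", hclwrite.Format([]byte("a = 1 -2\nb = [ -1]\n")))
	// a = 1 - 2   (minus after a number literal reads as subtraction)
	// b = [-1]    (minus right after an opening bracket reads as negation)
}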
- lines[li].lead = tokens[lineStart:i] - break - } - - if tokenIsNewline(tok) { - lines[li].lead = tokens[lineStart : i+1] - lineStart = i + 1 - li++ - } - } - - // If a set of tokens doesn't end in TokenEOF (e.g. because it's a - // fragment of tokens from the middle of a file) then we might fall - // out here with a line still pending. - if lineStart < len(tokens) { - lines[li].lead = tokens[lineStart:] - if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF { - lines[li].lead = lines[li].lead[:len(lines[li].lead)-1] - } - } - - // Now we'll pick off any trailing comments and attribute assignments - // to shuffle off into the "comment" and "assign" cells. - for i := range lines { - line := &lines[i] - - if len(line.lead) == 0 { - // if the line is empty then there's nothing for us to do - // (this should happen only for the final line, because all other - // lines would have a newline token of some kind) - continue - } - - if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment { - line.comment = line.lead[len(line.lead)-1:] - line.lead = line.lead[:len(line.lead)-1] - } - - for i, tok := range line.lead { - if i > 0 && tok.Type == hclsyntax.TokenEqual { - // We only move the tokens into "assign" if the RHS seems to - // be a whole expression, which we determine by counting - // brackets. If there's a net positive number of brackets - // then that suggests we're introducing a multi-line expression. - netBrackets := 0 - for _, token := range line.lead[i:] { - netBrackets += tokenBracketChange(token) - } - - if netBrackets == 0 { - line.assign = line.lead[i:] - line.lead = line.lead[:i] - } - break - } - } - } - - return lines -} - -func tokenIsNewline(tok *Token) bool { - if tok.Type == hclsyntax.TokenNewline { - return true - } else if tok.Type == hclsyntax.TokenComment { - // Single line tokens (# and //) consume their terminating newline, - // so we need to treat them as newline tokens as well. - if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { - return true - } - } - return false -} - -func tokenBracketChange(tok *Token) int { - switch tok.Type { - case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp: - return 1 - case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd: - return -1 - default: - return 0 - } -} - -// formatLine represents a single line of source code for formatting purposes, -// splitting its tokens into up to three "cells": -// -// lead: always present, representing everything up to one of the others -// assign: if line contains an attribute assignment, represents the tokens -// starting at (and including) the equals symbol -// comment: if line contains any non-comment tokens and ends with a -// single-line comment token, represents the comment. -// -// When formatting, the leading spaces of the first tokens in each of these -// cells are adjusted to align vertically their occurrences on consecutive -// rows.
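These three cells give Format its vertical alignment behavior: consecutive lines that have assign or comment cells get their equals signs and trailing comments padded to a common column. A sketch, assuming the Format entry point from public.go later in this diff:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	src := []byte("short = 1 # one\nmuch_longer_name = 2 # two\n")
	// Expected:
	// short            = 1 # one
	// much_longer_name = 2 # two
	fmt.Printf("%s", hclwrite.Format(src))
}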
-type formatLine struct { - lead Tokens - assign Tokens - comment Tokens -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go deleted file mode 100644 index 289a30d6..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go +++ /dev/null @@ -1,250 +0,0 @@ -package hclwrite - -import ( - "fmt" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// TokensForValue returns a sequence of tokens that represents the given -// constant value. -// -// This function only supports types that are used by HCL. In particular, it -// does not support capsule types and will panic if given one. -// -// It is not possible to express an unknown value in source code, so this -// function will panic if the given value is unknown or contains any unknown -// values. A caller can call the value's IsWhollyKnown method to verify that -// no unknown values are present before calling TokensForValue. -func TokensForValue(val cty.Value) Tokens { - toks := appendTokensForValue(val, nil) - format(toks) // fiddle with the SpacesBefore field to get canonical spacing - return toks -} - -// TokensForTraversal returns a sequence of tokens that represents the given -// traversal. -// -// If the traversal is absolute then the result is a self-contained, valid -// reference expression. If the traversal is relative then the returned tokens -// could be appended to some other expression tokens to traverse into the -// represented expression. -func TokensForTraversal(traversal hcl.Traversal) Tokens { - toks := appendTokensForTraversal(traversal, nil) - format(toks) // fiddle with the SpacesBefore field to get canonical spacing - return toks -} - -func appendTokensForValue(val cty.Value, toks Tokens) Tokens { - switch { - - case !val.IsKnown(): - panic("cannot produce tokens for unknown value") - - case val.IsNull(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(`null`), - }) - - case val.Type() == cty.Bool: - var src []byte - if val.True() { - src = []byte(`true`) - } else { - src = []byte(`false`) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: src, - }) - - case val.Type() == cty.Number: - bf := val.AsBigFloat() - srcStr := bf.Text('f', -1) - toks = append(toks, &Token{ - Type: hclsyntax.TokenNumberLit, - Bytes: []byte(srcStr), - }) - - case val.Type() == cty.String: - // TODO: If it's a multi-line string ending in a newline, format - // it as a HEREDOC instead. 
- src := escapeQuotedStringLit(val.AsString()) - toks = append(toks, &Token{ - Type: hclsyntax.TokenOQuote, - Bytes: []byte{'"'}, - }) - if len(src) > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenQuotedLit, - Bytes: src, - }) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenCQuote, - Bytes: []byte{'"'}, - }) - - case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrack, - Bytes: []byte{'['}, - }) - - i := 0 - for it := val.ElementIterator(); it.Next(); { - if i > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenComma, - Bytes: []byte{','}, - }) - } - _, eVal := it.Element() - toks = appendTokensForValue(eVal, toks) - i++ - } - - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrack, - Bytes: []byte{']'}, - }) - - case val.Type().IsMapType() || val.Type().IsObjectType(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrace, - Bytes: []byte{'{'}, - }) - - i := 0 - for it := val.ElementIterator(); it.Next(); { - if i > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenComma, - Bytes: []byte{','}, - }) - } - eKey, eVal := it.Element() - if hclsyntax.ValidIdentifier(eKey.AsString()) { - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(eKey.AsString()), - }) - } else { - toks = appendTokensForValue(eKey, toks) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenEqual, - Bytes: []byte{'='}, - }) - toks = appendTokensForValue(eVal, toks) - i++ - } - - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrace, - Bytes: []byte{'}'}, - }) - - default: - panic(fmt.Sprintf("cannot produce tokens for %#v", val)) - } - - return toks -} - -func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens { - for _, step := range traversal { - toks = appendTokensForTraversalStep(step, toks) // slices are passed by value, so the result must be captured - } - return toks -} - -func appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) Tokens { - switch ts := step.(type) { - case hcl.TraverseRoot: - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(ts.Name), - }) - case hcl.TraverseAttr: - toks = append( - toks, - &Token{ - Type: hclsyntax.TokenDot, - Bytes: []byte{'.'}, - }, - &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(ts.Name), - }, - ) - case hcl.TraverseIndex: - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrack, - Bytes: []byte{'['}, - }) - toks = appendTokensForValue(ts.Key, toks) - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrack, - Bytes: []byte{']'}, - }) - default: - panic(fmt.Sprintf("unsupported traversal step type %T", step)) - } - return toks -} - -func escapeQuotedStringLit(s string) []byte { - if len(s) == 0 { - return nil - } - buf := make([]byte, 0, len(s)) - for i, r := range s { - switch r { - case '\n': - buf = append(buf, '\\', 'n') - case '\r': - buf = append(buf, '\\', 'r') - case '\t': - buf = append(buf, '\\', 't') - case '"': - buf = append(buf, '\\', '"') - case '\\': - buf = append(buf, '\\', '\\') - case '$', '%': - buf = appendRune(buf, r) - remain := s[i+1:] - if len(remain) > 0 && remain[0] == '{' { - // Double up our template introducer symbol to escape it. - buf = appendRune(buf, r) - } - default: - if !unicode.IsPrint(r) { - var fmted string - if r < 65536 { - fmted = fmt.Sprintf("\\u%04x", r) - } else { - fmted = fmt.Sprintf("\\U%08x", r) - } - buf = append(buf, fmted...)
- } else { - buf = appendRune(buf, r) - } - } - } - return buf -} - -func appendRune(b []byte, r rune) []byte { - l := utf8.RuneLen(r) - for i := 0; i < l; i++ { - b = append(b, 0) // make room at the end of our buffer - } - ch := b[len(b)-l:] - utf8.EncodeRune(ch, r) - return b -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go deleted file mode 100644 index cedf6862..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go +++ /dev/null @@ -1,23 +0,0 @@ -package hclwrite - -import ( - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -type nativeNodeSorter struct { - Nodes []hclsyntax.Node -} - -func (s nativeNodeSorter) Len() int { - return len(s.Nodes) -} - -func (s nativeNodeSorter) Less(i, j int) bool { - rangeI := s.Nodes[i].Range() - rangeJ := s.Nodes[j].Range() - return rangeI.Start.Byte < rangeJ.Start.Byte -} - -func (s nativeNodeSorter) Swap(i, j int) { - s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i] -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go deleted file mode 100644 index 45669f7f..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go +++ /dev/null @@ -1,260 +0,0 @@ -package hclwrite - -import ( - "fmt" - - "github.com/google/go-cmp/cmp" -) - -// node represents a node in the AST. -type node struct { - content nodeContent - - list *nodes - before, after *node -} - -func newNode(c nodeContent) *node { - return &node{ - content: c, - } -} - -func (n *node) Equal(other *node) bool { - return cmp.Equal(n.content, other.content) -} - -func (n *node) BuildTokens(to Tokens) Tokens { - return n.content.BuildTokens(to) -} - -// Detach removes the receiver from the list it currently belongs to. If the -// node is not currently in a list, this is a no-op. -func (n *node) Detach() { - if n.list == nil { - return - } - if n.before != nil { - n.before.after = n.after - } - if n.after != nil { - n.after.before = n.before - } - if n.list.first == n { - n.list.first = n.after - } - if n.list.last == n { - n.list.last = n.before - } - n.list = nil - n.before = nil - n.after = nil -} - -// ReplaceWith removes the receiver from the list it currently belongs to and -// inserts a new node with the given content in its place. If the node is not -// currently in a list, this function will panic. -// -// The return value is the newly-constructed node, containing the given content. -// After this function returns, the receiver is no longer attached to a list. -func (n *node) ReplaceWith(c nodeContent) *node { - if n.list == nil { - panic("can't replace node that is not in a list") - } - - before := n.before - after := n.after - list := n.list - n.before, n.after, n.list = nil, nil, nil - - nn := newNode(c) - nn.before = before - nn.after = after - nn.list = list - if before != nil { - before.after = nn - } - if after != nil { - after.before = nn - } - return nn -} - -func (n *node) assertUnattached() { - if n.list != nil { - panic(fmt.Sprintf("attempt to attach already-attached node %#v", n)) - } -} - -// nodeContent is the interface type implemented by all AST content types. -type nodeContent interface { - walkChildNodes(w internalWalkFunc) - BuildTokens(to Tokens) Tokens -} - -// nodes is a list of nodes.
-type nodes struct { - first, last *node -} - -func (ns *nodes) BuildTokens(to Tokens) Tokens { - for n := ns.first; n != nil; n = n.after { - to = n.BuildTokens(to) - } - return to -} - -func (ns *nodes) Clear() { - ns.first = nil - ns.last = nil -} - -func (ns *nodes) Append(c nodeContent) *node { - n := &node{ - content: c, - } - ns.AppendNode(n) - n.list = ns - return n -} - -func (ns *nodes) AppendNode(n *node) { - if ns.last != nil { - n.before = ns.last - ns.last.after = n - } - n.list = ns - ns.last = n - if ns.first == nil { - ns.first = n - } -} - -func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node { - if len(tokens) == 0 { - return nil - } - n := newNode(tokens) - ns.AppendNode(n) - n.list = ns - return n -} - -// FindNodeWithContent searches the nodes for a node whose content equals -// the given content. If it finds one then it returns it. Otherwise it returns -// nil. -func (ns *nodes) FindNodeWithContent(content nodeContent) *node { - for n := ns.first; n != nil; n = n.after { - if n.content == content { - return n - } - } - return nil -} - -// nodeSet is an unordered set of nodes. It is used to describe a set of nodes -// that all belong to the same list that have some role or characteristic -// in common. -type nodeSet map[*node]struct{} - -func newNodeSet() nodeSet { - return make(nodeSet) -} - -func (ns nodeSet) Has(n *node) bool { - if ns == nil { - return false - } - _, exists := ns[n] - return exists -} - -func (ns nodeSet) Add(n *node) { - ns[n] = struct{}{} -} - -func (ns nodeSet) Remove(n *node) { - delete(ns, n) -} - -func (ns nodeSet) List() []*node { - if len(ns) == 0 { - return nil - } - - ret := make([]*node, 0, len(ns)) - - // Determine which list we are working with. We assume here that all of - // the nodes belong to the same list, since that is part of the contract - // for nodeSet. - var list *nodes - for n := range ns { - list = n.list - break - } - - // We recover the order by iterating over the whole list. This is not - // the most efficient way to do it, but our node lists should always be - // small so not worth making things more complex. - for n := list.first; n != nil; n = n.after { - if ns.Has(n) { - ret = append(ret, n) - } - } - return ret -} - -// FindNodeWithContent searches the nodes for a node whose content equals -// the given content. If it finds one then it returns it. Otherwise it returns -// nil. -func (ns nodeSet) FindNodeWithContent(content nodeContent) *node { - for n := range ns { - if n.content == content { - return n - } - } - return nil -} - -type internalWalkFunc func(*node) - -// inTree can be embedded into a content struct that has child nodes to get -// a standard implementation of the NodeContent interface and a record of -// a potential parent node. 
-type inTree struct { - parent *node - children *nodes -} - -func newInTree() inTree { - return inTree{ - children: &nodes{}, - } -} - -func (it *inTree) assertUnattached() { - if it.parent != nil { - panic(fmt.Sprintf("node is already attached to %T", it.parent.content)) - } -} - -func (it *inTree) walkChildNodes(w internalWalkFunc) { - for n := it.children.first; n != nil; n = n.after { - w(n) - } -} - -func (it *inTree) BuildTokens(to Tokens) Tokens { - for n := it.children.first; n != nil; n = n.after { - to = n.BuildTokens(to) - } - return to -} - -// leafNode can be embedded into a content struct to give it a do-nothing -// implementation of walkChildNodes -type leafNode struct { -} - -func (n *leafNode) walkChildNodes(w internalWalkFunc) { -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go deleted file mode 100644 index d6cf532e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go +++ /dev/null @@ -1,599 +0,0 @@ -package hclwrite - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// Our "parser" here is actually not doing any parsing of its own. Instead, -// it leans on the native parser in hclsyntax, and then uses the source ranges -// from the AST to partition the raw token sequence to match the raw tokens -// up to AST nodes. -// -// This strategy feels somewhat counter-intuitive, since most of the work the -// parser does is thrown away here, but this strategy is chosen because the -// normal parsing work done by hclsyntax is considered to be the "main case", -// while modifying and re-printing source is more of an edge case, used only -// in ancillary tools, and so it's good to keep all the main parsing logic -// with the main case but keep all of the extra complexity of token wrangling -// out of the main parser, which is already rather complex just serving the -// use-cases it already serves. -// -// If the parsing step produces any errors, the returned File is nil because -// we can't reliably extract tokens from the partial AST produced by an -// erroneous parse. -func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { - file, diags := hclsyntax.ParseConfig(src, filename, start) - if diags.HasErrors() { - return nil, diags - } - - // To do our work here, we use the "native" tokens (those from hclsyntax) - // to match against source ranges in the AST, but ultimately produce - // slices from our sequence of "writer" tokens, which contain only - // *relative* position information that is more appropriate for - // transformation/writing use-cases. - nativeTokens, diags := hclsyntax.LexConfig(src, filename, start) - if diags.HasErrors() { - // should never happen, since we would've caught these diags in - // the first call above. 
- return nil, diags - } - writerTokens := writerTokens(nativeTokens) - - from := inputTokens{ - nativeTokens: nativeTokens, - writerTokens: writerTokens, - } - - before, root, after := parseBody(file.Body.(*hclsyntax.Body), from) - ret := &File{ - inTree: newInTree(), - - srcBytes: src, - body: root, - } - - nodes := ret.inTree.children - nodes.Append(before.Tokens()) - nodes.AppendNode(root) - nodes.Append(after.Tokens()) - - return ret, diags -} - -type inputTokens struct { - nativeTokens hclsyntax.Tokens - writerTokens Tokens -} - -func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) { - start, end := partitionTokens(it.nativeTokens, rng) - before = it.Slice(0, start) - within = it.Slice(start, end) - after = it.Slice(end, len(it.nativeTokens)) - return -} - -func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) { - for i, t := range it.writerTokens { - if t.Type == ty { - return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens)) - } - } - panic(fmt.Sprintf("didn't find any token of type %s", ty)) -} - -func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) { - before, within, after := it.PartitionType(ty) - if within.Len() != 1 { - panic("PartitionType found more than one token") - } - return before, within.Tokens()[0], after -} - -// PartitionIncludingComments is like Partition except the returned "within" -// range includes any lead and line comments associated with the range. -func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) { - start, end := partitionTokens(it.nativeTokens, rng) - start = partitionLeadCommentTokens(it.nativeTokens[:start]) - _, afterNewline := partitionLineEndTokens(it.nativeTokens[end:]) - end += afterNewline - - before = it.Slice(0, start) - within = it.Slice(start, end) - after = it.Slice(end, len(it.nativeTokens)) - return - -} - -// PartitionBlockItem is similar to PartitionIncludingComments but it returns -// the comments as separate token sequences so that they can be captured into -// AST attributes. It makes assumptions that apply only to block items, so -// should not be used for other constructs. -func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) { - before, within, after = it.Partition(rng) - before, leadComments = before.PartitionLeadComments() - lineComments, newline, after = after.PartitionLineEndTokens() - return -} - -func (it inputTokens) PartitionLeadComments() (before, within inputTokens) { - start := partitionLeadCommentTokens(it.nativeTokens) - before = it.Slice(0, start) - within = it.Slice(start, len(it.nativeTokens)) - return -} - -func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) { - afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens) - comments = it.Slice(0, afterComments) - newline = it.Slice(afterComments, afterNewline) - after = it.Slice(afterNewline, len(it.nativeTokens)) - return -} - -func (it inputTokens) Slice(start, end int) inputTokens { - // When we slice, we create a new slice with no additional capacity because - // we expect that these slices will be mutated in order to insert - // new code into the AST, and we want to ensure that a new underlying - // array gets allocated in that case, rather than writing into some - // following slice and corrupting it.
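The full slice expression that comment describes is a general Go idiom: a third index caps the new slice's capacity, so a later append must allocate a fresh backing array instead of writing into the shared one. A standalone illustration, not from this codebase:

package main

import "fmt"

func main() {
	base := []int{10, 20, 30, 40}
	s := base[1:3:3]  // len 2, cap 2: capacity capped at index 3
	s = append(s, 99) // must allocate; cannot overwrite base[3]
	fmt.Println(base, s) // [10 20 30 40] [20 30 99]
}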
- return inputTokens{ - nativeTokens: it.nativeTokens[start:end:end], - writerTokens: it.writerTokens[start:end:end], - } -} - -func (it inputTokens) Len() int { - return len(it.nativeTokens) -} - -func (it inputTokens) Tokens() Tokens { - return it.writerTokens -} - -func (it inputTokens) Types() []hclsyntax.TokenType { - ret := make([]hclsyntax.TokenType, len(it.nativeTokens)) - for i, tok := range it.nativeTokens { - ret[i] = tok.Type - } - return ret -} - -// parseBody locates the given body within the given input tokens and returns -// the resulting *Body object as well as the tokens that appeared before and -// after it. -func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) { - before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange) - - // The main AST doesn't retain the original source ordering of the - // body items, so we need to reconstruct that ordering by inspecting - // their source ranges. - nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks)) - for _, nativeAttr := range nativeBody.Attributes { - nativeItems = append(nativeItems, nativeAttr) - } - for _, nativeBlock := range nativeBody.Blocks { - nativeItems = append(nativeItems, nativeBlock) - } - sort.Sort(nativeNodeSorter{nativeItems}) - - body := &Body{ - inTree: newInTree(), - items: newNodeSet(), - } - - remain := within - for _, nativeItem := range nativeItems { - beforeItem, item, afterItem := parseBodyItem(nativeItem, remain) - - if beforeItem.Len() > 0 { - body.AppendUnstructuredTokens(beforeItem.Tokens()) - } - body.appendItemNode(item) - - remain = afterItem - } - - if remain.Len() > 0 { - body.AppendUnstructuredTokens(remain.Tokens()) - } - - return before, newNode(body), after -} - -func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) { - before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range()) - - var item *node - - switch tItem := nativeItem.(type) { - case *hclsyntax.Attribute: - item = parseAttribute(tItem, within, leadComments, lineComments, newline) - case *hclsyntax.Block: - item = parseBlock(tItem, within, leadComments, lineComments, newline) - default: - // should never happen if caller is behaving - panic("unsupported native item type") - } - - return before, item, after -} - -func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node { - attr := &Attribute{ - inTree: newInTree(), - } - children := attr.inTree.children - - { - cn := newNode(newComments(leadComments.Tokens())) - attr.leadComments = cn - children.AppendNode(cn) - } - - before, nameTokens, from := from.Partition(nativeAttr.NameRange) - { - children.AppendUnstructuredTokens(before.Tokens()) - if nameTokens.Len() != 1 { - // Should never happen with valid input - panic("attribute name is not exactly one token") - } - token := nameTokens.Tokens()[0] - in := newNode(newIdentifier(token)) - attr.name = in - children.AppendNode(in) - } - - before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendUnstructuredTokens(equalsTokens.Tokens()) - - before, exprTokens, from := from.Partition(nativeAttr.Expr.Range()) - { - children.AppendUnstructuredTokens(before.Tokens()) - exprNode := parseExpression(nativeAttr.Expr, exprTokens) - attr.expr = exprNode - children.AppendNode(exprNode) - } - - { - cn := 
newNode(newComments(lineComments.Tokens())) - attr.lineComments = cn - children.AppendNode(cn) - } - - children.AppendUnstructuredTokens(newline.Tokens()) - - // Collect any stragglers, though there shouldn't be any - children.AppendUnstructuredTokens(from.Tokens()) - - return newNode(attr) -} - -func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node { - block := &Block{ - inTree: newInTree(), - labels: newNodeSet(), - } - children := block.inTree.children - - { - cn := newNode(newComments(leadComments.Tokens())) - block.leadComments = cn - children.AppendNode(cn) - } - - before, typeTokens, from := from.Partition(nativeBlock.TypeRange) - { - children.AppendUnstructuredTokens(before.Tokens()) - if typeTokens.Len() != 1 { - // Should never happen with valid input - panic("block type name is not exactly one token") - } - token := typeTokens.Tokens()[0] - in := newNode(newIdentifier(token)) - block.typeName = in - children.AppendNode(in) - } - - for _, rng := range nativeBlock.LabelRanges { - var labelTokens inputTokens - before, labelTokens, from = from.Partition(rng) - children.AppendUnstructuredTokens(before.Tokens()) - tokens := labelTokens.Tokens() - var ln *node - if len(tokens) == 1 && tokens[0].Type == hclsyntax.TokenIdent { - ln = newNode(newIdentifier(tokens[0])) - } else { - ln = newNode(newQuoted(tokens)) - } - block.labels.Add(ln) - children.AppendNode(ln) - } - - before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendUnstructuredTokens(oBrace.Tokens()) - - // We go a bit out of order here: we go hunting for the closing brace - // so that we have a delimited body, but then we'll deal with the body - // before we actually append the closing brace and any straggling tokens - // that appear after it. - bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange) - before, body, after := parseBody(nativeBlock.Body, bodyTokens) - children.AppendUnstructuredTokens(before.Tokens()) - block.body = body - children.AppendNode(body) - children.AppendUnstructuredTokens(after.Tokens()) - - children.AppendUnstructuredTokens(cBrace.Tokens()) - - // stragglers - children.AppendUnstructuredTokens(from.Tokens()) - if lineComments.Len() > 0 { - // blocks don't actually have line comments, so we'll just treat - // them as extra stragglers - children.AppendUnstructuredTokens(lineComments.Tokens()) - } - children.AppendUnstructuredTokens(newline.Tokens()) - - return newNode(block) -} - -func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node { - expr := newExpression() - children := expr.inTree.children - - nativeVars := nativeExpr.Variables() - - for _, nativeTraversal := range nativeVars { - before, traversal, after := parseTraversal(nativeTraversal, from) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendNode(traversal) - expr.absTraversals.Add(traversal) - from = after - } - // Attach any stragglers that don't belong to a traversal to the expression - // itself. In an expression with no traversals at all, this is just the - // entirety of "from". 
- children.AppendUnstructuredTokens(from.Tokens()) - - return newNode(expr) -} - -func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) { - traversal := newTraversal() - children := traversal.inTree.children - before, from, after = from.Partition(nativeTraversal.SourceRange()) - - stepAfter := from - for _, nativeStep := range nativeTraversal { - before, step, after := parseTraversalStep(nativeStep, stepAfter) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendNode(step) - traversal.steps.Add(step) - stepAfter = after - } - - return before, newNode(traversal), after -} - -func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) { - var children *nodes - switch tNativeStep := nativeStep.(type) { - - case hcl.TraverseRoot, hcl.TraverseAttr: - step := newTraverseName() - children = step.inTree.children - before, from, after = from.Partition(nativeStep.SourceRange()) - inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent) - name := newIdentifier(token) - children.AppendUnstructuredTokens(inBefore.Tokens()) - step.name = children.Append(name) - children.AppendUnstructuredTokens(inAfter.Tokens()) - return before, newNode(step), after - - case hcl.TraverseIndex: - step := newTraverseIndex() - children = step.inTree.children - before, from, after = from.Partition(nativeStep.SourceRange()) - - var inBefore, oBrack, keyTokens, cBrack inputTokens - inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack) - children.AppendUnstructuredTokens(inBefore.Tokens()) - children.AppendUnstructuredTokens(oBrack.Tokens()) - keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack) - - keyVal := tNativeStep.Key - switch keyVal.Type() { - case cty.String: - key := newQuoted(keyTokens.Tokens()) - step.key = children.Append(key) - case cty.Number: - valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit) - children.AppendUnstructuredTokens(valBefore.Tokens()) - key := newNumber(valToken) - step.key = children.Append(key) - children.AppendUnstructuredTokens(valAfter.Tokens()) - } - - children.AppendUnstructuredTokens(cBrack.Tokens()) - children.AppendUnstructuredTokens(from.Tokens()) - - return before, newNode(step), after - default: - panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep)) - } - -} - -// writerTokens takes a sequence of tokens as produced by the main hclsyntax -// package and transforms it into an equivalent sequence of tokens using -// this package's own token model. -// -// The resulting list contains the same number of tokens and uses the same -// indices as the input, allowing the two sets of tokens to be correlated -// by index. -func writerTokens(nativeTokens hclsyntax.Tokens) Tokens { - // Ultimately we want a slice of token _pointers_, but since we can - // predict how much memory we're going to devote to tokens we'll allocate - // it all as a single flat buffer and thus give the GC less work to do. - tokBuf := make([]Token, len(nativeTokens)) - var lastByteOffset int - for i, mainToken := range nativeTokens { - // Create a copy of the bytes so that we can mutate without - // corrupting the original token stream. 
- bytes := make([]byte, len(mainToken.Bytes)) - copy(bytes, mainToken.Bytes) - - tokBuf[i] = Token{ - Type: mainToken.Type, - Bytes: bytes, - - // We assume here that spaces are always ASCII spaces, since - // that's what the scanner also assumes, and thus the number - // of bytes skipped is also the number of space characters. - SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset, - } - - lastByteOffset = mainToken.Range.End.Byte - } - - // Now make a slice of pointers into the previous slice. - ret := make(Tokens, len(tokBuf)) - for i := range ret { - ret[i] = &tokBuf[i] - } - - return ret -} - -// partitionTokens takes a sequence of tokens and a hcl.Range and returns -// two indices within the token sequence that correspond with the range -// boundaries, such that the slice operator could be used to produce -// three token sequences for before, within, and after respectively: -// -// start, end := partitionTokens(toks, rng) -// before := toks[:start] -// within := toks[start:end] -// after := toks[end:] -// -// This works best when the range is aligned with token boundaries (e.g. -// because it was produced in terms of the scanner's result) but if that isn't -// true then it will make a best effort that may produce strange results at -// the boundaries. -// -// Native hclsyntax tokens are used here, because they contain the necessary -// absolute position information. However, since writerTokens produces a -// correlatable sequence of writer tokens, the resulting indices can be -// used also to index into its result, allowing the partitioning of writer -// tokens to be driven by the partitioning of native tokens. -// -// The tokens are assumed to be in source order and non-overlapping, which -// will be true if the token sequence from the scanner is used directly. -func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) { - // We use a linear search here because we assume that in most cases our - // target range is close to the beginning of the sequence, and the sequences - // are generally small for most reasonable files anyway. - for i := 0; ; i++ { - if i >= len(toks) { - // No tokens for the given range at all! - return len(toks), len(toks) - } - - if toks[i].Range.Start.Byte >= rng.Start.Byte { - start = i - break - } - } - - for i := start; ; i++ { - if i >= len(toks) { - // The range "hangs off" the end of the token sequence - return start, len(toks) - } - - if toks[i].Range.Start.Byte >= rng.End.Byte { - end = i // end marker is exclusive - break - } - } - - return start, end -} - -// partitionLeadCommentTokens takes a sequence of tokens that is assumed -// to immediately precede a construct that can have lead comment tokens, -// and returns the index into that sequence where the lead comments begin. -// -// Lead comments are defined as whole lines containing only comment tokens -// with no blank lines between. If no such lines are found, the returned -// index will be len(toks). -func partitionLeadCommentTokens(toks hclsyntax.Tokens) int { - // single-line comments (which is what we're interested in here) - // consume their trailing newline, so we can just walk backwards - // until we stop seeing comment tokens.
- for i := len(toks) - 1; i >= 0; i-- { - if toks[i].Type != hclsyntax.TokenComment { - return i + 1 - } - } - return 0 -} - -// partitionLineEndTokens takes a sequence of tokens that is assumed -// to immediately follow a construct that can have a line comment, and -// returns first the index where any line comments end and then second -// the index immediately after the trailing newline. -// -// Line comments are defined as comments that appear immediately after -// a construct on the same line where its significant tokens ended. -// -// Since single-line comment tokens (# and //) include the newline that -// terminates them, in the presence of these the two returned indices -// will be the same since the comment itself serves as the line end. -func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) { - for i := 0; i < len(toks); i++ { - tok := toks[i] - if tok.Type != hclsyntax.TokenComment { - switch tok.Type { - case hclsyntax.TokenNewline: - return i, i + 1 - case hclsyntax.TokenEOF: - // Although this is valid, we mustn't include the EOF - // itself as our "newline" or else strange things will - // happen when we try to append new items. - return i, i - default: - // If we have well-formed input here then nothing else should be - // possible. This path should never happen, because we only try - // to extract tokens from the sequence if the parser succeeded, - // and it should catch this problem itself. - panic("malformed line trailers: expected only comments and newlines") - } - } - - if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { - // Newline at the end of a single-line comment serves both as - // the end of comments *and* the end of the line. - return i + 1, i + 1 - } - } - return len(toks), len(toks) -} - -// lexConfig uses the hclsyntax scanner to get a token stream and then -// rewrites it into this package's token model. -// -// Any errors produced during scanning are ignored, so the results of this -// function should be used with care. -func lexConfig(src []byte) Tokens { - mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1}) - return writerTokens(mainTokens) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go deleted file mode 100644 index 678a3aa4..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go +++ /dev/null @@ -1,44 +0,0 @@ -package hclwrite - -import ( - "bytes" - - "github.com/hashicorp/hcl/v2" -) - -// NewFile creates a new file object that is empty and ready to have constructs -// added to it. -func NewFile() *File { - body := &Body{ - inTree: newInTree(), - items: newNodeSet(), - } - file := &File{ - inTree: newInTree(), - } - file.body = file.inTree.children.Append(body) - return file -} - -// ParseConfig interprets the given source bytes into a *hclwrite.File. The -// resulting AST can be used to perform surgical edits on the source code -// before turning it back into bytes again. -func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { - return parse(src, filename, start) -} - -// Format takes source code and performs simple whitespace changes to transform -// it to a canonical layout style. -// -// Format skips constructing an AST and works directly with tokens, so it -// is less expensive than formatting via the AST for situations where no other -// changes will be made.
It also ignores syntax errors and can thus be applied -// to partial source code, although the result in that case may not be -// desirable. -func Format(src []byte) []byte { - tokens := lexConfig(src) - format(tokens) - buf := &bytes.Buffer{} - tokens.WriteTo(buf) - return buf.Bytes() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go deleted file mode 100644 index 3bd3e5f4..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go +++ /dev/null @@ -1,122 +0,0 @@ -package hclwrite - -import ( - "bytes" - "io" - - "github.com/apparentlymart/go-textseg/textseg" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// Token is a single sequence of bytes annotated with a type. It is similar -// in purpose to hclsyntax.Token, but discards the source position information -// since that is not useful in code generation. -type Token struct { - Type hclsyntax.TokenType - Bytes []byte - - // We record the number of spaces before each token so that we can - // reproduce the exact layout of the original file when we're making - // surgical changes in-place. When _new_ code is created it will always - // be in the canonical style, but we preserve layout of existing code. - SpacesBefore int -} - -// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token. -// A complete token is not possible since we don't have source location -// information here, and so this method is unexported so we can be sure it will -// only be used for internal purposes where we know the range isn't important. -// -// This is primarily intended to allow us to re-use certain functionality from -// hclsyntax rather than re-implementing it against our own token type here. -func (t *Token) asHCLSyntax() hclsyntax.Token { - return hclsyntax.Token{ - Type: t.Type, - Bytes: t.Bytes, - Range: hcl.Range{ - Filename: "", - }, - } -} - -// Tokens is a flat list of tokens. -type Tokens []*Token - -func (ts Tokens) Bytes() []byte { - buf := &bytes.Buffer{} - ts.WriteTo(buf) - return buf.Bytes() -} - -func (ts Tokens) testValue() string { - return string(ts.Bytes()) -} - -// Columns returns the number of columns (grapheme clusters) the token sequence -// occupies. The result is not meaningful if there are newline or single-line -// comment tokens in the sequence. -func (ts Tokens) Columns() int { - ret := 0 - for _, token := range ts { - ret += token.SpacesBefore // spaces are always worth one column each - ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters) - ret += ct - } - return ret -} - -// WriteTo takes an io.Writer and writes the bytes for each token to it, -// along with the spacing that separates each token. In other words, this -// allows serializing the tokens to a file or other such byte stream. -func (ts Tokens) WriteTo(wr io.Writer) (int64, error) { - // We know we're going to be writing a lot of small chunks of repeated - // space characters, so we'll prepare a buffer of these that we can - // easily pass to wr.Write without any further allocation. 
- spaces := make([]byte, 40) - for i := range spaces { - spaces[i] = ' ' - } - - var n int64 - var err error - for _, token := range ts { - if err != nil { - return n, err - } - - for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) { - thisChunk := spacesBefore - if thisChunk > len(spaces) { - thisChunk = len(spaces) - } - var thisN int - thisN, err = wr.Write(spaces[:thisChunk]) - n += int64(thisN) - if err != nil { - return n, err - } - } - - var thisN int - thisN, err = wr.Write(token.Bytes) - n += int64(thisN) - } - - return n, err -} - -func (ts Tokens) walkChildNodes(w internalWalkFunc) { - // Unstructured tokens have no child nodes -} - -func (ts Tokens) BuildTokens(to Tokens) Tokens { - return append(to, ts...) -} - -func newIdentToken(name string) *Token { - return &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(name), - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/ast.go b/vendor/github.com/hashicorp/hcl/v2/json/ast.go deleted file mode 100644 index 9c580ca3..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/ast.go +++ /dev/null @@ -1,121 +0,0 @@ -package json - -import ( - "math/big" - - "github.com/hashicorp/hcl/v2" -) - -type node interface { - Range() hcl.Range - StartRange() hcl.Range -} - -type objectVal struct { - Attrs []*objectAttr - SrcRange hcl.Range // range of the entire object, brace-to-brace - OpenRange hcl.Range // range of the opening brace - CloseRange hcl.Range // range of the closing brace -} - -func (n *objectVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *objectVal) StartRange() hcl.Range { - return n.OpenRange -} - -type objectAttr struct { - Name string - Value node - NameRange hcl.Range // range of the name string -} - -func (n *objectAttr) Range() hcl.Range { - return n.NameRange -} - -func (n *objectAttr) StartRange() hcl.Range { - return n.NameRange -} - -type arrayVal struct { - Values []node - SrcRange hcl.Range // range of the entire object, bracket-to-bracket - OpenRange hcl.Range // range of the opening bracket -} - -func (n *arrayVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *arrayVal) StartRange() hcl.Range { - return n.OpenRange -} - -type booleanVal struct { - Value bool - SrcRange hcl.Range -} - -func (n *booleanVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *booleanVal) StartRange() hcl.Range { - return n.SrcRange -} - -type numberVal struct { - Value *big.Float - SrcRange hcl.Range -} - -func (n *numberVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *numberVal) StartRange() hcl.Range { - return n.SrcRange -} - -type stringVal struct { - Value string - SrcRange hcl.Range -} - -func (n *stringVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *stringVal) StartRange() hcl.Range { - return n.SrcRange -} - -type nullVal struct { - SrcRange hcl.Range -} - -func (n *nullVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *nullVal) StartRange() hcl.Range { - return n.SrcRange -} - -// invalidVal is used as a placeholder where a value is needed for a valid -// parse tree but the input was invalid enough to prevent one from being -// created. 
-type invalidVal struct { - SrcRange hcl.Range -} - -func (n invalidVal) Range() hcl.Range { - return n.SrcRange -} - -func (n invalidVal) StartRange() hcl.Range { - return n.SrcRange -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go deleted file mode 100644 index fbdd8bff..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go +++ /dev/null @@ -1,33 +0,0 @@ -package json - -import ( - "github.com/agext/levenshtein" -) - -var keywords = []string{"false", "true", "null"} - -// keywordSuggestion tries to find a valid JSON keyword that is close to the -// given string and returns it if found. If no keyword is close enough, returns -// the empty string. -func keywordSuggestion(given string) string { - return nameSuggestion(given, keywords) -} - -// nameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func nameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/doc.go b/vendor/github.com/hashicorp/hcl/v2/json/doc.go deleted file mode 100644 index 84d73193..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package json is the JSON parser for HCL. It parses JSON files and returns -// implementations of the core HCL structural interfaces in terms of the -// JSON data inside. -// -// This is not a generic JSON parser. Instead, it deals with the mapping from -// the JSON information model to the HCL information model, using a number -// of hard-coded structural conventions. -// -// In most cases applications will not import this package directly, but will -// instead access its functionality indirectly through functions in the main -// "hcl" package and in the "hclparse" package. -package json diff --git a/vendor/github.com/hashicorp/hcl/v2/json/navigation.go b/vendor/github.com/hashicorp/hcl/v2/json/navigation.go deleted file mode 100644 index bc8a97f7..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/navigation.go +++ /dev/null @@ -1,70 +0,0 @@ -package json - -import ( - "fmt" - "strings" -) - -type navigation struct { - root node -} - -// Implementation of hcled.ContextString -func (n navigation) ContextString(offset int) string { - steps := navigationStepsRev(n.root, offset) - if steps == nil { - return "" - } - - // We built our slice backwards, so we'll reverse it in-place now. - half := len(steps) / 2 // integer division - for i := 0; i < half; i++ { - steps[i], steps[len(steps)-1-i] = steps[len(steps)-1-i], steps[i] - } - - ret := strings.Join(steps, "") - if len(ret) > 0 && ret[0] == '.' { - ret = ret[1:] - } - return ret -} - -func navigationStepsRev(v node, offset int) []string { - switch tv := v.(type) { - case *objectVal: - // Do any of our properties have an object that contains the target - // offset? 
- for _, attr := range tv.Attrs { - k := attr.Name - av := attr.Value - - switch av.(type) { - case *objectVal, *arrayVal: - // okay - default: - continue - } - - if av.Range().ContainsOffset(offset) { - return append(navigationStepsRev(av, offset), "."+k) - } - } - case *arrayVal: - // Do any of our elements contain the target offset? - for i, elem := range tv.Values { - - switch elem.(type) { - case *objectVal, *arrayVal: - // okay - default: - continue - } - - if elem.Range().ContainsOffset(offset) { - return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i)) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/parser.go b/vendor/github.com/hashicorp/hcl/v2/json/parser.go deleted file mode 100644 index 7a54c51b..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/parser.go +++ /dev/null @@ -1,496 +0,0 @@ -package json - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) { - tokens := scan(buf, pos{ - Filename: filename, - Pos: hcl.Pos{ - Byte: 0, - Line: 1, - Column: 1, - }, - }) - p := newPeeker(tokens) - node, diags := parseValue(p) - if len(diags) == 0 && p.Peek().Type != tokenEOF { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous data after value", - Detail: "Extra characters appear after the JSON value.", - Subject: p.Peek().Range.Ptr(), - }) - } - return node, diags -} - -func parseValue(p *peeker) (node, hcl.Diagnostics) { - tok := p.Peek() - - wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) { - if n != nil { - return n, diags - } - return invalidVal{tok.Range}, diags - } - - switch tok.Type { - case tokenBraceO: - return wrapInvalid(parseObject(p)) - case tokenBrackO: - return wrapInvalid(parseArray(p)) - case tokenNumber: - return wrapInvalid(parseNumber(p)) - case tokenString: - return wrapInvalid(parseString(p)) - case tokenKeyword: - return wrapInvalid(parseKeyword(p)) - case tokenBraceC: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing JSON value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - case tokenBrackC: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing array element value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - case tokenEOF: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing value", - Detail: "The JSON data ends prematurely.", - Subject: &tok.Range, - }, - }) - default: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid start of value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - } -} - -func tokenCanStartValue(tok token) bool { - switch tok.Type { - case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword: - return true - default: - return false - } -} - -func parseObject(p *peeker) (node, hcl.Diagnostics) { - var diags hcl.Diagnostics - - open := p.Read() - attrs := []*objectAttr{} - - // recover is used to shift the peeker to what seems to be the end of - // our object, so that when we encounter an error we leave the peeker - // at a reasonable 
point in the token stream to continue parsing. - recover := func(tok token) { - open := 1 - for { - switch tok.Type { - case tokenBraceO: - open++ - case tokenBraceC: - open-- - if open <= 1 { - return - } - case tokenEOF: - // Ran out of source before we were able to recover, - // so we'll bail here and let the caller deal with it. - return - } - tok = p.Read() - } - } - -Token: - for { - if p.Peek().Type == tokenBraceC { - break Token - } - - keyNode, keyDiags := parseValue(p) - diags = diags.Extend(keyDiags) - if keyNode == nil { - return nil, diags - } - - keyStrNode, ok := keyNode.(*stringVal) - if !ok { - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object property name", - Detail: "A JSON object property name must be a string", - Subject: keyNode.StartRange().Ptr(), - }) - } - - key := keyStrNode.Value - - colon := p.Read() - if colon.Type != tokenColon { - recover(colon) - - if colon.Type == tokenBraceC || colon.Type == tokenComma { - // Catch common mistake of using braces instead of brackets - // for an object. - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing object value", - Detail: "A JSON object attribute must have a value, introduced by a colon.", - Subject: &colon.Range, - }) - } - - if colon.Type == tokenEquals { - // Possible confusion with native HCL syntax. - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing property value colon", - Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.", - Subject: &colon.Range, - }) - } - - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing property value colon", - Detail: "A colon must appear between an object property's name and its value.", - Subject: &colon.Range, - }) - } - - valNode, valDiags := parseValue(p) - diags = diags.Extend(valDiags) - if valNode == nil { - return nil, diags - } - - attrs = append(attrs, &objectAttr{ - Name: key, - Value: valNode, - NameRange: keyStrNode.SrcRange, - }) - - switch p.Peek().Type { - case tokenComma: - comma := p.Read() - if p.Peek().Type == tokenBraceC { - // Special error message for this common mistake - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Trailing comma in object", - Detail: "JSON does not permit a trailing comma after the final property in an object.", - Subject: &comma.Range, - }) - } - continue Token - case tokenEOF: - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unclosed object", - Detail: "No closing brace was found for this JSON object.", - Subject: &open.Range, - }) - case tokenBrackC: - // Consume the bracket anyway, so that we don't return with the peeker - // at a strange place. 
-			p.Read()
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Mismatched braces",
-				Detail:   "A JSON object must be closed with a brace, not a bracket.",
-				Subject:  p.Peek().Range.Ptr(),
-			})
-		case tokenBraceC:
-			break Token
-		default:
-			recover(p.Read())
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Missing attribute separator comma",
-				Detail:   "A comma must appear between each property definition in an object.",
-				Subject:  p.Peek().Range.Ptr(),
-			})
-		}
-
-	}
-
-	close := p.Read()
-	return &objectVal{
-		Attrs:      attrs,
-		SrcRange:   hcl.RangeBetween(open.Range, close.Range),
-		OpenRange:  open.Range,
-		CloseRange: close.Range,
-	}, diags
-}
-
-func parseArray(p *peeker) (node, hcl.Diagnostics) {
-	var diags hcl.Diagnostics
-
-	open := p.Read()
-	vals := []node{}
-
-	// recover is used to shift the peeker to what seems to be the end of
-	// our array, so that when we encounter an error we leave the peeker
-	// at a reasonable point in the token stream to continue parsing.
-	recover := func(tok token) {
-		open := 1
-		for {
-			switch tok.Type {
-			case tokenBrackO:
-				open++
-			case tokenBrackC:
-				open--
-				if open <= 1 {
-					return
-				}
-			case tokenEOF:
-				// Ran out of source before we were able to recover,
-				// so we'll bail here and let the caller deal with it.
-				return
-			}
-			tok = p.Read()
-		}
-	}
-
-Token:
-	for {
-		if p.Peek().Type == tokenBrackC {
-			break Token
-		}
-
-		valNode, valDiags := parseValue(p)
-		diags = diags.Extend(valDiags)
-		if valNode == nil {
-			return nil, diags
-		}
-
-		vals = append(vals, valNode)
-
-		switch p.Peek().Type {
-		case tokenComma:
-			comma := p.Read()
-			if p.Peek().Type == tokenBrackC {
-				// Special error message for this common mistake
-				return nil, diags.Append(&hcl.Diagnostic{
-					Severity: hcl.DiagError,
-					Summary:  "Trailing comma in array",
-					Detail:   "JSON does not permit a trailing comma after the final value in an array.",
-					Subject:  &comma.Range,
-				})
-			}
-			continue Token
-		case tokenColon:
-			recover(p.Read())
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid array value",
-				Detail:   "A colon is not used to introduce values in a JSON array.",
-				Subject:  p.Peek().Range.Ptr(),
-			})
-		case tokenEOF:
-			recover(p.Read())
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Unclosed array",
-				Detail:   "No closing bracket was found for this JSON array.",
-				Subject:  &open.Range,
-			})
-		case tokenBraceC:
-			recover(p.Read())
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Mismatched brackets",
-				Detail:   "A JSON array must be closed with a bracket, not a brace.",
-				Subject:  p.Peek().Range.Ptr(),
-			})
-		case tokenBrackC:
-			break Token
-		default:
-			recover(p.Read())
-			return nil, diags.Append(&hcl.Diagnostic{
-				Severity: hcl.DiagError,
-				Summary:  "Missing attribute separator comma",
-				Detail:   "A comma must appear between each value in an array.",
-				Subject:  p.Peek().Range.Ptr(),
-			})
-		}
-
-	}
-
-	close := p.Read()
-	return &arrayVal{
-		Values:    vals,
-		SrcRange:  hcl.RangeBetween(open.Range, close.Range),
-		OpenRange: open.Range,
-	}, diags
-}
-
-func parseNumber(p *peeker) (node, hcl.Diagnostics) {
-	tok := p.Read()
-
-	// Use encoding/json to validate the number syntax.
-	// TODO: Do this more directly to produce better diagnostics.
-	var num json.Number
-	err := json.Unmarshal(tok.Bytes, &num)
-	if err != nil {
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid JSON number",
-				Detail:   fmt.Sprintf("There is a syntax error in the given JSON number."),
-				Subject:  &tok.Range,
-			},
-		}
-	}
-
-	// We want to guarantee that we parse numbers the same way as cty (and thus
-	// native syntax HCL) would here, so we'll use the cty parser even though
-	// in most other cases we don't actually introduce cty concepts until
-	// decoding time. We'll unwrap the parsed float immediately afterwards, so
-	// the cty value is just a temporary helper.
-	nv, err := cty.ParseNumberVal(string(num))
-	if err != nil {
-		// Should never happen if above passed, since JSON numbers are a subset
-		// of what cty can parse...
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Invalid JSON number",
-				Detail:   fmt.Sprintf("There is a syntax error in the given JSON number."),
-				Subject:  &tok.Range,
-			},
-		}
-	}
-
-	return &numberVal{
-		Value:    nv.AsBigFloat(),
-		SrcRange: tok.Range,
-	}, nil
-}
-
-func parseString(p *peeker) (node, hcl.Diagnostics) {
-	tok := p.Read()
-	var str string
-	err := json.Unmarshal(tok.Bytes, &str)
-
-	if err != nil {
-		var errRange hcl.Range
-		if serr, ok := err.(*json.SyntaxError); ok {
-			errOfs := serr.Offset
-			errPos := tok.Range.Start
-			errPos.Byte += int(errOfs)
-
-			// TODO: Use the byte offset to properly count unicode
-			// characters for the column, and mark the whole of the
-			// character that was wrong as part of our range.
-			errPos.Column += int(errOfs)
-
-			errEndPos := errPos
-			errEndPos.Byte++
-			errEndPos.Column++
-
-			errRange = hcl.Range{
-				Filename: tok.Range.Filename,
-				Start:    errPos,
-				End:      errEndPos,
-			}
-		} else {
-			errRange = tok.Range
-		}
-
-		var contextRange *hcl.Range
-		if errRange != tok.Range {
-			contextRange = &tok.Range
-		}
-
-		// FIXME: Eventually we should parse strings directly here so
-		// we can produce a more useful error message in the face of things
-		// such as invalid escapes, etc.
- return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON string", - Detail: fmt.Sprintf("There is a syntax error in the given JSON string."), - Subject: &errRange, - Context: contextRange, - }, - } - } - - return &stringVal{ - Value: str, - SrcRange: tok.Range, - }, nil -} - -func parseKeyword(p *peeker) (node, hcl.Diagnostics) { - tok := p.Read() - s := string(tok.Bytes) - - switch s { - case "true": - return &booleanVal{ - Value: true, - SrcRange: tok.Range, - }, nil - case "false": - return &booleanVal{ - Value: false, - SrcRange: tok.Range, - }, nil - case "null": - return &nullVal{ - SrcRange: tok.Range, - }, nil - case "undefined", "NaN", "Infinity": - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON keyword", - Detail: fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s), - Subject: &tok.Range, - }, - } - default: - var dym string - if suggest := keywordSuggestion(s); suggest != "" { - dym = fmt.Sprintf(" Did you mean %q?", suggest) - } - - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON keyword", - Detail: fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym), - Subject: &tok.Range, - }, - } - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/peeker.go b/vendor/github.com/hashicorp/hcl/v2/json/peeker.go deleted file mode 100644 index fc7bbf58..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/peeker.go +++ /dev/null @@ -1,25 +0,0 @@ -package json - -type peeker struct { - tokens []token - pos int -} - -func newPeeker(tokens []token) *peeker { - return &peeker{ - tokens: tokens, - pos: 0, - } -} - -func (p *peeker) Peek() token { - return p.tokens[p.pos] -} - -func (p *peeker) Read() token { - ret := p.tokens[p.pos] - if ret.Type != tokenEOF { - p.pos++ - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/public.go b/vendor/github.com/hashicorp/hcl/v2/json/public.go deleted file mode 100644 index 8dc4a36a..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/public.go +++ /dev/null @@ -1,94 +0,0 @@ -package json - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/hashicorp/hcl/v2" -) - -// Parse attempts to parse the given buffer as JSON and, if successful, returns -// a hcl.File for the HCL configuration represented by it. -// -// This is not a generic JSON parser. Instead, it deals only with the profile -// of JSON used to express HCL configuration. -// -// The returned file is valid only if the returned diagnostics returns false -// from its HasErrors method. If HasErrors returns true, the file represents -// the subset of data that was able to be parsed, which may be none. -func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - rootNode, diags := parseFileContent(src, filename) - - switch rootNode.(type) { - case *objectVal, *arrayVal: - // okay - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Root value must be object", - Detail: "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.", - Subject: rootNode.StartRange().Ptr(), - }) - - // Since we've already produced an error message for this being - // invalid, we'll return an empty placeholder here so that trying to - // extract content from our root body won't produce a redundant - // error saying the same thing again in more general terms. 
-		fakePos := hcl.Pos{
-			Byte:   0,
-			Line:   1,
-			Column: 1,
-		}
-		fakeRange := hcl.Range{
-			Filename: filename,
-			Start:    fakePos,
-			End:      fakePos,
-		}
-		rootNode = &objectVal{
-			Attrs:     []*objectAttr{},
-			SrcRange:  fakeRange,
-			OpenRange: fakeRange,
-		}
-	}
-
-	file := &hcl.File{
-		Body: &body{
-			val: rootNode,
-		},
-		Bytes: src,
-		Nav:   navigation{rootNode},
-	}
-	return file, diags
-}
-
-// ParseFile is a convenience wrapper around Parse that first attempts to load
-// data from the given filename, passing the result to Parse if successful.
-//
-// If the file cannot be read, an error diagnostic with nil context is returned.
-func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) {
-	f, err := os.Open(filename)
-	if err != nil {
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Failed to open file",
-				Detail:   fmt.Sprintf("The file %q could not be opened.", filename),
-			},
-		}
-	}
-	defer f.Close()
-
-	src, err := ioutil.ReadAll(f)
-	if err != nil {
-		return nil, hcl.Diagnostics{
-			{
-				Severity: hcl.DiagError,
-				Summary:  "Failed to read file",
-				Detail:   fmt.Sprintf("The file %q was opened, but an error occurred while reading it.", filename),
-			},
-		}
-	}
-
-	return Parse(src, filename)
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/json/scanner.go b/vendor/github.com/hashicorp/hcl/v2/json/scanner.go
deleted file mode 100644
index 912eabce..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/json/scanner.go
+++ /dev/null
@@ -1,297 +0,0 @@
-package json
-
-import (
-	"fmt"
-
-	"github.com/apparentlymart/go-textseg/textseg"
-	"github.com/hashicorp/hcl/v2"
-)
-
-//go:generate stringer -type tokenType scanner.go
-type tokenType rune
-
-const (
-	tokenBraceO  tokenType = '{'
-	tokenBraceC  tokenType = '}'
-	tokenBrackO  tokenType = '['
-	tokenBrackC  tokenType = ']'
-	tokenComma   tokenType = ','
-	tokenColon   tokenType = ':'
-	tokenKeyword tokenType = 'K'
-	tokenString  tokenType = 'S'
-	tokenNumber  tokenType = 'N'
-	tokenEOF     tokenType = '␄'
-	tokenInvalid tokenType = 0
-	tokenEquals  tokenType = '=' // used only for reminding the user of JSON syntax
-)
-
-type token struct {
-	Type  tokenType
-	Bytes []byte
-	Range hcl.Range
-}
-
-// scan returns the primary tokens for the given JSON buffer in sequence.
-//
-// The responsibility of this pass is to just mark the slices of the buffer
-// as being of various types. It is lax in how it interprets the multi-byte
-// token types keyword, string and number, preferring to capture erroneous
-// extra bytes that we presume the user intended to be part of the token
-// so that we can generate more helpful diagnostics in the parser.
-func scan(buf []byte, start pos) []token { - var tokens []token - p := start - for { - if len(buf) == 0 { - tokens = append(tokens, token{ - Type: tokenEOF, - Bytes: nil, - Range: posRange(p, p), - }) - return tokens - } - - buf, p = skipWhitespace(buf, p) - - if len(buf) == 0 { - tokens = append(tokens, token{ - Type: tokenEOF, - Bytes: nil, - Range: posRange(p, p), - }) - return tokens - } - - start = p - - first := buf[0] - switch { - case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=': - p.Pos.Column++ - p.Pos.Byte++ - tokens = append(tokens, token{ - Type: tokenType(first), - Bytes: buf[0:1], - Range: posRange(start, p), - }) - buf = buf[1:] - case first == '"': - var tokBuf []byte - tokBuf, buf, p = scanString(buf, p) - tokens = append(tokens, token{ - Type: tokenString, - Bytes: tokBuf, - Range: posRange(start, p), - }) - case byteCanStartNumber(first): - var tokBuf []byte - tokBuf, buf, p = scanNumber(buf, p) - tokens = append(tokens, token{ - Type: tokenNumber, - Bytes: tokBuf, - Range: posRange(start, p), - }) - case byteCanStartKeyword(first): - var tokBuf []byte - tokBuf, buf, p = scanKeyword(buf, p) - tokens = append(tokens, token{ - Type: tokenKeyword, - Bytes: tokBuf, - Range: posRange(start, p), - }) - default: - tokens = append(tokens, token{ - Type: tokenInvalid, - Bytes: buf[:1], - Range: start.Range(1, 1), - }) - // If we've encountered an invalid then we might as well stop - // scanning since the parser won't proceed beyond this point. - return tokens - } - } -} - -func byteCanStartNumber(b byte) bool { - switch b { - // We are slightly more tolerant than JSON requires here since we - // expect the parser will make a stricter interpretation of the - // number bytes, but we specifically don't allow 'e' or 'E' here - // since we want the scanner to treat that as the start of an - // invalid keyword instead, to produce more intelligible error messages. - case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - return true - default: - return false - } -} - -func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) { - // The scanner doesn't check that the sequence of digit-ish bytes is - // in a valid order. The parser must do this when decoding a number - // token. - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - switch buf[i] { - case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - p.Pos.Byte++ - p.Pos.Column++ - default: - break Byte - } - } - return buf[:i], buf[i:], p -} - -func byteCanStartKeyword(b byte) bool { - switch { - // We allow any sequence of alphabetical characters here, even though - // JSON is more constrained, so that we can collect what we presume - // the user intended to be a single keyword and then check its validity - // in the parser, where we can generate better diagnostics. - // So e.g. we want to be able to say: - // unrecognized keyword "True". Did you mean "true"? - case isAlphabetical(b): - return true - default: - return false - } -} - -func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) { - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - b := buf[i] - switch { - case isAlphabetical(b) || b == '_': - p.Pos.Byte++ - p.Pos.Column++ - default: - break Byte - } - } - return buf[:i], buf[i:], p -} - -func scanString(buf []byte, start pos) ([]byte, []byte, pos) { - // The scanner doesn't validate correct use of escapes, etc. 
It pays - // attention to escapes only for the purpose of identifying the closing - // quote character. It's the parser's responsibility to do proper - // validation. - // - // The scanner also doesn't specifically detect unterminated string - // literals, though they can be identified in the parser by checking if - // the final byte in a string token is the double-quote character. - - // Skip the opening quote symbol - i := 1 - p := start - p.Pos.Byte++ - p.Pos.Column++ - escaping := false -Byte: - for i < len(buf) { - b := buf[i] - - switch { - case b == '\\': - escaping = !escaping - p.Pos.Byte++ - p.Pos.Column++ - i++ - case b == '"': - p.Pos.Byte++ - p.Pos.Column++ - i++ - if !escaping { - break Byte - } - escaping = false - case b < 32: - break Byte - default: - // Advance by one grapheme cluster, so that we consider each - // grapheme to be a "column". - // Ignoring error because this scanner cannot produce errors. - advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true) - - p.Pos.Byte += advance - p.Pos.Column++ - i += advance - - escaping = false - } - } - return buf[:i], buf[i:], p -} - -func skipWhitespace(buf []byte, start pos) ([]byte, pos) { - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - switch buf[i] { - case ' ': - p.Pos.Byte++ - p.Pos.Column++ - case '\n': - p.Pos.Byte++ - p.Pos.Column = 1 - p.Pos.Line++ - case '\r': - // For the purpose of line/column counting we consider a - // carriage return to take up no space, assuming that it will - // be paired up with a newline (on Windows, for example) that - // will account for both of them. - p.Pos.Byte++ - case '\t': - // We arbitrarily count a tab as if it were two spaces, because - // we need to choose _some_ number here. This means any system - // that renders code on-screen with markers must itself treat - // tabs as a pair of spaces for rendering purposes, or instead - // use the byte offset and back into its own column position. - p.Pos.Byte++ - p.Pos.Column += 2 - default: - break Byte - } - } - return buf[i:], p -} - -type pos struct { - Filename string - Pos hcl.Pos -} - -func (p *pos) Range(byteLen, charLen int) hcl.Range { - start := p.Pos - end := p.Pos - end.Byte += byteLen - end.Column += charLen - return hcl.Range{ - Filename: p.Filename, - Start: start, - End: end, - } -} - -func posRange(start, end pos) hcl.Range { - return hcl.Range{ - Filename: start.Filename, - Start: start.Pos, - End: end.Pos, - } -} - -func (t token) GoString() string { - return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range) -} - -func isAlphabetical(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/spec.md b/vendor/github.com/hashicorp/hcl/v2/json/spec.md deleted file mode 100644 index dac5729d..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/spec.md +++ /dev/null @@ -1,405 +0,0 @@ -# HCL JSON Syntax Specification - -This is the specification for the JSON serialization for hcl. HCL is a system -for defining configuration languages for applications. The HCL information -model is designed to support multiple concrete syntaxes for configuration, -and this JSON-based format complements [the native syntax](../hclsyntax/spec.md) -by being easy to machine-generate, whereas the native syntax is oriented -towards human authoring and maintenance - -This syntax is defined in terms of JSON as defined in -[RFC7159](https://tools.ietf.org/html/rfc7159). 
As such it inherits the JSON
-grammar as-is, and merely defines a specific methodology for interpreting
-JSON constructs into HCL structural elements and expressions.
-
-This mapping is defined such that valid JSON-serialized HCL input can be
-_produced_ using standard JSON implementations in various programming languages.
-_Parsing_ such JSON has some additional constraints beyond what is normally
-supported by JSON parsers, so a specialized parser may be required that
-is able to:
-
-- Preserve the relative ordering of properties defined in an object.
-- Preserve multiple definitions of the same property name.
-- Preserve numeric values to the precision required by the number type
-  in [the HCL syntax-agnostic information model](../spec.md).
-- Retain source location information for parsed tokens/constructs in order
-  to produce good error messages.
-
-## Structural Elements
-
-[The HCL syntax-agnostic information model](../spec.md) defines a _body_ as an
-abstract container for attribute definitions and child blocks. A body is
-represented in JSON as either a single JSON object or a JSON array of objects.
-
-Body processing is in terms of JSON object properties, visited in the order
-they appear in the input. Where a body is represented by a single JSON object,
-the properties of that object are visited in order. Where a body is
-represented by a JSON array, each of its elements is visited in order and
-each element has its properties visited in order. If any element of the array
-is not a JSON object then the input is erroneous.
-
-When a body is being processed in the _dynamic attributes_ mode, the allowance
-of a JSON array in the previous paragraph does not apply and instead a single
-JSON object is always required.
-
-As defined in the language-agnostic model, body processing is in terms
-of a schema which provides context for interpreting the body's content. For
-JSON bodies, the schema is crucial to allow differentiation of attribute
-definitions and block definitions, both of which are represented via object
-properties.
-
-The special property name `"//"`, when used in an object representing a HCL
-body, is parsed and ignored. A property with this name can be used to
-include human-readable comments. (This special property name is _not_
-processed in this way for any _other_ HCL constructs that are represented as
-JSON objects.)
-
-### Attributes
-
-Where the given schema describes an attribute with a given name, the object
-property with the matching name — if present — serves as the attribute's
-definition.
-
-When a body is being processed in the _dynamic attributes_ mode, each object
-property serves as an attribute definition for the attribute whose name
-matches the property name.
-
-The value of an attribute definition property is interpreted as an _expression_,
-as described in a later section.
-
-Given a schema that calls for an attribute named "foo", a JSON object like
-the following provides a definition for that attribute:
-
-```json
-{
-  "foo": "bar baz"
-}
-```
-
-### Blocks
-
-Where the given schema describes a block with a given type name, each object
-property with the matching name serves as a definition of zero or more blocks
-of that type.
-
-Processing of child blocks is in terms of nested JSON objects and arrays.
-If the schema defines one or more _labels_ for the block type, a nested JSON
-object or JSON array of objects is required for each labelling level.
These -are flattened to a single ordered sequence of object properties using the -same algorithm as for body content as defined above. Each object property -serves as a label value at the corresponding level. - -After any labelling levels, the next nested value is either a JSON object -representing a single block body, or a JSON array of JSON objects that each -represent a single block body. Use of an array accommodates the definition -of multiple blocks that have identical type and labels. - -Given a schema that calls for a block type named "foo" with no labels, the -following JSON objects are all valid definitions of zero or more blocks of this -type: - -```json -{ - "foo": { - "child_attr": "baz" - } -} -``` - -```json -{ - "foo": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] -} -``` - -```json -{ - "foo": [] -} -``` - -The first of these defines a single child block of type "foo". The second -defines _two_ such blocks. The final example shows a degenerate definition -of zero blocks, though generators should prefer to omit the property entirely -in this scenario. - -Given a schema that calls for a block type named "foo" with _two_ labels, the -extra label levels must be represented as objects or arrays of objects as in -the following examples: - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "boz": { - "baz": { - "child_attr": "baz" - } - } - } -} -``` - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "boz": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } -} -``` - -```json -{ - "foo": [ - { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - } - }, - { - "bar": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } - ] -} -``` - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "bar": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } -} -``` - -Arrays can be introduced at either the label definition or block body -definition levels to define multiple definitions of the same block type -or labels while preserving order. - -A JSON HCL parser _must_ support duplicate definitions of the same property -name within a single object, preserving all of them and the relative ordering -between them. The array-based forms are also required so that JSON HCL -configurations can be produced with JSON producing libraries that are not -able to preserve property definition order and multiple definitions of -the same property. - -## Expressions - -JSON lacks a native expression syntax, so the HCL JSON syntax instead defines -a mapping for each of the JSON value types, including a special mapping for -strings that allows optional use of arbitrary expressions. - -### Objects - -When interpreted as an expression, a JSON object represents a value of a HCL -object type. - -Each property of the JSON object represents an attribute of the HCL object type. -The property name string given in the JSON input is interpreted as a string -expression as described below, and its result is converted to string as defined -by the syntax-agnostic information model. If such a conversion is not possible, -an error is produced and evaluation fails. 
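To make the object mapping above concrete, here is an editor's sketch (not part of this specification or of the vendored sources) that drives it through the public `Parse` entry point of the vendored `hcl/v2/json` package shown elsewhere in this patch, together with `hcl.EvalContext` and `go-cty`. The `settings`, `env`, and `name` identifiers and the file name are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	hcljson "github.com/hashicorp/hcl/v2/json"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A JSON object used as an expression: in full expression mode its
	// property names are themselves string expressions.
	src := []byte(`{"settings": {"${env}_greeting": "Hello, ${name}!"}}`)

	f, diags := hcljson.Parse(src, "example.json")
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// The root body exposes "settings" as an attribute whose value is a
	// JSON object expression.
	attrs, _ := f.Body.JustAttributes()
	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{
		"env":  cty.StringVal("dev"),
		"name": cty.StringVal("HCL"),
	}}

	// The object evaluates to a cty object value; both the key template
	// and the value template are evaluated against ctx.
	val, diags := attrs["settings"].Expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(val.GetAttr("dev_greeting").AsString()) // Hello, HCL!
}
```
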
-
-An instance of the constructed object type is then created, whose values
-are interpreted by again recursively applying the mapping rules defined in
-this section to each of the property values.
-
-If any evaluated property name strings produce null values, an error is
-produced and evaluation fails. If any produce _unknown_ values, the _entire
-object's_ result is an unknown value of the dynamic pseudo-type, signalling
-that the type of the object cannot be determined.
-
-It is an error to define the same property name multiple times within a single
-JSON object interpreted as an expression. In full expression mode, this
-constraint applies to the name expression results after conversion to string,
-rather than the raw string that may contain interpolation expressions.
-
-### Arrays
-
-When interpreted as an expression, a JSON array represents a value of a HCL
-tuple type.
-
-Each element of the JSON array represents an element of the HCL tuple type.
-The tuple type is constructed by enumerating the JSON array elements, creating
-for each an element whose type is the result of recursively applying the
-expression mapping rules. Correspondence is preserved between the array element
-indices and the tuple element indices.
-
-An instance of the constructed tuple type is then created, whose values are
-interpreted by again recursively applying the mapping rules defined in this
-section.
-
-### Numbers
-
-When interpreted as an expression, a JSON number represents a HCL number value.
-
-HCL numbers are arbitrary-precision decimal values, so a JSON HCL parser must
-be able to translate exactly the value given to a number of corresponding
-precision, within the constraints set by the HCL syntax-agnostic information
-model.
-
-In practice, off-the-shelf JSON serializers often do not support customizing the
-processing of numbers, and instead force processing as 32-bit or 64-bit
-floating point values.
-
-A _producer_ of JSON HCL that uses such a serializer can provide numeric values
-as JSON strings where they have precision too great for representation in the
-serializer's chosen numeric type in situations where the result will be
-converted to number (using the standard conversion rules) by a calling
-application.
-
-Alternatively, for expressions that are evaluated in full expression mode an
-embedded template interpolation can be used to faithfully represent a number,
-such as `"${1e150}"`, which will then be evaluated by the underlying HCL native
-syntax expression evaluator.
-
-### Boolean Values
-
-The JSON boolean values `true` and `false`, when interpreted as expressions,
-represent the corresponding HCL boolean values.
-
-### The Null Value
-
-The JSON value `null`, when interpreted as an expression, represents a
-HCL null value of the dynamic pseudo-type.
-
-### Strings
-
-When interpreted as an expression, a JSON string may be interpreted in one of
-two ways depending on the evaluation mode.
-
-If evaluating in literal-only mode (as defined by the syntax-agnostic
-information model) the literal string is interpreted directly as a HCL string
-value, by directly using the exact sequence of unicode characters represented.
-Template interpolations and directives MUST NOT be processed in this mode,
-allowing any characters that appear as introduction sequences to pass through
-literally:
-
-```json
-"Hello world! Template sequences like ${ are not interpreted here."
-```
-When evaluating in full expression mode (again, as defined by the syntax-
-agnostic information model) the literal string is instead interpreted as a
-_standalone template_ in the HCL Native Syntax. The expression evaluation
-result is then the direct result of evaluating that template with the current
-variable scope and function table.
-
-```json
-"Hello, ${name}! Template sequences are interpreted in full expression mode."
-```
-
-In particular the _Template Interpolation Unwrapping_ requirement from the
-HCL native syntax specification must be implemented, allowing the use of
-single-interpolation templates to represent expressions that would not
-otherwise be representable in JSON, such as the following example where
-the result must be a number, rather than a string representation of a number:
-
-```json
-"${ a + b }"
-```
-
-## Static Analysis
-
-The HCL static analysis operations are implemented for JSON values that
-represent expressions, as described in the following sections.
-
-Due to the limited expressive power of the JSON syntax alone, use of these
-static analysis functions rather than normal expression evaluation serves
-as additional context for how a JSON value is to be interpreted, which means
-that static analyses can result in a different interpretation of a given
-expression than normal evaluation.
-
-### Static List
-
-An expression interpreted as a static list must be a JSON array. Each of the
-values in the array is interpreted as an expression and returned.
-
-### Static Map
-
-An expression interpreted as a static map must be a JSON object. Each of the
-key/value pairs in the object is presented as a pair of expressions. Since
-object property names are always strings, evaluating the key expression with
-a non-`nil` evaluation context will evaluate any template sequences given
-in the property name.
-
-### Static Call
-
-An expression interpreted as a static call must be a string. The content of
-the string is interpreted as a native syntax expression (not a _template_,
-unlike normal evaluation) and then the static call analysis is delegated to
-that expression.
-
-If the original expression is not a string or its contents cannot be parsed
-as a native syntax expression then static call analysis is not supported.
-
-### Static Traversal
-
-An expression interpreted as a static traversal must be a string. The content
-of the string is interpreted as a native syntax expression (not a _template_,
-unlike normal evaluation) and then static traversal analysis is delegated
-to that expression.
-
-If the original expression is not a string or its contents cannot be parsed
-as a native syntax expression then static traversal analysis is not supported.
diff --git a/vendor/github.com/hashicorp/hcl/v2/json/structure.go b/vendor/github.com/hashicorp/hcl/v2/json/structure.go
deleted file mode 100644
index 76c9d739..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/json/structure.go
+++ /dev/null
@@ -1,637 +0,0 @@
-package json
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/hcl/v2/hclsyntax"
-	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/convert"
-)
-
-// body is the implementation of "Body" used for files processed with the JSON
-// parser.
-type body struct {
-	val node
-
-	// If non-nil, the keys of this map cause the corresponding attributes to
-	// be treated as non-existing. This is used when Body.PartialContent is
-	// called, to produce the "remaining content" Body.
- hiddenAttrs map[string]struct{} -} - -// expression is the implementation of "Expression" used for files processed -// with the JSON parser. -type expression struct { - src node -} - -func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, newBody, diags := b.PartialContent(schema) - - hiddenAttrs := newBody.(*body).hiddenAttrs - - var nameSuggestions []string - for _, attrS := range schema.Attributes { - if _, ok := hiddenAttrs[attrS.Name]; !ok { - // Only suggest an attribute name if we didn't use it already. - nameSuggestions = append(nameSuggestions, attrS.Name) - } - } - for _, blockS := range schema.Blocks { - // Blocks can appear multiple times, so we'll suggest their type - // names regardless of whether they've already been used. - nameSuggestions = append(nameSuggestions, blockS.Type) - } - - jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) - diags = append(diags, attrDiags...) - - for _, attr := range jsonAttrs { - k := attr.Name - if k == "//" { - // Ignore "//" keys in objects representing bodies, to allow - // their use as comments. - continue - } - - if _, ok := hiddenAttrs[k]; !ok { - suggestion := nameSuggestion(k, nameSuggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous JSON object property", - Detail: fmt.Sprintf("No argument or block type is named %q.%s", k, suggestion), - Subject: &attr.NameRange, - Context: attr.Range().Ptr(), - }) - } - } - - return content, diags -} - -func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - - jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) - diags = append(diags, attrDiags...) - - usedNames := map[string]struct{}{} - if b.hiddenAttrs != nil { - for k := range b.hiddenAttrs { - usedNames[k] = struct{}{} - } - } - - content := &hcl.BodyContent{ - Attributes: map[string]*hcl.Attribute{}, - Blocks: nil, - - MissingItemRange: b.MissingItemRange(), - } - - // Create some more convenient data structures for our work below. - attrSchemas := map[string]hcl.AttributeSchema{} - blockSchemas := map[string]hcl.BlockHeaderSchema{} - for _, attrS := range schema.Attributes { - attrSchemas[attrS.Name] = attrS - } - for _, blockS := range schema.Blocks { - blockSchemas[blockS.Type] = blockS - } - - for _, jsonAttr := range jsonAttrs { - attrName := jsonAttr.Name - if _, used := b.hiddenAttrs[attrName]; used { - continue - } - - if attrS, defined := attrSchemas[attrName]; defined { - if existing, exists := content.Attributes[attrName]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate argument", - Detail: fmt.Sprintf("The argument %q was already set at %s.", attrName, existing.Range), - Subject: &jsonAttr.NameRange, - Context: jsonAttr.Range().Ptr(), - }) - continue - } - - content.Attributes[attrS.Name] = &hcl.Attribute{ - Name: attrS.Name, - Expr: &expression{src: jsonAttr.Value}, - Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), - NameRange: jsonAttr.NameRange, - } - usedNames[attrName] = struct{}{} - - } else if blockS, defined := blockSchemas[attrName]; defined { - bv := jsonAttr.Value - blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks) - diags = append(diags, blockDiags...) 
- usedNames[attrName] = struct{}{} - } - - // We ignore anything that isn't defined because that's the - // PartialContent contract. The Content method will catch leftovers. - } - - // Make sure we got all the required attributes. - for _, attrS := range schema.Attributes { - if !attrS.Required { - continue - } - if _, defined := content.Attributes[attrS.Name]; !defined { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing required argument", - Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), - Subject: b.MissingItemRange().Ptr(), - }) - } - } - - unusedBody := &body{ - val: b.val, - hiddenAttrs: usedNames, - } - - return content, unusedBody, diags -} - -// JustAttributes for JSON bodies interprets all properties of the wrapped -// JSON object as attributes and returns them. -func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - var diags hcl.Diagnostics - attrs := make(map[string]*hcl.Attribute) - - obj, ok := b.val.(*objectVal) - if !ok { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: "A JSON object is required here, setting the arguments for this block.", - Subject: b.val.StartRange().Ptr(), - }) - return attrs, diags - } - - for _, jsonAttr := range obj.Attrs { - name := jsonAttr.Name - if name == "//" { - // Ignore "//" keys in objects representing bodies, to allow - // their use as comments. - continue - } - - if _, hidden := b.hiddenAttrs[name]; hidden { - continue - } - - if existing, exists := attrs[name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate attribute definition", - Detail: fmt.Sprintf("The argument %q was already set at %s.", name, existing.Range), - Subject: &jsonAttr.NameRange, - }) - continue - } - - attrs[name] = &hcl.Attribute{ - Name: name, - Expr: &expression{src: jsonAttr.Value}, - Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), - NameRange: jsonAttr.NameRange, - } - } - - // No diagnostics possible here, since the parser already took care of - // finding duplicates and every JSON value can be a valid attribute value. - return attrs, diags -} - -func (b *body) MissingItemRange() hcl.Range { - switch tv := b.val.(type) { - case *objectVal: - return tv.CloseRange - case *arrayVal: - return tv.OpenRange - default: - // Should not happen in correct operation, but might show up if the - // input is invalid and we are producing partial results. - return tv.StartRange() - } -} - -func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) { - if len(labelsLeft) > 0 { - labelName := labelsLeft[0] - jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName) - diags = append(diags, attrDiags...) 
- - if len(jsonAttrs) == 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing block label", - Detail: fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName), - Subject: v.StartRange().Ptr(), - }) - return - } - labelsUsed := append(labelsUsed, "") - labelRanges := append(labelRanges, hcl.Range{}) - for _, p := range jsonAttrs { - pk := p.Name - labelsUsed[len(labelsUsed)-1] = pk - labelRanges[len(labelRanges)-1] = p.NameRange - diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...) - } - return - } - - // By the time we get here, we've peeled off all the labels and we're ready - // to deal with the block's actual content. - - // need to copy the label slices because their underlying arrays will - // continue to be mutated after we return. - labels := make([]string, len(labelsUsed)) - copy(labels, labelsUsed) - labelR := make([]hcl.Range, len(labelRanges)) - copy(labelR, labelRanges) - - switch tv := v.(type) { - case *nullVal: - // There is no block content, e.g the value is null. - return - case *objectVal: - // Single instance of the block - *blocks = append(*blocks, &hcl.Block{ - Type: typeName, - Labels: labels, - Body: &body{ - val: tv, - }, - - DefRange: tv.OpenRange, - TypeRange: *typeRange, - LabelRanges: labelR, - }) - case *arrayVal: - // Multiple instances of the block - for _, av := range tv.Values { - *blocks = append(*blocks, &hcl.Block{ - Type: typeName, - Labels: labels, - Body: &body{ - val: av, // might be mistyped; we'll find out when content is requested for this body - }, - - DefRange: tv.OpenRange, - TypeRange: *typeRange, - LabelRanges: labelR, - }) - } - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName), - Subject: v.StartRange().Ptr(), - }) - } - return -} - -// collectDeepAttrs takes either a single object or an array of objects and -// flattens it into a list of object attributes, collecting attributes from -// all of the objects in a given array. -// -// Ordering is preserved, so a list of objects that each have one property -// will result in those properties being returned in the same order as the -// objects appeared in the array. -// -// This is appropriate for use only for objects representing bodies or labels -// within a block. -// -// The labelName argument, if non-null, is used to tailor returned error -// messages to refer to block labels rather than attributes and child blocks. -// It has no other effect. -func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) { - var diags hcl.Diagnostics - var attrs []*objectAttr - - switch tv := v.(type) { - case *nullVal: - // If a value is null, then we don't return any attributes or return an error. - - case *objectVal: - attrs = append(attrs, tv.Attrs...) - - case *arrayVal: - for _, ev := range tv.Values { - switch tev := ev.(type) { - case *objectVal: - attrs = append(attrs, tev.Attrs...) 
- default:
- if labelName != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Incorrect JSON value type",
- Detail: fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName),
- Subject: ev.StartRange().Ptr(),
- })
- } else {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Incorrect JSON value type",
- Detail: "A JSON object is required here, to define arguments and child blocks.",
- Subject: ev.StartRange().Ptr(),
- })
- }
- }
- }
-
- default:
- if labelName != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Incorrect JSON value type",
- Detail: fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName),
- Subject: v.StartRange().Ptr(),
- })
- } else {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Incorrect JSON value type",
- Detail: "Either a JSON object or JSON array of objects is required here, to define arguments and child blocks.",
- Subject: v.StartRange().Ptr(),
- })
- }
- }
-
- return attrs, diags
-}
-
-func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- switch v := e.src.(type) {
- case *stringVal:
- if ctx != nil {
- // Parse string contents as an HCL native language expression.
- // We only do this if we have a context, so passing a nil context
- // is how the caller specifies that interpolations are not allowed
- // and that the string should just be returned verbatim.
- templateSrc := v.Value
- expr, diags := hclsyntax.ParseTemplate(
- []byte(templateSrc),
- v.SrcRange.Filename,
-
- // This won't produce _exactly_ the right result, since
- // the hclsyntax parser can't "see" any escapes we removed
- // while parsing JSON, but it's better than nothing.
- hcl.Pos{
- Line: v.SrcRange.Start.Line,
-
- // skip over the opening quote mark
- Byte: v.SrcRange.Start.Byte + 1,
- Column: v.SrcRange.Start.Column + 1,
- },
- )
- if diags.HasErrors() {
- return cty.DynamicVal, diags
- }
- val, evalDiags := expr.Value(ctx)
- diags = append(diags, evalDiags...)
- return val, diags
- }
-
- return cty.StringVal(v.Value), nil
- case *numberVal:
- return cty.NumberVal(v.Value), nil
- case *booleanVal:
- return cty.BoolVal(v.Value), nil
- case *arrayVal:
- var diags hcl.Diagnostics
- vals := []cty.Value{}
- for _, jsonVal := range v.Values {
- val, valDiags := (&expression{src: jsonVal}).Value(ctx)
- vals = append(vals, val)
- diags = append(diags, valDiags...)
- }
- return cty.TupleVal(vals), diags
- case *objectVal:
- var diags hcl.Diagnostics
- attrs := map[string]cty.Value{}
- attrRanges := map[string]hcl.Range{}
- known := true
- for _, jsonAttr := range v.Attrs {
- // In this one context we allow keys to contain interpolation
- // expressions too, assuming we're evaluating in interpolation
- // mode. This achieves parity with the native syntax where
- // object expressions can have dynamic keys, while block contents
- // may not.
- name, nameDiags := (&expression{src: &stringVal{
- Value: jsonAttr.Name,
- SrcRange: jsonAttr.NameRange,
- }}).Value(ctx)
- valExpr := &expression{src: jsonAttr.Value}
- val, valDiags := valExpr.Value(ctx)
- diags = append(diags, nameDiags...)
- diags = append(diags, valDiags...)
- - var err error - name, err = convert.Convert(name, cty.String) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object key expression", - Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err), - Subject: &jsonAttr.NameRange, - Expression: valExpr, - EvalContext: ctx, - }) - continue - } - if name.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object key expression", - Detail: "Cannot use null value as an object key.", - Subject: &jsonAttr.NameRange, - Expression: valExpr, - EvalContext: ctx, - }) - continue - } - if !name.IsKnown() { - // This is a bit of a weird case, since our usual rules require - // us to tolerate unknowns and just represent the result as - // best we can but if we don't know the key then we can't - // know the type of our object at all, and thus we must turn - // the whole thing into cty.DynamicVal. This is consistent with - // how this situation is handled in the native syntax. - // We'll keep iterating so we can collect other errors in - // subsequent attributes. - known = false - continue - } - nameStr := name.AsString() - if _, defined := attrs[nameStr]; defined { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate object attribute", - Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]), - Subject: &jsonAttr.NameRange, - Expression: e, - EvalContext: ctx, - }) - continue - } - attrs[nameStr] = val - attrRanges[nameStr] = jsonAttr.NameRange - } - if !known { - // We encountered an unknown key somewhere along the way, so - // we can't know what our type will eventually be. - return cty.DynamicVal, diags - } - return cty.ObjectVal(attrs), diags - case *nullVal: - return cty.NullVal(cty.DynamicPseudoType), nil - default: - // Default to DynamicVal so that ASTs containing invalid nodes can - // still be partially-evaluated. - return cty.DynamicVal, nil - } -} - -func (e *expression) Variables() []hcl.Traversal { - var vars []hcl.Traversal - - switch v := e.src.(type) { - case *stringVal: - templateSrc := v.Value - expr, diags := hclsyntax.ParseTemplate( - []byte(templateSrc), - v.SrcRange.Filename, - - // This won't produce _exactly_ the right result, since - // the hclsyntax parser can't "see" any escapes we removed - // while parsing JSON, but it's better than nothing. - hcl.Pos{ - Line: v.SrcRange.Start.Line, - - // skip over the opening quote mark - Byte: v.SrcRange.Start.Byte + 1, - Column: v.SrcRange.Start.Column + 1, - }, - ) - if diags.HasErrors() { - return vars - } - return expr.Variables() - - case *arrayVal: - for _, jsonVal := range v.Values { - vars = append(vars, (&expression{src: jsonVal}).Variables()...) - } - case *objectVal: - for _, jsonAttr := range v.Attrs { - keyExpr := &stringVal{ // we're going to treat key as an expression in this context - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - } - vars = append(vars, (&expression{src: keyExpr}).Variables()...) - vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) - } - } - - return vars -} - -func (e *expression) Range() hcl.Range { - return e.src.Range() -} - -func (e *expression) StartRange() hcl.Range { - return e.src.StartRange() -} - -// Implementation for hcl.AbsTraversalForExpr. 
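The string-as-template behaviour implemented by Value and Variables above can be exercised end-to-end. A minimal sketch, assuming the public hcl/v2 API (the file name, attribute, and variable are illustrative): with an EvalContext the JSON string is parsed and evaluated as a native-syntax template; with a nil context it comes back verbatim.

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl/v2"
    	hcljson "github.com/hashicorp/hcl/v2/json"
    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	f, diags := hcljson.Parse([]byte(`{"greeting": "hello ${name}"}`), "example.tf.json")
    	if diags.HasErrors() {
    		panic(diags)
    	}
    	attrs, _ := f.Body.JustAttributes()

    	// With an EvalContext, the string is parsed as a template.
    	ctx := &hcl.EvalContext{
    		Variables: map[string]cty.Value{"name": cty.StringVal("world")},
    	}
    	val, moreDiags := attrs["greeting"].Expr.Value(ctx)
    	if moreDiags.HasErrors() {
    		panic(moreDiags)
    	}
    	fmt.Println(val.AsString()) // hello world

    	// With a nil context, interpolation is disabled and the string is verbatim.
    	raw, _ := attrs["greeting"].Expr.Value(nil)
    	fmt.Println(raw.AsString()) // hello ${name}
    }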
-func (e *expression) AsTraversal() hcl.Traversal { - // In JSON-based syntax a traversal is given as a string containing - // traversal syntax as defined by hclsyntax.ParseTraversalAbs. - - switch v := e.src.(type) { - case *stringVal: - traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) - if diags.HasErrors() { - return nil - } - return traversal - default: - return nil - } -} - -// Implementation for hcl.ExprCall. -func (e *expression) ExprCall() *hcl.StaticCall { - // In JSON-based syntax a static call is given as a string containing - // an expression in the native syntax that also supports ExprCall. - - switch v := e.src.(type) { - case *stringVal: - expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) - if diags.HasErrors() { - return nil - } - - call, diags := hcl.ExprCall(expr) - if diags.HasErrors() { - return nil - } - - return call - default: - return nil - } -} - -// Implementation for hcl.ExprList. -func (e *expression) ExprList() []hcl.Expression { - switch v := e.src.(type) { - case *arrayVal: - ret := make([]hcl.Expression, len(v.Values)) - for i, node := range v.Values { - ret[i] = &expression{src: node} - } - return ret - default: - return nil - } -} - -// Implementation for hcl.ExprMap. -func (e *expression) ExprMap() []hcl.KeyValuePair { - switch v := e.src.(type) { - case *objectVal: - ret := make([]hcl.KeyValuePair, len(v.Attrs)) - for i, jsonAttr := range v.Attrs { - ret[i] = hcl.KeyValuePair{ - Key: &expression{src: &stringVal{ - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - }}, - Value: &expression{src: jsonAttr.Value}, - } - } - return ret - default: - return nil - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go b/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go deleted file mode 100644 index bbcce5b3..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT. - -package json - -import "strconv" - -const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF" - -var _tokenType_map = map[tokenType]string{ - 0: _tokenType_name[0:12], - 44: _tokenType_name[12:22], - 58: _tokenType_name[22:32], - 61: _tokenType_name[32:43], - 75: _tokenType_name[43:55], - 78: _tokenType_name[55:66], - 83: _tokenType_name[66:77], - 91: _tokenType_name[77:88], - 93: _tokenType_name[88:99], - 123: _tokenType_name[99:110], - 125: _tokenType_name[110:121], - 9220: _tokenType_name[121:129], -} - -func (i tokenType) String() string { - if str, ok := _tokenType_map[i]; ok { - return str - } - return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE deleted file mode 100644 index 82b4de97..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. 
“Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go
deleted file mode 100644
index d9d27625..00000000
--- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package tfconfig
-
-import (
- "fmt"
-
- legacyhclparser "github.com/hashicorp/hcl/hcl/parser"
- "github.com/hashicorp/hcl/v2"
-)
-
-// Diagnostic describes a problem (error or warning) encountered during
-// configuration loading.
-type Diagnostic struct {
- Severity DiagSeverity `json:"severity"`
- Summary string `json:"summary"`
- Detail string `json:"detail,omitempty"`
-
- // Pos is not populated for all diagnostics, but when populated should
- // indicate a particular line that the described problem relates to.
- Pos *SourcePos `json:"pos,omitempty"`
-}
-
-// Diagnostics represents a sequence of diagnostics. This is the type that
-// should be returned from a function that might generate diagnostics.
-type Diagnostics []Diagnostic
-
-// HasErrors returns true if there is at least one Diagnostic of severity
-// DiagError in the receiver.
-//
-// If a function returns a Diagnostics without errors then the result can
-// be assumed to be complete within the "best effort" constraints of this
-// library. If errors are present then the caller may wish to employ more
-// caution in relying on the result.
-func (diags Diagnostics) HasErrors() bool {
- for _, diag := range diags {
- if diag.Severity == DiagError {
- return true
- }
- }
- return false
-}
-
-func (diags Diagnostics) Error() string {
- switch len(diags) {
- case 0:
- return "no problems"
- case 1:
- return fmt.Sprintf("%s: %s", diags[0].Summary, diags[0].Detail)
- default:
- return fmt.Sprintf("%s: %s (and %d other messages)", diags[0].Summary, diags[0].Detail, len(diags)-1)
- }
-}
-
-// Err returns an error representing the receiver if the receiver HasErrors, or
-// nil otherwise.
-//
-// The returned error can be type-asserted back to a Diagnostics if needed.
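That assertion pattern, as a hedged usage sketch (the diagnostic contents are illustrative):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/terraform-config-inspect/tfconfig"
    )

    func main() {
    	diags := tfconfig.Diagnostics{
    		{Severity: tfconfig.DiagError, Summary: "example problem", Detail: "illustrative only"},
    	}
    	if err := diags.Err(); err != nil {
    		recovered := err.(tfconfig.Diagnostics) // type-assert back, as noted above
    		fmt.Println(recovered.HasErrors())      // true
    	}
    }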
-func (diags Diagnostics) Err() error {
- if diags.HasErrors() {
- return diags
- }
- return nil
-}
-
-// DiagSeverity describes the severity of a Diagnostic.
-type DiagSeverity rune
-
-// DiagError indicates a problem that prevented proper processing of the
-// configuration. In the presence of DiagError diagnostics the result is
-// likely to be incomplete.
-const DiagError DiagSeverity = 'E'
-
-// DiagWarning indicates a problem that the user may wish to consider but
-// that did not prevent proper processing of the configuration.
-const DiagWarning DiagSeverity = 'W'
-
-// MarshalJSON is an implementation of encoding/json.Marshaler
-func (s DiagSeverity) MarshalJSON() ([]byte, error) {
- switch s {
- case DiagError:
- return []byte(`"error"`), nil
- case DiagWarning:
- return []byte(`"warning"`), nil
- default:
- return []byte(`"invalid"`), nil
- }
-}
-
-func diagnosticsHCL(diags hcl.Diagnostics) Diagnostics {
- if len(diags) == 0 {
- return nil
- }
- ret := make(Diagnostics, len(diags))
- for i, diag := range diags {
- ret[i] = Diagnostic{
- Summary: diag.Summary,
- Detail: diag.Detail,
- }
- switch diag.Severity {
- case hcl.DiagError:
- ret[i].Severity = DiagError
- case hcl.DiagWarning:
- ret[i].Severity = DiagWarning
- }
- if diag.Subject != nil {
- pos := sourcePosHCL(*diag.Subject)
- ret[i].Pos = &pos
- }
- }
- return ret
-}
-
-func diagnosticsError(err error) Diagnostics {
- if err == nil {
- return nil
- }
-
- if posErr, ok := err.(*legacyhclparser.PosError); ok {
- pos := sourcePosLegacyHCL(posErr.Pos, "")
- return Diagnostics{
- Diagnostic{
- Severity: DiagError,
- Summary: posErr.Err.Error(),
- Pos: &pos,
- },
- }
- }
-
- return Diagnostics{
- Diagnostic{
- Severity: DiagError,
- Summary: err.Error(),
- },
- }
-}
-
-func diagnosticsErrorf(format string, args ...interface{}) Diagnostics {
- return diagnosticsError(fmt.Errorf(format, args...))
-}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
deleted file mode 100644
index 1604a6e0..00000000
--- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Package tfconfig is a helper library that does careful, shallow parsing of
-// Terraform modules to provide access to high-level metadata while
-// remaining broadly compatible with configurations targeting various
-// different Terraform versions.
-//
-// This package focuses on describing top-level objects only, and in particular
-// does not attempt any sort of processing that would require access to plugins.
-// Currently it allows callers to extract high-level information about
-// variables, outputs, resource blocks, provider dependencies, and Terraform
-// Core dependencies.
-//
-// This package only works at the level of single modules. A full configuration
-// is a tree of potentially several modules, some of which may be references
-// to remote packages. There are some basic helpers for traversing calls to
-// modules at relative local paths, however.
-//
-// This package employs a "best effort" parsing strategy, producing as complete
-// a result as possible even though the input may not be entirely valid. The
-// intended use-case is high-level analysis and indexing of externally-facing
-// module characteristics, as opposed to validating or even applying the module.
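A minimal sketch of the intended use, assuming the public tfconfig API described above (the module path is illustrative):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hashicorp/terraform-config-inspect/tfconfig"
    )

    func main() {
    	module, diags := tfconfig.LoadModule("./example-module") // illustrative path
    	if diags.HasErrors() {
    		log.Fatal(diags.Err())
    	}
    	for name, v := range module.Variables {
    		fmt.Printf("variable %q (type %s) at %s:%d\n", name, v.Type, v.Pos.Filename, v.Pos.Line)
    	}
    	for key := range module.ManagedResources {
    		fmt.Println("resource:", key)
    	}
    }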
-package tfconfig
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
deleted file mode 100644
index faa93ed6..00000000
--- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package tfconfig
-
-import (
- "fmt"
- "io/ioutil"
- "path/filepath"
- "strings"
-
- "github.com/hashicorp/hcl/v2"
-)
-
-// LoadModule reads the directory at the given path and attempts to interpret
-// it as a Terraform module.
-func LoadModule(dir string) (*Module, Diagnostics) {
-
- // For broad compatibility here we actually have two separate loader
- // codepaths. The main one uses the new HCL parser and API and is intended
- // for configurations from Terraform 0.12 onwards (though will work for
- // many older configurations too), but we'll also fall back on one that
- // uses the _old_ HCL implementation so we can deal with some edge-cases
- // that are not valid in new HCL.
-
- module, diags := loadModule(dir)
- if diags.HasErrors() {
- // Try using the legacy HCL parser and see if we fare better.
- legacyModule, legacyDiags := loadModuleLegacyHCL(dir)
- if !legacyDiags.HasErrors() {
- legacyModule.init(legacyDiags)
- return legacyModule, legacyDiags
- }
- }
-
- module.init(diags)
- return module, diags
-}
-
-// IsModuleDir checks if the given path contains terraform configuration files.
-// This allows the caller to decide how to handle directories that do not have tf files.
-func IsModuleDir(dir string) bool {
- primaryPaths, _ := dirFiles(dir)
- if len(primaryPaths) == 0 {
- return false
- }
- return true
-}
-
-func (m *Module) init(diags Diagnostics) {
- // Fill in any additional provider requirements that are implied by
- // resource configurations, to save the caller from needing to apply
- // this logic itself. Implied requirements don't have version constraints,
- // but we'll make sure the requirement value is still non-nil in this
- // case so callers can easily recognize it.
- for _, r := range m.ManagedResources {
- if _, exists := m.RequiredProviders[r.Provider.Name]; !exists {
- m.RequiredProviders[r.Provider.Name] = []string{}
- }
- }
- for _, r := range m.DataResources {
- if _, exists := m.RequiredProviders[r.Provider.Name]; !exists {
- m.RequiredProviders[r.Provider.Name] = []string{}
- }
- }
-
- // We redundantly also reference the diagnostics from inside the module
- // object, primarily so that they can easily be included in JSON-serialized
- // versions of the module object.
- m.Diagnostics = diags -} - -func dirFiles(dir string) (primary []string, diags hcl.Diagnostics) { - infos, err := ioutil.ReadDir(dir) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Failed to read module directory", - Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), - }) - return - } - - var override []string - for _, info := range infos { - if info.IsDir() { - // We only care about files - continue - } - - name := info.Name() - ext := fileExt(name) - if ext == "" || isIgnoredFile(name) { - continue - } - - baseName := name[:len(name)-len(ext)] // strip extension - isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") - - fullPath := filepath.Join(dir, name) - if isOverride { - override = append(override, fullPath) - } else { - primary = append(primary, fullPath) - } - } - - // We are assuming that any _override files will be logically named, - // and processing the files in alphabetical order. Primaries first, then overrides. - primary = append(primary, override...) - - return -} - -// fileExt returns the Terraform configuration extension of the given -// path, or a blank string if it is not a recognized extension. -func fileExt(path string) string { - if strings.HasSuffix(path, ".tf") { - return ".tf" - } else if strings.HasSuffix(path, ".tf.json") { - return ".tf.json" - } else { - return "" - } -} - -// isIgnoredFile returns true if the given filename (which must not have a -// directory path ahead of it) should be ignored as e.g. an editor swap file. -func isIgnoredFile(name string) bool { - return strings.HasPrefix(name, ".") || // Unix-like hidden files - strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go deleted file mode 100644 index 9cb3aeef..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go +++ /dev/null @@ -1,322 +0,0 @@ -package tfconfig - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclparse" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -func loadModule(dir string) (*Module, Diagnostics) { - mod := newModule(dir) - primaryPaths, diags := dirFiles(dir) - - parser := hclparse.NewParser() - - for _, filename := range primaryPaths { - var file *hcl.File - var fileDiags hcl.Diagnostics - if strings.HasSuffix(filename, ".json") { - file, fileDiags = parser.ParseJSONFile(filename) - } else { - file, fileDiags = parser.ParseHCLFile(filename) - } - diags = append(diags, fileDiags...) - if file == nil { - continue - } - - content, _, contentDiags := file.Body.PartialContent(rootSchema) - diags = append(diags, contentDiags...) - - for _, block := range content.Blocks { - switch block.Type { - - case "terraform": - content, _, contentDiags := block.Body.PartialContent(terraformBlockSchema) - diags = append(diags, contentDiags...) - - if attr, defined := content.Attributes["required_version"]; defined { - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) 
- if !valDiags.HasErrors() { - mod.RequiredCore = append(mod.RequiredCore, version) - } - } - - for _, block := range content.Blocks { - // Our schema only allows required_providers here, so we - // assume that we'll only get that block type. - attrs, attrDiags := block.Body.JustAttributes() - diags = append(diags, attrDiags...) - - for name, attr := range attrs { - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) - if !valDiags.HasErrors() { - mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version) - } - } - } - - case "variable": - content, _, contentDiags := block.Body.PartialContent(variableSchema) - diags = append(diags, contentDiags...) - - name := block.Labels[0] - v := &Variable{ - Name: name, - Pos: sourcePosHCL(block.DefRange), - } - - mod.Variables[name] = v - - if attr, defined := content.Attributes["type"]; defined { - // We handle this particular attribute in a somewhat-tricky way: - // since Terraform may evolve its type expression syntax in - // future versions, we don't want to be overly-strict in how - // we handle it here, and so we'll instead just take the raw - // source provided by the user, using the source location - // information in the expression object. - // - // However, older versions of Terraform expected the type - // to be a string containing a keyword, so we'll need to - // handle that as a special case first for backward compatibility. - - var typeExpr string - - var typeExprAsStr string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &typeExprAsStr) - if !valDiags.HasErrors() { - typeExpr = typeExprAsStr - } else { - - rng := attr.Expr.Range() - sourceFilename := rng.Filename - source, exists := parser.Sources()[sourceFilename] - if exists { - typeExpr = string(rng.SliceBytes(source)) - } else { - // This should never happen, so we'll just warn about it and leave the type unspecified. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Source code not available", - Detail: fmt.Sprintf("Source code is not available for the file %q, which declares the variable %q.", sourceFilename, name), - Subject: &block.DefRange, - }) - typeExpr = "" - } - - } - - v.Type = typeExpr - } - - if attr, defined := content.Attributes["description"]; defined { - var description string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) - diags = append(diags, valDiags...) - v.Description = description - } - - if attr, defined := content.Attributes["default"]; defined { - // To avoid the caller needing to deal with cty here, we'll - // use its JSON encoding to convert into an - // approximately-equivalent plain Go interface{} value - // to return. - val, valDiags := attr.Expr.Value(nil) - diags = append(diags, valDiags...) - if val.IsWhollyKnown() { // should only be false if there are errors in the input - valJSON, err := ctyjson.Marshal(val, val.Type()) - if err != nil { - // Should never happen, since all possible known - // values have a JSON mapping. - panic(fmt.Errorf("failed to serialize default value as JSON: %s", err)) - } - var def interface{} - err = json.Unmarshal(valJSON, &def) - if err != nil { - // Again should never happen, because valJSON is - // guaranteed valid by ctyjson.Marshal. - panic(fmt.Errorf("failed to re-parse default value from JSON: %s", err)) - } - v.Default = def - } - } - - case "output": - - content, _, contentDiags := block.Body.PartialContent(outputSchema) - diags = append(diags, contentDiags...) 
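Stepping back to the variable defaults handled just above: they rely on round-tripping cty values through their JSON encoding to get plain Go values. Standalone, that conversion looks roughly like this (a sketch; the values are illustrative):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/zclconf/go-cty/cty"
    	ctyjson "github.com/zclconf/go-cty/cty/json"
    )

    func main() {
    	val := cty.ObjectVal(map[string]cty.Value{
    		"port":    cty.NumberIntVal(8080),
    		"enabled": cty.True,
    	})
    	// Serialize the cty value, then re-parse it as ordinary Go data.
    	buf, err := ctyjson.Marshal(val, val.Type())
    	if err != nil {
    		panic(err)
    	}
    	var def interface{}
    	if err := json.Unmarshal(buf, &def); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%#v\n", def) // map[string]interface {}{"enabled":true, "port":8080}
    }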
- - name := block.Labels[0] - o := &Output{ - Name: name, - Pos: sourcePosHCL(block.DefRange), - } - - mod.Outputs[name] = o - - if attr, defined := content.Attributes["description"]; defined { - var description string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) - diags = append(diags, valDiags...) - o.Description = description - } - - case "provider": - - content, _, contentDiags := block.Body.PartialContent(providerConfigSchema) - diags = append(diags, contentDiags...) - - name := block.Labels[0] - - if attr, defined := content.Attributes["version"]; defined { - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) - if !valDiags.HasErrors() { - mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version) - } - } - - // Even if there wasn't an explicit version required, we still - // need an entry in our map to signal the unversioned dependency. - if _, exists := mod.RequiredProviders[name]; !exists { - mod.RequiredProviders[name] = []string{} - } - - case "resource", "data": - - content, _, contentDiags := block.Body.PartialContent(resourceSchema) - diags = append(diags, contentDiags...) - - typeName := block.Labels[0] - name := block.Labels[1] - - r := &Resource{ - Type: typeName, - Name: name, - Pos: sourcePosHCL(block.DefRange), - } - - var resourcesMap map[string]*Resource - - switch block.Type { - case "resource": - r.Mode = ManagedResourceMode - resourcesMap = mod.ManagedResources - case "data": - r.Mode = DataResourceMode - resourcesMap = mod.DataResources - } - - key := r.MapKey() - - resourcesMap[key] = r - - if attr, defined := content.Attributes["provider"]; defined { - // New style here is to provide this as a naked traversal - // expression, but we also support quoted references for - // older configurations that predated this convention. - traversal, travDiags := hcl.AbsTraversalForExpr(attr.Expr) - if travDiags.HasErrors() { - traversal = nil // in case we got any partial results - - // Fall back on trying to parse as a string - var travStr string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &travStr) - if !valDiags.HasErrors() { - var strDiags hcl.Diagnostics - traversal, strDiags = hclsyntax.ParseTraversalAbs([]byte(travStr), "", hcl.Pos{}) - if strDiags.HasErrors() { - traversal = nil - } - } - } - - // If we get out here with a nil traversal then we didn't - // succeed in processing the input. - if len(traversal) > 0 { - providerName := traversal.RootName() - alias := "" - if len(traversal) > 1 { - if getAttr, ok := traversal[1].(hcl.TraverseAttr); ok { - alias = getAttr.Name - } - } - r.Provider = ProviderRef{ - Name: providerName, - Alias: alias, - } - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider reference", - Detail: "Provider argument requires a provider name followed by an optional alias, like \"aws.foo\".", - Subject: attr.Expr.Range().Ptr(), - }) - } - } else { - // If provider _isn't_ set then we'll infer it from the - // resource type. - r.Provider = ProviderRef{ - Name: resourceTypeDefaultProviderName(r.Type), - } - } - - case "module": - - content, _, contentDiags := block.Body.PartialContent(moduleCallSchema) - diags = append(diags, contentDiags...) 
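Back on the provider attribute handled above: it accepts a naked traversal such as provider = aws.foo and falls back to parsing a quoted string. A minimal sketch of the naked-traversal path, assuming the public hcl/v2 API (the expression is illustrative):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl/v2"
    	"github.com/hashicorp/hcl/v2/hclsyntax"
    )

    func main() {
    	expr, diags := hclsyntax.ParseExpression([]byte("aws.foo"), "inline", hcl.Pos{Line: 1, Column: 1})
    	if diags.HasErrors() {
    		panic(diags)
    	}
    	trav, travDiags := hcl.AbsTraversalForExpr(expr)
    	if travDiags.HasErrors() {
    		panic(travDiags)
    	}
    	// Root name is the provider; an attribute step, if present, is the alias.
    	alias := ""
    	if len(trav) > 1 {
    		if attr, ok := trav[1].(hcl.TraverseAttr); ok {
    			alias = attr.Name
    		}
    	}
    	fmt.Println(trav.RootName(), alias) // aws foo
    }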
- - name := block.Labels[0] - mc := &ModuleCall{ - Name: block.Labels[0], - Pos: sourcePosHCL(block.DefRange), - } - - // check if this is overriding an existing module - var origSource string - if origMod, exists := mod.ModuleCalls[name]; exists { - origSource = origMod.Source - } - - mod.ModuleCalls[name] = mc - - if attr, defined := content.Attributes["source"]; defined { - var source string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &source) - diags = append(diags, valDiags...) - mc.Source = source - } - - if mc.Source == "" { - mc.Source = origSource - } - - if attr, defined := content.Attributes["version"]; defined { - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) - mc.Version = version - } - - default: - // Should never happen because our cases above should be - // exhaustive for our schema. - panic(fmt.Errorf("unhandled block type %q", block.Type)) - } - } - } - - return mod, diagnosticsHCL(diags) -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go deleted file mode 100644 index 86ffdf11..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go +++ /dev/null @@ -1,325 +0,0 @@ -package tfconfig - -import ( - "io/ioutil" - "strings" - - legacyhcl "github.com/hashicorp/hcl" - legacyast "github.com/hashicorp/hcl/hcl/ast" -) - -func loadModuleLegacyHCL(dir string) (*Module, Diagnostics) { - // This implementation is intentionally more quick-and-dirty than the - // main loader. In particular, it doesn't bother to keep careful track - // of multiple error messages because we always fall back on returning - // the main parser's error message if our fallback parsing produces - // an error, and thus the errors here are not seen by the end-caller. 
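For orientation, a minimal sketch of the legacy HCL v1 entry points this fallback builds on (the input is illustrative): Filter strips the matched key, which is why the loops that follow read each label from item.Keys[0].

    package main

    import (
    	"fmt"

    	legacyhcl "github.com/hashicorp/hcl"
    	legacyast "github.com/hashicorp/hcl/hcl/ast"
    )

    func main() {
    	root, err := legacyhcl.Parse(`variable "region" { default = "eu-west-1" }`)
    	if err != nil {
    		panic(err)
    	}
    	list, ok := root.Node.(*legacyast.ObjectList)
    	if !ok {
    		panic("root is not an object list")
    	}
    	// Filter strips the matched "variable" key, leaving only the label.
    	vars := list.Filter("variable").Children()
    	fmt.Println(vars.Items[0].Keys[0].Token.Value()) // region
    }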
- mod := newModule(dir) - - primaryPaths, diags := dirFiles(dir) - if diags.HasErrors() { - return mod, diagnosticsHCL(diags) - } - - for _, filename := range primaryPaths { - src, err := ioutil.ReadFile(filename) - if err != nil { - return mod, diagnosticsErrorf("Error reading %s: %s", filename, err) - } - - hclRoot, err := legacyhcl.Parse(string(src)) - if err != nil { - return mod, diagnosticsErrorf("Error parsing %s: %s", filename, err) - } - - list, ok := hclRoot.Node.(*legacyast.ObjectList) - if !ok { - return mod, diagnosticsErrorf("Error parsing %s: no root object", filename) - } - - for _, item := range list.Filter("terraform").Items { - if len(item.Keys) > 0 { - item = &legacyast.ObjectItem{ - Val: &legacyast.ObjectType{ - List: &legacyast.ObjectList{ - Items: []*legacyast.ObjectItem{item}, - }, - }, - } - } - - type TerraformBlock struct { - RequiredVersion string `hcl:"required_version"` - } - var block TerraformBlock - err = legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("terraform block: %s", err) - } - - if block.RequiredVersion != "" { - mod.RequiredCore = append(mod.RequiredCore, block.RequiredVersion) - } - } - - if vars := list.Filter("variable"); len(vars.Items) > 0 { - vars = vars.Children() - type VariableBlock struct { - Type string `hcl:"type"` - Default interface{} - Description string - Fields []string `hcl:",decodedFields"` - } - - for _, item := range vars.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("variable block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block VariableBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid variable block at %s: %s", item.Pos(), err) - } - - // Clean up legacy HCL decoding ambiguity by unwrapping list of maps - if ms, ok := block.Default.([]map[string]interface{}); ok { - def := make(map[string]interface{}) - for _, m := range ms { - for k, v := range m { - def[k] = v - } - } - block.Default = def - } - - v := &Variable{ - Name: name, - Type: block.Type, - Description: block.Description, - Default: block.Default, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - if _, exists := mod.Variables[name]; exists { - return nil, diagnosticsErrorf("duplicate variable block for %q", name) - } - mod.Variables[name] = v - - } - } - - if outputs := list.Filter("output"); len(outputs.Items) > 0 { - outputs = outputs.Children() - type OutputBlock struct { - Description string - } - - for _, item := range outputs.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("output block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block OutputBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid output block at %s: %s", item.Pos(), err) - } - - o := &Output{ - Name: name, - Description: block.Description, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - if _, exists := mod.Outputs[name]; exists { - return nil, diagnosticsErrorf("duplicate output block for %q", name) - } - mod.Outputs[name] = o - } - } - - for _, blockType := range []string{"resource", "data"} { - if resources := list.Filter(blockType); len(resources.Items) > 0 { - resources = resources.Children() - type ResourceBlock struct { - Provider string - } - - for _, item := range resources.Items 
{ - unwrapLegacyHCLObjectKeysFromJSON(item, 2) - - if len(item.Keys) != 2 { - return nil, diagnosticsErrorf("resource block at %s has wrong label count", item.Pos()) - } - - typeName := item.Keys[0].Token.Value().(string) - name := item.Keys[1].Token.Value().(string) - var mode ResourceMode - var rMap map[string]*Resource - switch blockType { - case "resource": - mode = ManagedResourceMode - rMap = mod.ManagedResources - case "data": - mode = DataResourceMode - rMap = mod.DataResources - } - - var block ResourceBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid resource block at %s: %s", item.Pos(), err) - } - - var providerName, providerAlias string - if dotPos := strings.IndexByte(block.Provider, '.'); dotPos != -1 { - providerName = block.Provider[:dotPos] - providerAlias = block.Provider[dotPos+1:] - } else { - providerName = block.Provider - } - if providerName == "" { - providerName = resourceTypeDefaultProviderName(typeName) - } - - r := &Resource{ - Mode: mode, - Type: typeName, - Name: name, - Provider: ProviderRef{ - Name: providerName, - Alias: providerAlias, - }, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - key := r.MapKey() - if _, exists := rMap[key]; exists { - return nil, diagnosticsErrorf("duplicate resource block for %q", key) - } - rMap[key] = r - } - } - - } - - if moduleCalls := list.Filter("module"); len(moduleCalls.Items) > 0 { - moduleCalls = moduleCalls.Children() - type ModuleBlock struct { - Source string - Version string - } - - for _, item := range moduleCalls.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("module block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block ModuleBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("module block at %s: %s", item.Pos(), err) - } - - mc := &ModuleCall{ - Name: name, - Source: block.Source, - Version: block.Version, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - // it's possible this module call is from an override file - if origMod, exists := mod.ModuleCalls[name]; exists { - if mc.Source == "" { - mc.Source = origMod.Source - } - } - mod.ModuleCalls[name] = mc - } - } - - if providerConfigs := list.Filter("provider"); len(providerConfigs.Items) > 0 { - providerConfigs = providerConfigs.Children() - type ProviderBlock struct { - Version string - } - - for _, item := range providerConfigs.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("provider block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block ProviderBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid provider block at %s: %s", item.Pos(), err) - } - - if block.Version != "" { - mod.RequiredProviders[name] = append(mod.RequiredProviders[name], block.Version) - } - - // Even if there wasn't an explicit version required, we still - // need an entry in our map to signal the unversioned dependency. - if _, exists := mod.RequiredProviders[name]; !exists { - mod.RequiredProviders[name] = []string{} - } - - } - } - } - - return mod, nil -} - -// unwrapLegacyHCLObjectKeysFromJSON cleans up an edge case that can occur when -// parsing JSON as input: if we're parsing JSON then directly nested -// items will show up as additional "keys". 
-// -// For objects that expect a fixed number of keys, this breaks the -// decoding process. This function unwraps the object into what it would've -// looked like if it came directly from HCL by specifying the number of keys -// you expect. -// -// Example: -// -// { "foo": { "baz": {} } } -// -// Will show up with Keys being: []string{"foo", "baz"} -// when we really just want the first two. This function will fix this. -func unwrapLegacyHCLObjectKeysFromJSON(item *legacyast.ObjectItem, depth int) { - if len(item.Keys) > depth && item.Keys[0].Token.JSON { - for len(item.Keys) > depth { - // Pop off the last key - n := len(item.Keys) - key := item.Keys[n-1] - item.Keys[n-1] = nil - item.Keys = item.Keys[:n-1] - - // Wrap our value in a list - item.Val = &legacyast.ObjectType{ - List: &legacyast.ObjectList{ - Items: []*legacyast.ObjectItem{ - &legacyast.ObjectItem{ - Keys: []*legacyast.ObjectKey{key}, - Val: item.Val, - }, - }, - }, - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go deleted file mode 100644 index 65ddb230..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go +++ /dev/null @@ -1,35 +0,0 @@ -package tfconfig - -// Module is the top-level type representing a parsed and processed Terraform -// module. -type Module struct { - // Path is the local filesystem directory where the module was loaded from. - Path string `json:"path"` - - Variables map[string]*Variable `json:"variables"` - Outputs map[string]*Output `json:"outputs"` - - RequiredCore []string `json:"required_core,omitempty"` - RequiredProviders map[string][]string `json:"required_providers"` - - ManagedResources map[string]*Resource `json:"managed_resources"` - DataResources map[string]*Resource `json:"data_resources"` - ModuleCalls map[string]*ModuleCall `json:"module_calls"` - - // Diagnostics records any errors and warnings that were detected during - // loading, primarily for inclusion in serialized forms of the module - // since this slice is also returned as a second argument from LoadModule. - Diagnostics Diagnostics `json:"diagnostics,omitempty"` -} - -func newModule(path string) *Module { - return &Module{ - Path: path, - Variables: make(map[string]*Variable), - Outputs: make(map[string]*Output), - RequiredProviders: make(map[string][]string), - ManagedResources: make(map[string]*Resource), - DataResources: make(map[string]*Resource), - ModuleCalls: make(map[string]*ModuleCall), - } -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go deleted file mode 100644 index 5e1e05a7..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go +++ /dev/null @@ -1,11 +0,0 @@ -package tfconfig - -// ModuleCall represents a "module" block within a module. That is, a -// declaration of a child module from inside its parent. 
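A small sketch of the JSON shape this struct serializes to (the field values are illustrative):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/hashicorp/terraform-config-inspect/tfconfig"
    )

    func main() {
    	mc := tfconfig.ModuleCall{
    		Name:    "network",
    		Source:  "./modules/network", // illustrative values
    		Version: "1.2.0",
    	}
    	out, _ := json.Marshal(mc)
    	fmt.Println(string(out))
    	// {"name":"network","source":"./modules/network","version":"1.2.0","pos":{"filename":"","line":0}}
    }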
-type ModuleCall struct { - Name string `json:"name"` - Source string `json:"source"` - Version string `json:"version,omitempty"` - - Pos SourcePos `json:"pos"` -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go deleted file mode 100644 index 890b25e6..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go +++ /dev/null @@ -1,9 +0,0 @@ -package tfconfig - -// Output represents a single output from a Terraform module. -type Output struct { - Name string `json:"name"` - Description string `json:"description,omitempty"` - - Pos SourcePos `json:"pos"` -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go deleted file mode 100644 index d9248377..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go +++ /dev/null @@ -1,9 +0,0 @@ -package tfconfig - -// ProviderRef is a reference to a provider configuration within a module. -// It represents the contents of a "provider" argument in a resource, or -// a value in the "providers" map for a module call. -type ProviderRef struct { - Name string `json:"name"` - Alias string `json:"alias,omitempty"` // Empty if the default provider configuration is referenced -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go deleted file mode 100644 index 401c8fce..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go +++ /dev/null @@ -1,64 +0,0 @@ -package tfconfig - -import ( - "fmt" - "strconv" - "strings" -) - -// Resource represents a single "resource" or "data" block within a module. -type Resource struct { - Mode ResourceMode `json:"mode"` - Type string `json:"type"` - Name string `json:"name"` - - Provider ProviderRef `json:"provider"` - - Pos SourcePos `json:"pos"` -} - -// MapKey returns a string that can be used to uniquely identify the receiver -// in a map[string]*Resource. -func (r *Resource) MapKey() string { - switch r.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", r.Type, r.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", r.Type, r.Name) - default: - // should never happen - return fmt.Sprintf("[invalid_mode!].%s.%s", r.Type, r.Name) - } -} - -// ResourceMode represents the "mode" of a resource, which is used to -// distinguish between managed resources ("resource" blocks in config) and -// data resources ("data" blocks in config). -type ResourceMode rune - -const InvalidResourceMode ResourceMode = 0 -const ManagedResourceMode ResourceMode = 'M' -const DataResourceMode ResourceMode = 'D' - -func (m ResourceMode) String() string { - switch m { - case ManagedResourceMode: - return "managed" - case DataResourceMode: - return "data" - default: - return "" - } -} - -// MarshalJSON implements encoding/json.Marshaler. 
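Taken together with MapKey above, a short sketch of how a resource and its mode serialize (the values are illustrative):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/hashicorp/terraform-config-inspect/tfconfig"
    )

    func main() {
    	r := tfconfig.Resource{
    		Mode: tfconfig.ManagedResourceMode,
    		Type: "aws_instance", // illustrative resource
    		Name: "web",
    	}
    	fmt.Println(r.MapKey()) // aws_instance.web

    	// ResourceMode's MarshalJSON renders the mode as a quoted keyword.
    	mode, _ := json.Marshal(r.Mode)
    	fmt.Println(string(mode)) // "managed"
    }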
-func (m ResourceMode) MarshalJSON() ([]byte, error) { - return []byte(strconv.Quote(m.String())), nil -} - -func resourceTypeDefaultProviderName(typeName string) string { - if underPos := strings.IndexByte(typeName, '_'); underPos != -1 { - return typeName[:underPos] - } - return typeName -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go deleted file mode 100644 index fd6ca9e7..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go +++ /dev/null @@ -1,106 +0,0 @@ -package tfconfig - -import ( - "github.com/hashicorp/hcl/v2" -) - -var rootSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - LabelNames: nil, - }, - { - Type: "variable", - LabelNames: []string{"name"}, - }, - { - Type: "output", - LabelNames: []string{"name"}, - }, - { - Type: "provider", - LabelNames: []string{"name"}, - }, - { - Type: "resource", - LabelNames: []string{"type", "name"}, - }, - { - Type: "data", - LabelNames: []string{"type", "name"}, - }, - { - Type: "module", - LabelNames: []string{"name"}, - }, - }, -} - -var terraformBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "required_version", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "required_providers", - }, - }, -} - -var providerConfigSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "version", - }, - { - Name: "alias", - }, - }, -} - -var variableSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "type", - }, - { - Name: "description", - }, - { - Name: "default", - }, - }, -} - -var outputSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - }, -} - -var moduleCallSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "source", - }, - { - Name: "version", - }, - { - Name: "providers", - }, - }, -} - -var resourceSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "provider", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go deleted file mode 100644 index 548c9f9a..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go +++ /dev/null @@ -1,50 +0,0 @@ -package tfconfig - -import ( - legacyhcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/v2" -) - -// SourcePos is a pointer to a particular location in a source file. -// -// This type is embedded into other structs to allow callers to locate the -// definition of each described module element. The SourcePos of an element -// is usually the first line of its definition, although the definition can -// be a little "fuzzy" with JSON-based config files. -type SourcePos struct { - Filename string `json:"filename"` - Line int `json:"line"` -} - -func sourcePos(filename string, line int) SourcePos { - return SourcePos{ - Filename: filename, - Line: line, - } -} - -func sourcePosHCL(rng hcl.Range) SourcePos { - // We intentionally throw away the column information here because - // current and legacy HCL both disagree on the definition of a column - // and so a line-only reference is the best granularity we can do - // such that the result is consistent between both parsers. 
- return SourcePos{ - Filename: rng.Filename, - Line: rng.Start.Line, - } -} - -func sourcePosLegacyHCL(pos legacyhcltoken.Pos, filename string) SourcePos { - useFilename := pos.Filename - // We'll try to use the filename given in legacy HCL position, but - // in practice there's no way to actually get this populated via - // the HCL API so it's usually empty except in some specialized - // situations, such as positions in error objects. - if useFilename == "" { - useFilename = filename - } - return SourcePos{ - Filename: useFilename, - Line: pos.Line, - } -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go deleted file mode 100644 index 0f73fc99..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go +++ /dev/null @@ -1,16 +0,0 @@ -package tfconfig - -// Variable represents a single variable from a Terraform module. -type Variable struct { - Name string `json:"name"` - Type string `json:"type,omitempty"` - Description string `json:"description,omitempty"` - - // Default is an approximate representation of the default value in - // the native Go type system. The conversion from the value given in - // configuration may be slightly lossy. Only values that can be - // serialized by json.Marshal will be included here. - Default interface{} `json:"default,omitempty"` - - Pos SourcePos `json:"pos"` -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE deleted file mode 100644 index c33dcc7c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. 
any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. 
Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go deleted file mode 100644 index db12cee2..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go +++ /dev/null @@ -1,43 +0,0 @@ -package resource - -import ( - "context" - "net" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - tfplugin "github.com/hashicorp/terraform-plugin-sdk/plugin" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" -) - -// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC -// shim and starts it in a grpc server using an inmem connection. It returns a -// GRPCClient for this new server to test the shimmed resource provider. -func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface { - listener := bufconn.Listen(256 * 1024) - grpcServer := grpc.NewServer() - - p := plugin.NewGRPCProviderServerShim(rp) - proto.RegisterProviderServer(grpcServer, p) - - go grpcServer.Serve(listener) - - conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { - return listener.Dial() - }), grpc.WithInsecure()) - if err != nil { - panic(err) - } - - var pp tfplugin.GRPCProviderPlugin - client, _ := pp.GRPCClient(context.Background(), nil, conn) - - grpcClient := client.(*tfplugin.GRPCProvider) - grpcClient.TestServer = grpcServer - - return grpcClient -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go deleted file mode 100644 index 4befdb35..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go +++ /dev/null @@ -1,140 +0,0 @@ -package resource - -import ( - "fmt" - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// Map is a map of resources that are supported, and provides helpers for -// more easily implementing a ResourceProvider. -type Map struct { - Mapping map[string]Resource -} - -func (m *Map) Validate( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := m.Mapping[t] - if !ok { - return nil, []error{fmt.Errorf("Unknown resource type: %s", t)} - } - - // If there is no validator set, then it is valid - if r.ConfigValidator == nil { - return nil, nil - } - - return r.ConfigValidator.Validate(c) -} - -// Apply performs a create or update depending on the diff, and calls -// the proper function on the matching Resource. 
-func (m *Map) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - if d.Destroy || d.RequiresNew() { - if s.ID != "" { - // Destroy the resource if it is created - err := r.Destroy(s, meta) - if err != nil { - return s, err - } - - s.ID = "" - } - - // If we're only destroying, and not creating, then return now. - // Otherwise, we continue so that we can create a new resource. - if !d.RequiresNew() { - return nil, nil - } - } - - var result *terraform.InstanceState - var err error - if s.ID == "" { - result, err = r.Create(s, d, meta) - } else { - if r.Update == nil { - return s, fmt.Errorf( - "Resource type '%s' doesn't support update", - info.Type) - } - - result, err = r.Update(s, d, meta) - } - if result != nil { - if result.Attributes == nil { - result.Attributes = make(map[string]string) - } - - result.Attributes["id"] = result.ID - } - - return result, err -} - -// Diff performs a diff on the proper resource type. -func (m *Map) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - return r.Diff(s, c, meta) -} - -// Refresh performs a Refresh on the proper resource type. -// -// Refresh on the Resource won't be called if the state represents a -// non-created resource (ID is blank). -// -// An error is returned if the resource isn't registered. -func (m *Map) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the resource isn't created, don't refresh. - if s.ID == "" { - return s, nil - } - - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - return r.Refresh(s, meta) -} - -// Resources returns all the resources that are supported by this -// resource map and can be used to satisfy the Resources method of -// a ResourceProvider. -func (m *Map) Resources() []terraform.ResourceType { - ks := make([]string, 0, len(m.Mapping)) - for k := range m.Mapping { - ks = append(ks, k) - } - sort.Strings(ks) - - rs := make([]terraform.ResourceType, 0, len(m.Mapping)) - for _, k := range ks { - rs = append(rs, terraform.ResourceType{ - Name: k, - }) - } - - return rs -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go deleted file mode 100644 index 80782413..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go +++ /dev/null @@ -1,49 +0,0 @@ -package resource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/helper/config" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -type Resource struct { - ConfigValidator *config.Validator - Create CreateFunc - Destroy DestroyFunc - Diff DiffFunc - Refresh RefreshFunc - Update UpdateFunc -} - -// CreateFunc is a function that creates a resource that didn't previously -// exist. 
-type CreateFunc func( - *terraform.InstanceState, - *terraform.InstanceDiff, - interface{}) (*terraform.InstanceState, error) - -// DestroyFunc is a function that destroys a resource that previously -// exists using the state. -type DestroyFunc func( - *terraform.InstanceState, - interface{}) error - -// DiffFunc is a function that performs a diff of a resource. -type DiffFunc func( - *terraform.InstanceState, - *terraform.ResourceConfig, - interface{}) (*terraform.InstanceDiff, error) - -// RefreshFunc is a function that performs a refresh of a specific type -// of resource. -type RefreshFunc func( - *terraform.InstanceState, - interface{}) (*terraform.InstanceState, error) - -// UpdateFunc is a function that is called to update a resource that -// previously existed. The difference between this and CreateFunc is that -// the diff is guaranteed to only contain attributes that don't require -// a new resource. -type UpdateFunc func( - *terraform.InstanceState, - *terraform.InstanceDiff, - interface{}) (*terraform.InstanceState, error) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go deleted file mode 100644 index e21525de..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go +++ /dev/null @@ -1,404 +0,0 @@ -package resource - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "log" - "sort" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// testStepConfig runs a config-mode test step -func testStepConfig( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - return testStep(opts, state, step) -} - -func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { - if !step.Destroy { - if err := testStepTaint(state, step); err != nil { - return state, err - } - } - - cfg, err := testConfig(opts, step) - if err != nil { - return state, err - } - - var stepDiags tfdiags.Diagnostics - - // Build the context - opts.Config = cfg - opts.State, err = terraform.ShimLegacyState(state) - if err != nil { - return nil, err - } - - opts.Destroy = step.Destroy - ctx, stepDiags := terraform.NewContext(&opts) - if stepDiags.HasErrors() { - return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err()) - } - if stepDiags := ctx.Validate(); len(stepDiags) > 0 { - if stepDiags.HasErrors() { - return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) - } - - log.Printf("[WARN] Config warnings:\n%s", stepDiags) - } - - // Refresh! 
- newState, stepDiags := ctx.Refresh() - // shim the state first so the test can check the state on errors - - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - if stepDiags.HasErrors() { - return state, newOperationError("refresh", stepDiags) - } - - // If this step is a PlanOnly step, skip over this first Plan and subsequent - // Apply, and use the follow up Plan that checks for perpetual diffs - if !step.PlanOnly { - // Plan! - if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("plan", stepDiags) - } else { - log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes)) - } - - // We need to keep a copy of the state prior to destroying - // such that destroy steps can verify their behavior in the check - // function - stateBeforeApplication := state.DeepCopy() - - // Apply the diff, creating real resources. - newState, stepDiags = ctx.Apply() - // shim the state first so the test can check the state on errors - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - if stepDiags.HasErrors() { - return state, newOperationError("apply", stepDiags) - } - - // Run any configured checks - if step.Check != nil { - if step.Destroy { - if err := step.Check(stateBeforeApplication); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } else { - if err := step.Check(state); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } - } - } - - // Now, verify that Plan is now empty and we don't have a perpetual diff issue - // We do this with TWO plans. One without a refresh. - var p *plans.Plan - if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("follow-up plan", stepDiags) - } - if !p.Changes.Empty() { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } else { - return state, fmt.Errorf( - "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } - } - - // And another after a Refresh. - if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { - newState, stepDiags = ctx.Refresh() - if stepDiags.HasErrors() { - return state, newOperationError("follow-up refresh", stepDiags) - } - - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - } - if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("second follow-up refresh", stepDiags) - } - empty := p.Changes.Empty() - - // Data resources are tricky because they legitimately get instantiated - // during refresh so that they will be already populated during the - // plan walk. Because of this, if we have any data resources in the - // config we'll end up wanting to destroy them again here. This is - // acceptable and expected, and we'll treat it as "empty" for the - // sake of this testing. 
- if step.Destroy && !empty { - empty = true - for _, change := range p.Changes.Resources { - if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode { - empty = false - break - } - } - } - - if !empty { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } else { - return state, fmt.Errorf( - "After applying this step and refreshing, "+ - "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } - } - - // Made it here, but expected a non-empty plan, fail! - if step.ExpectNonEmptyPlan && empty { - return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") - } - - // Made it here? Good job test step! - return state, nil -} - -// legacyPlanComparisonString produces a string representation of the changes -// from a plan and a given state together, as was formerly produced by the -// String method of terraform.Plan. -// -// This is here only for compatibility with existing tests that predate our -// new plan and state types, and should not be used in new tests. Instead, use -// a library like "cmp" to do a deep equality and diff on the two -// data structures. -func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { - return fmt.Sprintf( - "DIFF:\n\n%s\n\nSTATE:\n\n%s", - legacyDiffComparisonString(changes), - state.String(), - ) -} - -// legacyDiffComparisonString produces a string representation of the changes -// from a planned changes object, as was formerly produced by the String method -// of terraform.Diff. -// -// This is here only for compatibility with existing tests that predate our -// new plan types, and should not be used in new tests. Instead, use a library -// like "cmp" to do a deep equality check and diff on the two data structures. -func legacyDiffComparisonString(changes *plans.Changes) string { - // The old string representation of a plan was grouped by module, but - // our new plan structure is not grouped in that way and so we'll need - // to preprocess it in order to produce that grouping. - type ResourceChanges struct { - Current *plans.ResourceInstanceChangeSrc - Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc - } - byModule := map[string]map[string]*ResourceChanges{} - resourceKeys := map[string][]string{} - requiresReplace := map[string][]string{} - var moduleKeys []string - for _, rc := range changes.Resources { - if rc.Action == plans.NoOp { - // We won't mention no-op changes here at all, since the old plan - // model we are emulating here didn't have such a concept.
- continue - } - moduleKey := rc.Addr.Module.String() - if _, exists := byModule[moduleKey]; !exists { - moduleKeys = append(moduleKeys, moduleKey) - byModule[moduleKey] = make(map[string]*ResourceChanges) - } - resourceKey := rc.Addr.Resource.String() - if _, exists := byModule[moduleKey][resourceKey]; !exists { - resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) - byModule[moduleKey][resourceKey] = &ResourceChanges{ - Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), - } - } - - if rc.DeposedKey == states.NotDeposed { - byModule[moduleKey][resourceKey].Current = rc - } else { - byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc - } - - rr := []string{} - for _, p := range rc.RequiredReplace.List() { - rr = append(rr, hcl2shim.FlatmapKeyFromPath(p)) - } - requiresReplace[resourceKey] = rr - } - sort.Strings(moduleKeys) - for _, ks := range resourceKeys { - sort.Strings(ks) - } - - var buf bytes.Buffer - - for _, moduleKey := range moduleKeys { - rcs := byModule[moduleKey] - var mBuf bytes.Buffer - - for _, resourceKey := range resourceKeys[moduleKey] { - rc := rcs[resourceKey] - - forceNewAttrs := requiresReplace[resourceKey] - - crud := "UPDATE" - if rc.Current != nil { - switch rc.Current.Action { - case plans.DeleteThenCreate: - crud = "DESTROY/CREATE" - case plans.CreateThenDelete: - crud = "CREATE/DESTROY" - case plans.Delete: - crud = "DESTROY" - case plans.Create: - crud = "CREATE" - } - } else { - // We must be working on a deposed object then, in which - // case destroying is the only possible action. - crud = "DESTROY" - } - - extra := "" - if rc.Current == nil && len(rc.Deposed) > 0 { - extra = " (deposed only)" - } - - fmt.Fprintf( - &mBuf, "%s: %s%s\n", - crud, resourceKey, extra, - ) - - attrNames := map[string]bool{} - var oldAttrs map[string]string - var newAttrs map[string]string - if rc.Current != nil { - if before := rc.Current.Before; before != nil { - ty, err := before.ImpliedType() - if err == nil { - val, err := before.Decode(ty) - if err == nil { - oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range oldAttrs { - attrNames[k] = true - } - } - } - } - if after := rc.Current.After; after != nil { - ty, err := after.ImpliedType() - if err == nil { - val, err := after.Decode(ty) - if err == nil { - newAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range newAttrs { - attrNames[k] = true - } - } - } - } - } - if oldAttrs == nil { - oldAttrs = make(map[string]string) - } - if newAttrs == nil { - newAttrs = make(map[string]string) - } - - attrNamesOrder := make([]string, 0, len(attrNames)) - keyLen := 0 - for n := range attrNames { - attrNamesOrder = append(attrNamesOrder, n) - if len(n) > keyLen { - keyLen = len(n) - } - } - sort.Strings(attrNamesOrder) - - for _, attrK := range attrNamesOrder { - v := newAttrs[attrK] - u := oldAttrs[attrK] - - if v == hcl2shim.UnknownVariableValue { - v = "<computed>" - } - // NOTE: we don't support <sensitive> here because we would - // need schema to do that. Excluding sensitive values - // is now done at the UI layer, and so should not be tested - // at the core layer. - - updateMsg := "" - - // This may not be as precise as in the old diff, as it matches - // everything under the attribute that was originally marked as - // ForceNew, but should help make it easier to determine what - // caused replacement here.
- for _, k := range forceNewAttrs { - if strings.HasPrefix(attrK, k) { - updateMsg = " (forces new resource)" - break - } - } - - fmt.Fprintf( - &mBuf, " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, v, - updateMsg, - ) - } - } - - if moduleKey == "" { // root module - buf.Write(mBuf.Bytes()) - buf.WriteByte('\n') - continue - } - - fmt.Fprintf(&buf, "%s:\n", moduleKey) - s := bufio.NewScanner(&mBuf) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return buf.String() -} - -func testStepTaint(state *terraform.State, step TestStep) error { - for _, p := range step.Taint { - m := state.RootModule() - if m == nil { - return errors.New("no state") - } - rs, ok := m.Resources[p] - if !ok { - return fmt.Errorf("resource %q not found in state", p) - } - log.Printf("[WARN] Test: Explicitly tainting resource %q", p) - rs.Taint() - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go deleted file mode 100644 index 7b7c30a7..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go +++ /dev/null @@ -1,233 +0,0 @@ -package resource - -import ( - "fmt" - "log" - "reflect" - "strings" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// testStepImportState runs an import state test step -func testStepImportState( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - - // Determine the ID to import - var importId string - switch { - case step.ImportStateIdFunc != nil: - var err error - importId, err = step.ImportStateIdFunc(state) - if err != nil { - return state, err - } - case step.ImportStateId != "": - importId = step.ImportStateId - default: - resource, err := testResource(step, state) - if err != nil { - return state, err - } - importId = resource.Primary.ID - } - - importPrefix := step.ImportStateIdPrefix - if importPrefix != "" { - importId = fmt.Sprintf("%s%s", importPrefix, importId) - } - - // Setup the context. We initialize with an empty state. We use the - // full config for provider configurations. - cfg, err := testConfig(opts, step) - if err != nil { - return state, err - } - - opts.Config = cfg - - // import tests start with empty state - opts.State = states.NewState() - - ctx, stepDiags := terraform.NewContext(&opts) - if stepDiags.HasErrors() { - return state, stepDiags.Err() - } - - // The test step provides the resource address as a string, so we need - // to parse it to get an addrs.AbsResourceAddress to pass in to the - // import method.
- traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{}) - if hclDiags.HasErrors() { - return nil, hclDiags - } - importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal) - if stepDiags.HasErrors() { - return nil, stepDiags.Err() - } - - // Do the import - importedState, stepDiags := ctx.Import(&terraform.ImportOpts{ - // Set the module so that any provider config is loaded - Config: cfg, - - Targets: []*terraform.ImportTarget{ - { - Addr: importAddr, - ID: importId, - }, - }, - }) - if stepDiags.HasErrors() { - log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err()) - return state, stepDiags.Err() - } - - newState, err := shimNewState(importedState, step.providers) - if err != nil { - return nil, err - } - - // Go through the new state and verify - if step.ImportStateCheck != nil { - var states []*terraform.InstanceState - for _, r := range newState.RootModule().Resources { - if r.Primary != nil { - is := r.Primary.DeepCopy() - is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type - states = append(states, is) - } - } - if err := step.ImportStateCheck(states); err != nil { - return state, err - } - } - - // Verify that all the states match - if step.ImportStateVerify { - new := newState.RootModule().Resources - old := state.RootModule().Resources - for _, r := range new { - // Find the existing resource - var oldR *terraform.ResourceState - for _, r2 := range old { - if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type { - oldR = r2 - break - } - } - if oldR == nil { - return state, fmt.Errorf( - "Failed state verification, resource with ID %s not found", - r.Primary.ID) - } - - // We'll try our best to find the schema for this resource type - // so we can ignore Removed fields during validation. If we fail - // to find the schema then we won't ignore them and so the test - // will need to rely on explicit ImportStateVerifyIgnore, though - // this shouldn't happen in any reasonable case. - var rsrcSchema *schema.Resource - if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() { - providerType := providerAddr.ProviderConfig.Type - if provider, ok := step.providers[providerType]; ok { - if provider, ok := provider.(*schema.Provider); ok { - rsrcSchema = provider.ResourcesMap[r.Type] - } - } - } - - // don't add empty flatmapped containers, so we can more easily - // compare the attributes - skipEmpty := func(k, v string) bool { - if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { - if v == "0" { - return true - } - } - return false - } - - // Compare their attributes - actual := make(map[string]string) - for k, v := range r.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - actual[k] = v - } - - expected := make(map[string]string) - for k, v := range oldR.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - expected[k] = v - } - - // Remove fields we're ignoring - for _, v := range step.ImportStateVerifyIgnore { - for k := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - // Also remove any attributes that are marked as "Removed" in the - // schema, if we have a schema to check that against. 
- if rsrcSchema != nil { - for k := range actual { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(actual, k) - break - } - } - } - for k := range expected { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(expected, k) - break - } - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return state, fmt.Errorf( - "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - } - } - - // Return the old state (non-imported) so we don't change anything. - return state, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go deleted file mode 100644 index 609c208b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go +++ /dev/null @@ -1,200 +0,0 @@ -package schema - -import ( - "context" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - ctyconvert "github.com/zclconf/go-cty/cty/convert" -) - -// Backend represents a partial backend.Backend implementation and simplifies -// the creation of configuration loading and validation. -// -// Unlike other schema structs such as Provider, this struct is meant to be -// embedded within your actual implementation. It provides implementations -// only for Input and Configure and gives you a method for accessing the -// configuration in the form of a ResourceData that you're expected to call -// from the other implementation funcs. -type Backend struct { - // Schema is the schema for the configuration of this backend. If this - // Backend has no configuration this can be omitted. - Schema map[string]*Schema - - // ConfigureFunc is called to configure the backend. Use the - // FromContext* methods to extract information from the context. - // This can be nil, in which case nothing will be called but the - // config will still be stored. - ConfigureFunc func(context.Context) error - - config *ResourceData -} - -var ( - backendConfigKey = contextKey("backend config") -) - -// FromContextBackendConfig extracts a ResourceData with the configuration -// from the context. This should only be called by Backend functions. -func FromContextBackendConfig(ctx context.Context) *ResourceData { - return ctx.Value(backendConfigKey).(*ResourceData) -} - -func (b *Backend) ConfigSchema() *configschema.Block { - // This is an alias of CoreConfigSchema just to implement the - // backend.Backend interface. - return b.CoreConfigSchema() -} - -func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { - if b == nil { - return configVal, nil - } - var diags tfdiags.Diagnostics - var err error - - // In order to use Transform below, this needs to be filled out completely - // according to the schema.
- configVal, err = b.CoreConfigSchema().CoerceValue(configVal) - if err != nil { - return configVal, diags.Append(err) - } - - // lookup any required, top-level attributes that are Null, and see if we - // have a Default value available. - configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { - // we're only looking for top-level attributes - if len(path) != 1 { - return val, nil - } - - // nothing to do if we already have a value - if !val.IsNull() { - return val, nil - } - - // get the Schema definition for this attribute - getAttr, ok := path[0].(cty.GetAttrStep) - // these should all exist, but just ignore anything strange - if !ok { - return val, nil - } - - attrSchema := b.Schema[getAttr.Name] - // continue to ignore anything that doesn't match - if attrSchema == nil { - return val, nil - } - - // this is deprecated, so don't set it - if attrSchema.Deprecated != "" || attrSchema.Removed != "" { - return val, nil - } - - // find a default value if it exists - def, err := attrSchema.DefaultValue() - if err != nil { - diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) - return val, err - } - - // no default - if def == nil { - return val, nil - } - - // create a cty.Value and make sure it's the correct type - tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) - - // helper/schema used to allow setting "" to a bool - if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { - // return a warning about the conversion - diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name) - tmpVal = cty.False - } - - val, err = ctyconvert.Convert(tmpVal, val.Type()) - if err != nil { - diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) - } - - return val, err - }) - if err != nil { - // any error here was already added to the diagnostics - return configVal, diags - } - - shimRC := b.shimConfig(configVal) - warns, errs := schemaMap(b.Schema).Validate(shimRC) - for _, warn := range warns { - diags = diags.Append(tfdiags.SimpleWarning(warn)) - } - for _, err := range errs { - diags = diags.Append(err) - } - return configVal, diags -} - -func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics { - if b == nil { - return nil - } - - var diags tfdiags.Diagnostics - sm := schemaMap(b.Schema) - shimRC := b.shimConfig(obj) - - // Get a ResourceData for this configuration. To do this, we actually - // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, shimRC, nil, nil, true) - if err != nil { - diags = diags.Append(err) - return diags - } - - data, err := sm.Data(nil, diff) - if err != nil { - diags = diags.Append(err) - return diags - } - b.config = data - - if b.ConfigureFunc != nil { - err = b.ConfigureFunc(context.WithValue( - context.Background(), backendConfigKey, data)) - if err != nil { - diags = diags.Append(err) - return diags - } - } - - return diags -} - -// shimConfig turns a new-style cty.Value configuration (which must be of -// an object type) into a minimal old-style *terraform.ResourceConfig object -// that should be populated enough to appease the not-yet-updated functionality -// in this package. This should be removed once everything is updated. -func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig { - shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{}) - if !ok { - // If the configVal was nil, we still want a non-nil map here. 
- shimMap = map[string]interface{}{} - } - return &terraform.ResourceConfig{ - Config: shimMap, - Raw: shimMap, - } -} - -// Config returns the configuration. This is available after Configure is -// called. -func (b *Backend) Config() *ResourceData { - return b.config -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go deleted file mode 100644 index 36b494c0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go +++ /dev/null @@ -1,26 +0,0 @@ -package httpclient - -import ( - "fmt" - "log" - "os" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/meta" -) - -const uaEnvVar = "TF_APPEND_USER_AGENT" - -func TerraformUserAgent(version string) string { - ua := fmt.Sprintf("HashiCorp Terraform/%s (+https://www.terraform.io) Terraform Plugin SDK/%s", version, meta.SDKVersionString()) - - if add := os.Getenv(uaEnvVar); add != "" { - add = strings.TrimSpace(add) - if len(add) > 0 { - ua += " " + add - log.Printf("[DEBUG] Using modified User-Agent: %s", ua) - } - } - - return ua -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go deleted file mode 100644 index 90a5faf0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// CountAttr is the address of an attribute of the "count" object in -// the interpolation scope, like "count.index". -type CountAttr struct { - referenceable - Name string -} - -func (ca CountAttr) String() string { - return "count." + ca.Name -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go deleted file mode 100644 index 7a638503..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// ForEachAttr is the address of an attribute referencing the current "for_each" object in -// the interpolation scope, addressed using the "each" keyword, ex. "each.key" and "each.value" -type ForEachAttr struct { - referenceable - Name string -} - -func (f ForEachAttr) String() string { - return "each." + f.Name -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go deleted file mode 100644 index d2c046c1..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go +++ /dev/null @@ -1,41 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// InputVariable is the address of an input variable. -type InputVariable struct { - referenceable - Name string -} - -func (v InputVariable) String() string { - return "var." + v.Name -} - -// AbsInputVariableInstance is the address of an input variable within a -// particular module instance. -type AbsInputVariableInstance struct { - Module ModuleInstance - Variable InputVariable -} - -// InputVariable returns the absolute address of the input variable of the -// given name inside the receiving module instance. 
-func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance { - return AbsInputVariableInstance{ - Module: m, - Variable: InputVariable{ - Name: name, - }, - } -} - -func (v AbsInputVariableInstance) String() string { - if len(v.Module) == 0 { - return v.Variable.String() - } - - return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go deleted file mode 100644 index cef8b279..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go +++ /dev/null @@ -1,123 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// InstanceKey represents the key of an instance within an object that -// contains multiple instances due to using "count" or "for_each" arguments -// in configuration. -// -// IntKey and StringKey are the two implementations of this type. No other -// implementations are allowed. The single instance of an object that _isn't_ -// using "count" or "for_each" is represented by NoKey, which is a nil -// InstanceKey. -type InstanceKey interface { - instanceKeySigil() - String() string -} - -// ParseInstanceKey returns the instance key corresponding to the given value, -// which must be known and non-null. -// -// If an unknown or null value is provided then this function will panic. This -// function is intended to deal with the values that would naturally be found -// in a hcl.TraverseIndex, which (when parsed from source, at least) can never -// contain unknown or null values. -func ParseInstanceKey(key cty.Value) (InstanceKey, error) { - switch key.Type() { - case cty.String: - return StringKey(key.AsString()), nil - case cty.Number: - var idx int - err := gocty.FromCtyValue(key, &idx) - return IntKey(idx), err - default: - return NoKey, fmt.Errorf("either a string or an integer is required") - } -} - -// NoKey represents the absence of an InstanceKey, for the single instance -// of a configuration object that does not use "count" or "for_each" at all. -var NoKey InstanceKey - -// IntKey is the InstanceKey representation for integer indices, as -// used when the "count" argument is specified or if for_each is used with -// a sequence type. -type IntKey int - -func (k IntKey) instanceKeySigil() { -} - -func (k IntKey) String() string { - return fmt.Sprintf("[%d]", int(k)) -} - -// StringKey is the InstanceKey representation for string indices, as -// used when the "for_each" argument is specified with a map or object type. -type StringKey string - -func (k StringKey) instanceKeySigil() { -} - -func (k StringKey) String() string { - // FIXME: This isn't _quite_ right because Go's quoted string syntax is - // slightly different than HCL's, but we'll accept it for now. - return fmt.Sprintf("[%q]", string(k)) -} - -// InstanceKeyLess returns true if the first given instance key i should sort -// before the second key j, and false otherwise.
-func InstanceKeyLess(i, j InstanceKey) bool { - iTy := instanceKeyType(i) - jTy := instanceKeyType(j) - - switch { - case i == j: - return false - case i == NoKey: - return true - case j == NoKey: - return false - case iTy != jTy: - // The ordering here is arbitrary except that we want NoKeyType - // to sort before the others, so we'll just use the enum values - // of InstanceKeyType here (where NoKey is zero, sorting before - // any other). - return uint32(iTy) < uint32(jTy) - case iTy == IntKeyType: - return int(i.(IntKey)) < int(j.(IntKey)) - case iTy == StringKeyType: - return string(i.(StringKey)) < string(j.(StringKey)) - default: - // Shouldn't be possible to get down here in practice, since the - // above is exhaustive. - return false - } -} - -func instanceKeyType(k InstanceKey) InstanceKeyType { - if _, ok := k.(StringKey); ok { - return StringKeyType - } - if _, ok := k.(IntKey); ok { - return IntKeyType - } - return NoKeyType -} - -// InstanceKeyType represents the different types of instance key that are -// supported. Usually it is sufficient to simply type-assert an InstanceKey -// value to either IntKey or StringKey, but this type and its values can be -// used to represent the types themselves, rather than specific values -// of those types. -type InstanceKeyType rune - -const ( - NoKeyType InstanceKeyType = 0 - IntKeyType InstanceKeyType = 'I' - StringKeyType InstanceKeyType = 'S' -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go deleted file mode 100644 index 61a07b9c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go +++ /dev/null @@ -1,48 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// LocalValue is the address of a local value. -type LocalValue struct { - referenceable - Name string -} - -func (v LocalValue) String() string { - return "local." + v.Name -} - -// Absolute converts the receiver into an absolute address within the given -// module instance. -func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue { - return AbsLocalValue{ - Module: m, - LocalValue: v, - } -} - -// AbsLocalValue is the absolute address of a local value within a module instance. -type AbsLocalValue struct { - Module ModuleInstance - LocalValue LocalValue -} - -// LocalValue returns the absolute address of a local value of the given -// name within the receiving module instance. -func (m ModuleInstance) LocalValue(name string) AbsLocalValue { - return AbsLocalValue{ - Module: m, - LocalValue: LocalValue{ - Name: name, - }, - } -} - -func (v AbsLocalValue) String() string { - if len(v.Module) == 0 { - return v.LocalValue.String() - } - return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go deleted file mode 100644 index 1533f853..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go +++ /dev/null @@ -1,51 +0,0 @@ -package addrs - -import ( - "strings" -) - -// Module is an address for a module call within configuration. 
This is -// the static counterpart of ModuleInstance, representing a traversal through -// the static module call tree in configuration and does not take into account -// the potentially-multiple instances of a module that might be created by -// "count" and "for_each" arguments within those calls. -// -// This type should be used only in very specialized cases when working with -// the static module call tree. Type ModuleInstance is appropriate in more cases. -// -// Although Module is a slice, it should be treated as immutable after creation. -type Module []string - -// IsRoot returns true if the receiver is the address of the root module, -// or false otherwise. -func (m Module) IsRoot() bool { - return len(m) == 0 -} - -func (m Module) String() string { - if len(m) == 0 { - return "" - } - return strings.Join([]string(m), ".") -} - -// Call returns the module call address that corresponds to the given module -// instance, along with the address of the module that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCall and then returns a slice of the receiever that excludes that -// last part. This is just a convenience for situations where a call address -// is required, such as when dealing with *Reference and Referencable values. -func (m Module) Call() (Module, ModuleCall) { - if len(m) == 0 { - panic("cannot produce ModuleCall for root module") - } - - caller, callName := m[:len(m)-1], m[len(m)-1] - return caller, ModuleCall{ - Name: callName, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go deleted file mode 100644 index d138fade..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go +++ /dev/null @@ -1,63 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// ModuleCall is the address of a call from the current module to a child -// module. -// -// There is no "Abs" version of ModuleCall because an absolute module path -// is represented by ModuleInstance. -type ModuleCall struct { - referenceable - Name string -} - -func (c ModuleCall) String() string { - return "module." + c.Name -} - -// ModuleCallInstance is the address of one instance of a module created from -// a module call, which might create multiple instances using "count" or -// "for_each" arguments. -type ModuleCallInstance struct { - referenceable - Call ModuleCall - Key InstanceKey -} - -func (c ModuleCallInstance) String() string { - if c.Key == NoKey { - return c.Call.String() - } - return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key) -} - -// ModuleInstance returns the address of the module instance that corresponds -// to the receiving call instance when resolved in the given calling module. -// In other words, it returns the child module instance that the receving -// call instance creates. -func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance { - return caller.Child(c.Call.Name, c.Key) -} - -// ModuleCallOutput is the address of a particular named output produced by -// an instance of a module call. 
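
The removed Module.Call above splits a static module path into its container and the final call name, panicking on the root module. A sketch of that split over plain string slices, with call as an illustrative name:

package main

import "fmt"

// call mirrors Module.Call as a sketch: the last element names the module
// call and the remaining prefix addresses the module that contains it.
// Like the real method, there is no call for the root module.
func call(m []string) (caller []string, name string) {
	if len(m) == 0 {
		panic("cannot produce ModuleCall for root module")
	}
	return m[:len(m)-1], m[len(m)-1]
}

func main() {
	caller, name := call([]string{"network", "subnets"})
	fmt.Println(caller, "module."+name) // [network] module.subnets
}
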
-type ModuleCallOutput struct { - referenceable - Call ModuleCallInstance - Name string -} - -func (co ModuleCallOutput) String() string { - return fmt.Sprintf("%s.%s", co.Call.String(), co.Name) -} - -// AbsOutputValue returns the absolute output value address that corresponds -// to the receving module call output address, once resolved in the given -// calling module. -func (co ModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue { - moduleAddr := co.Call.ModuleInstance(caller) - return moduleAddr.OutputValue(co.Name) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go deleted file mode 100644 index bb0901a2..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go +++ /dev/null @@ -1,388 +0,0 @@ -package addrs - -import ( - "bytes" - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ModuleInstance is an address for a particular module instance within the -// dynamic module tree. This is an extension of the static traversals -// represented by type Module that deals with the possibility of a single -// module call producing multiple instances via the "count" and "for_each" -// arguments. -// -// Although ModuleInstance is a slice, it should be treated as immutable after -// creation. -type ModuleInstance []ModuleInstanceStep - -var ( - _ Targetable = ModuleInstance(nil) -) - -func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { - mi, remain, diags := parseModuleInstancePrefix(traversal) - if len(remain) != 0 { - if len(remain) == len(traversal) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance address", - Detail: "A module instance address must begin with \"module.\".", - Subject: remain.SourceRange().Ptr(), - }) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance address", - Detail: "The module instance address is followed by additional invalid content.", - Subject: remain.SourceRange().Ptr(), - }) - } - } - return mi, diags -} - -// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseProviderConfigCompact. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// then the returned address is invalid. 
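
ParseModuleInstanceStr and the other "...Str" helpers in this package delegate to HCL's public native-syntax traversal parser, which provider code can import directly. A runnable sketch of the traversal steps that parseModuleInstancePrefix then walks:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// The same parser the removed ...Str helpers use; each traversal step is
	// a root name, an attribute, or an index with a cty value as its key.
	traversal, diags := hclsyntax.ParseTraversalAbs(
		[]byte(`module.network[0].module.subnets["a"]`), "", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	for _, step := range traversal {
		switch tt := step.(type) {
		case hcl.TraverseRoot:
			fmt.Println("root: ", tt.Name)
		case hcl.TraverseAttr:
			fmt.Println("attr: ", tt.Name)
		case hcl.TraverseIndex:
			fmt.Println("index:", tt.Key.GoString())
		}
	}
}
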
-func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - addr, addrDiags := ParseModuleInstance(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) { - remain := traversal - var mi ModuleInstance - var diags tfdiags.Diagnostics - - for len(remain) > 0 { - var next string - switch tt := remain[0].(type) { - case hcl.TraverseRoot: - next = tt.Name - case hcl.TraverseAttr: - next = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Module address prefix must be followed by dot and then a name.", - Subject: remain[0].SourceRange().Ptr(), - }) - break - } - - if next != "module" { - break - } - - kwRange := remain[0].SourceRange() - remain = remain[1:] - // If we have the prefix "module" then we should be followed by an - // module call name, as an attribute, and then optionally an index step - // giving the instance key. - if len(remain) == 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Prefix \"module.\" must be followed by a module name.", - Subject: &kwRange, - }) - break - } - - var moduleName string - switch tt := remain[0].(type) { - case hcl.TraverseAttr: - moduleName = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Prefix \"module.\" must be followed by a module name.", - Subject: remain[0].SourceRange().Ptr(), - }) - break - } - remain = remain[1:] - step := ModuleInstanceStep{ - Name: moduleName, - } - - if len(remain) > 0 { - if idx, ok := remain[0].(hcl.TraverseIndex); ok { - remain = remain[1:] - - switch idx.Key.Type() { - case cty.String: - step.InstanceKey = StringKey(idx.Key.AsString()) - case cty.Number: - var idxInt int - err := gocty.FromCtyValue(idx.Key, &idxInt) - if err == nil { - step.InstanceKey = IntKey(idxInt) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: fmt.Sprintf("Invalid module index: %s.", err), - Subject: idx.SourceRange().Ptr(), - }) - } - default: - // Should never happen, because no other types are allowed in traversal indices. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Invalid module key: must be either a string or an integer.", - Subject: idx.SourceRange().Ptr(), - }) - } - } - } - - mi = append(mi, step) - } - - var retRemain hcl.Traversal - if len(remain) > 0 { - retRemain = make(hcl.Traversal, len(remain)) - copy(retRemain, remain) - // The first element here might be either a TraverseRoot or a - // TraverseAttr, depending on whether we had a module address on the - // front. To make life easier for callers, we'll normalize to always - // start with a TraverseRoot. 
- if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { - retRemain[0] = hcl.TraverseRoot{ - Name: tt.Name, - SrcRange: tt.SrcRange, - } - } - } - - return mi, retRemain, diags -} - -// UnkeyedInstanceShim is a shim method for converting a Module address to the -// equivalent ModuleInstance address that assumes that no modules have -// keyed instances. -// -// This is a temporary allowance for the fact that Terraform does not presently -// support "count" and "for_each" on modules, and thus graph building code that -// derives graph nodes from configuration must just assume unkeyed modules -// in order to construct the graph. At a later time when "count" and "for_each" -// support is added for modules, all callers of this method will need to be -// reworked to allow for keyed module instances. -func (m Module) UnkeyedInstanceShim() ModuleInstance { - path := make(ModuleInstance, len(m)) - for i, name := range m { - path[i] = ModuleInstanceStep{Name: name} - } - return path -} - -// ModuleInstanceStep is a single traversal step through the dynamic module -// tree. It is used only as part of ModuleInstance. -type ModuleInstanceStep struct { - Name string - InstanceKey InstanceKey -} - -// RootModuleInstance is the module instance address representing the root -// module, which is also the zero value of ModuleInstance. -var RootModuleInstance ModuleInstance - -// IsRoot returns true if the receiver is the address of the root module instance, -// or false otherwise. -func (m ModuleInstance) IsRoot() bool { - return len(m) == 0 -} - -// Child returns the address of a child module instance of the receiver, -// identified by the given name and key. -func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance { - ret := make(ModuleInstance, 0, len(m)+1) - ret = append(ret, m...) - return append(ret, ModuleInstanceStep{ - Name: name, - InstanceKey: key, - }) -} - -// Parent returns the address of the parent module instance of the receiver, or -// the receiver itself if there is no parent (if it's the root module address). -func (m ModuleInstance) Parent() ModuleInstance { - if len(m) == 0 { - return m - } - return m[:len(m)-1] -} - -// String returns a string representation of the receiver, in the format used -// within e.g. user-provided resource addresses. -// -// The address of the root module has the empty string as its representation. -func (m ModuleInstance) String() string { - var buf bytes.Buffer - sep := "" - for _, step := range m { - buf.WriteString(sep) - buf.WriteString("module.") - buf.WriteString(step.Name) - if step.InstanceKey != NoKey { - buf.WriteString(step.InstanceKey.String()) - } - sep = "." - } - return buf.String() -} - -// Less returns true if the receiver should sort before the given other value -// in a sorted list of addresses. -func (m ModuleInstance) Less(o ModuleInstance) bool { - if len(m) != len(o) { - // Shorter path sorts first. - return len(m) < len(o) - } - - for i := range m { - mS, oS := m[i], o[i] - switch { - case mS.Name != oS.Name: - return mS.Name < oS.Name - case mS.InstanceKey != oS.InstanceKey: - return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey) - } - } - - return false -} - -// Ancestors returns a slice containing the receiver and all of its ancestor -// module instances, all the way up to (and including) the root module. -// The result is ordered by depth, with the root module always first. -// -// Since the result always includes the root module, a caller may choose to -// ignore it by slicing the result with [1:]. 
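
The Ancestors result described above is every prefix of the path, root first. A sketch over plain string slices:

package main

import "fmt"

// ancestors mirrors ModuleInstance.Ancestors as a sketch: all prefixes of
// the path, from the empty root module up to the full receiver.
func ancestors(m []string) [][]string {
	ret := make([][]string, 0, len(m)+1)
	for i := 0; i <= len(m); i++ {
		ret = append(ret, m[:i])
	}
	return ret
}

func main() {
	fmt.Println(ancestors([]string{"vpc", "subnet"})) // [[] [vpc] [vpc subnet]]
}
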
-func (m ModuleInstance) Ancestors() []ModuleInstance { - ret := make([]ModuleInstance, 0, len(m)+1) - for i := 0; i <= len(m); i++ { - ret = append(ret, m[:i]) - } - return ret -} - -// Call returns the module call address that corresponds to the given module -// instance, along with the address of the module instance that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// A single module call can produce potentially many module instances, so the -// result discards any instance key that might be present on the last step -// of the instance. To retain this, use CallInstance instead. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCall and then returns a slice of the receiever that excludes that -// last part. This is just a convenience for situations where a call address -// is required, such as when dealing with *Reference and Referencable values. -func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) { - if len(m) == 0 { - panic("cannot produce ModuleCall for root module") - } - - inst, lastStep := m[:len(m)-1], m[len(m)-1] - return inst, ModuleCall{ - Name: lastStep.Name, - } -} - -// CallInstance returns the module call instance address that corresponds to -// the given module instance, along with the address of the module instance -// that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCallInstance and then returns a slice of the receiever that excludes -// that last part. This is just a convenience for situations where a call\ -// address is required, such as when dealing with *Reference and Referencable -// values. -func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) { - if len(m) == 0 { - panic("cannot produce ModuleCallInstance for root module") - } - - inst, lastStep := m[:len(m)-1], m[len(m)-1] - return inst, ModuleCallInstance{ - Call: ModuleCall{ - Name: lastStep.Name, - }, - Key: lastStep.InstanceKey, - } -} - -// TargetContains implements Targetable by returning true if the given other -// address either matches the receiver, is a sub-module-instance of the -// receiver, or is a targetable absolute address within a module that -// is contained within the reciever. -func (m ModuleInstance) TargetContains(other Targetable) bool { - switch to := other.(type) { - - case ModuleInstance: - if len(to) < len(m) { - // Can't be contained if the path is shorter - return false - } - // Other is contained if its steps match for the length of our own path. - for i, ourStep := range m { - otherStep := to[i] - if ourStep != otherStep { - return false - } - } - // If we fall out here then the prefixed matched, so it's contained. 
- return true - - case AbsResource: - return m.TargetContains(to.Module) - - case AbsResourceInstance: - return m.TargetContains(to.Module) - - default: - return false - } -} - -func (m ModuleInstance) targetableSigil() { - // ModuleInstance is targetable -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go deleted file mode 100644 index bcd923ac..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go +++ /dev/null @@ -1,75 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// OutputValue is the address of an output value, in the context of the module -// that is defining it. -// -// This is related to but separate from ModuleCallOutput, which represents -// a module output from the perspective of its parent module. Since output -// values cannot be represented from the module where they are defined, -// OutputValue is not Referenceable, while ModuleCallOutput is. -type OutputValue struct { - Name string -} - -func (v OutputValue) String() string { - return "output." + v.Name -} - -// Absolute converts the receiver into an absolute address within the given -// module instance. -func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue { - return AbsOutputValue{ - Module: m, - OutputValue: v, - } -} - -// AbsOutputValue is the absolute address of an output value within a module instance. -// -// This represents an output globally within the namespace of a particular -// configuration. It is related to but separate from ModuleCallOutput, which -// represents a module output from the perspective of its parent module. -type AbsOutputValue struct { - Module ModuleInstance - OutputValue OutputValue -} - -// OutputValue returns the absolute address of an output value of the given -// name within the receiving module instance. -func (m ModuleInstance) OutputValue(name string) AbsOutputValue { - return AbsOutputValue{ - Module: m, - OutputValue: OutputValue{ - Name: name, - }, - } -} - -func (v AbsOutputValue) String() string { - if v.Module.IsRoot() { - return v.OutputValue.String() - } - return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) -} - -// ModuleCallOutput converts an AbsModuleOutput into a ModuleCallOutput, -// returning also the module instance that the ModuleCallOutput is relative -// to. -// -// The root module does not have a call, and so this method cannot be used -// with outputs in the root module, and will panic in that case. -func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallOutput) { - if v.Module.IsRoot() { - panic("ReferenceFromCall used with root module output") - } - - caller, call := v.Module.CallInstance() - return caller, ModuleCallOutput{ - Call: call, - Name: v.OutputValue.Name, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go deleted file mode 100644 index a2ee1644..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go +++ /dev/null @@ -1,346 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Reference describes a reference to an address with source location -// information. 
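
The parseRef function below dispatches on the traversal's root name. A condensed sketch of that dispatch; the bucket labels are illustrative, but the keyword list matches the switch in the removed code:

package main

import "fmt"

// rootKind condenses parseRef's outer switch: reserved root names resolve
// to scope objects or data resources, and anything else is read as a
// managed resource type.
func rootKind(root string) string {
	switch root {
	case "count", "each", "local", "module", "path", "self", "terraform", "var":
		return "reserved scope prefix"
	case "data":
		return "data resource reference"
	default:
		return "managed resource reference"
	}
}

func main() {
	fmt.Println(rootKind("var"))          // reserved scope prefix
	fmt.Println(rootKind("data"))         // data resource reference
	fmt.Println(rootKind("aws_instance")) // managed resource reference
}
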
-type Reference struct { - Subject Referenceable - SourceRange tfdiags.SourceRange - Remaining hcl.Traversal -} - -// ParseRef attempts to extract a referencable address from the prefix of the -// given traversal, which must be an absolute traversal or this function -// will panic. -// -// If no error diagnostics are returned, the returned reference includes the -// address that was extracted, the source range it was extracted from, and any -// remaining relative traversal that was not consumed as part of the -// reference. -// -// If error diagnostics are returned then the Reference value is invalid and -// must not be used. -func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - ref, diags := parseRef(traversal) - - // Normalize a little to make life easier for callers. - if ref != nil { - if len(ref.Remaining) == 0 { - ref.Remaining = nil - } - } - - return ref, diags -} - -// ParseRefStr is a helper wrapper around ParseRef that takes a string -// and parses it with the HCL native syntax traversal parser before -// interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseRef. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned reference may be nil or incomplete. -func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - ref, targetDiags := ParseRef(traversal) - diags = diags.Append(targetDiags) - return ref, diags -} - -func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - root := traversal.RootName() - rootRange := traversal[0].SourceRange() - - switch root { - - case "count": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: CountAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "each": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: ForEachAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "data": - if len(traversal) < 3 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `The "data" object must be followed by two attribute names: the data source type and the resource name.`, - Subject: traversal.SourceRange().Ptr(), - }) - return nil, diags - } - remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser - return parseResourceRef(DataResourceMode, rootRange, remain) - - case "local": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: LocalValue{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "module": - callName, callRange, remain, diags := 
parseSingleAttrRef(traversal) - if diags.HasErrors() { - return nil, diags - } - - // A traversal starting with "module" can either be a reference to - // an entire module instance or to a single output from a module - // instance, depending on what we find after this introducer. - - callInstance := ModuleCallInstance{ - Call: ModuleCall{ - Name: callName, - }, - Key: NoKey, - } - - if len(remain) == 0 { - // Reference to an entire module instance. Might alternatively - // be a reference to a collection of instances of a particular - // module, but the caller will need to deal with that ambiguity - // since we don't have enough context here. - return &Reference{ - Subject: callInstance, - SourceRange: tfdiags.SourceRangeFromHCL(callRange), - Remaining: remain, - }, diags - } - - if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { - var err error - callInstance.Key, err = ParseInstanceKey(idxTrav.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid index key", - Detail: fmt.Sprintf("Invalid index for module instance: %s.", err), - Subject: &idxTrav.SrcRange, - }) - return nil, diags - } - remain = remain[1:] - - if len(remain) == 0 { - // Also a reference to an entire module instance, but we have a key - // now. - return &Reference{ - Subject: callInstance, - SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)), - Remaining: remain, - }, diags - } - } - - if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok { - remain = remain[1:] - return &Reference{ - Subject: ModuleCallOutput{ - Name: attrTrav.Name, - Call: callInstance, - }, - SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)), - Remaining: remain, - }, diags - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: "Module instance objects do not support this operation.", - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - - case "path": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: PathAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "self": - return &Reference{ - Subject: Self, - SourceRange: tfdiags.SourceRangeFromHCL(rootRange), - Remaining: traversal[1:], - }, diags - - case "terraform": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: TerraformAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "var": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: InputVariable{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - default: - return parseResourceRef(ManagedResourceMode, rootRange, traversal) - } -} - -func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if len(traversal) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`, - Subject: hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(), - }) - return nil, diags - } - - var typeName, name string - switch tt := traversal[0].(type) { // Could be either root or attr, depending on 
our resource mode - case hcl.TraverseRoot: - typeName = tt.Name - case hcl.TraverseAttr: - typeName = tt.Name - default: - // If it isn't a TraverseRoot then it must be a "data" reference. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `The "data" object does not support this operation.`, - Subject: traversal[0].SourceRange().Ptr(), - }) - return nil, diags - } - - attrTrav, ok := traversal[1].(hcl.TraverseAttr) - if !ok { - var what string - switch mode { - case DataResourceMode: - what = "data source" - default: - what = "resource type" - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what), - Subject: traversal[1].SourceRange().Ptr(), - }) - return nil, diags - } - name = attrTrav.Name - rng := hcl.RangeBetween(startRange, attrTrav.SrcRange) - remain := traversal[2:] - - resourceAddr := Resource{ - Mode: mode, - Type: typeName, - Name: name, - } - resourceInstAddr := ResourceInstance{ - Resource: resourceAddr, - Key: NoKey, - } - - if len(remain) == 0 { - // This might actually be a reference to the collection of all instances - // of the resource, but we don't have enough context here to decide - // so we'll let the caller resolve that ambiguity. - return &Reference{ - Subject: resourceAddr, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - }, diags - } - - if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { - var err error - resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid index key", - Detail: fmt.Sprintf("Invalid index for resource instance: %s.", err), - Subject: &idxTrav.SrcRange, - }) - return nil, diags - } - remain = remain[1:] - rng = hcl.RangeBetween(rng, idxTrav.SrcRange) - } - - return &Reference{ - Subject: resourceInstAddr, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags -} - -func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - root := traversal.RootName() - rootRange := traversal[0].SourceRange() - - if len(traversal) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf("The %q object cannot be accessed directly. 
Instead, access one of its attributes.", root), - Subject: &rootRange, - }) - return "", hcl.Range{}, nil, diags - } - if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok { - return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf("The %q object does not support this operation.", root), - Subject: traversal[1].SourceRange().Ptr(), - }) - return "", hcl.Range{}, nil, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go deleted file mode 100644 index 5b922e8b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go +++ /dev/null @@ -1,240 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Target describes a targeted address with source location information. -type Target struct { - Subject Targetable - SourceRange tfdiags.SourceRange -} - -// ParseTarget attempts to interpret the given traversal as a targetable -// address. The given traversal must be absolute, or this function will -// panic. -// -// If no error diagnostics are returned, the returned target includes the -// address that was extracted and the source range it was extracted from. -// -// If error diagnostics are returned then the Target value is invalid and -// must not be used. -func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { - path, remain, diags := parseModuleInstancePrefix(traversal) - if diags.HasErrors() { - return nil, diags - } - - rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange()) - - if len(remain) == 0 { - return &Target{ - Subject: path, - SourceRange: rng, - }, diags - } - - mode := ManagedResourceMode - if remain.RootName() == "data" { - mode = DataResourceMode - remain = remain[1:] - } - - if len(remain) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Resource specification must include a resource type and name.", - Subject: remain.SourceRange().Ptr(), - }) - return nil, diags - } - - var typeName, name string - switch tt := remain[0].(type) { - case hcl.TraverseRoot: - typeName = tt.Name - case hcl.TraverseAttr: - typeName = tt.Name - default: - switch mode { - case ManagedResourceMode: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource type name is required.", - Subject: remain[0].SourceRange().Ptr(), - }) - case DataResourceMode: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A data source name is required.", - Subject: remain[0].SourceRange().Ptr(), - }) - default: - panic("unknown mode") - } - return nil, diags - } - - switch tt := remain[1].(type) { - case hcl.TraverseAttr: - name = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource name is required.", - Subject: remain[1].SourceRange().Ptr(), - }) - return nil, diags - } - - var subject Targetable - remain = remain[2:] - switch len(remain) { - case 0: - subject = path.Resource(mode, typeName, name) - case 1: - if tt, ok := 
remain[0].(hcl.TraverseIndex); ok { - key, err := ParseInstanceKey(tt.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: fmt.Sprintf("Invalid resource instance key: %s.", err), - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - } - - subject = path.ResourceInstance(mode, typeName, name, key) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Resource instance key must be given in square brackets.", - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - } - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Unexpected extra operators after address.", - Subject: remain[1].SourceRange().Ptr(), - }) - return nil, diags - } - - return &Target{ - Subject: subject, - SourceRange: rng, - }, diags -} - -// ParseTargetStr is a helper wrapper around ParseTarget that takes a string -// and parses it with the HCL native syntax traversal parser before -// interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a target string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseTarget. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned target may be nil or incomplete. -func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - target, targetDiags := ParseTarget(traversal) - diags = diags.Append(targetDiags) - return target, diags -} - -// ParseAbsResourceInstance attempts to interpret the given traversal as an -// absolute resource instance address, using the same syntax as expected by -// ParseTarget. -// -// If no error diagnostics are returned, the returned target includes the -// address that was extracted and the source range it was extracted from. -// -// If error diagnostics are returned then the AbsResource value is invalid and -// must not be used. -func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) { - addr, diags := ParseTarget(traversal) - if diags.HasErrors() { - return AbsResourceInstance{}, diags - } - - switch tt := addr.Subject.(type) { - - case AbsResource: - return tt.Instance(NoKey), diags - - case AbsResourceInstance: - return tt, diags - - case ModuleInstance: // Catch likely user error with specialized message - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource instance address is required here. 
The module path must be followed by a resource instance specification.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResourceInstance{}, diags - - default: // Generic message for other address types - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource address is required here.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResourceInstance{}, diags - - } -} - -// ParseAbsResourceInstanceStr is a helper wrapper around -// ParseAbsResourceInstance that takes a string and parses it with the HCL -// native syntax traversal parser before interpreting it. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned address may be incomplete. -// -// Since this function has no context about the source of the given string, -// any returned diagnostics will not have meaningful source location -// information. -func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return AbsResourceInstance{}, diags - } - - addr, addrDiags := ParseAbsResourceInstance(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go deleted file mode 100644 index cfc13f4b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// PathAttr is the address of an attribute of the "path" object in -// the interpolation scope, like "path.module". -type PathAttr struct { - referenceable - Name string -} - -func (pa PathAttr) String() string { - return "path." + pa.Name -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go deleted file mode 100644 index c6fce1a5..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go +++ /dev/null @@ -1,289 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// ProviderConfig is the address of a provider configuration. -type ProviderConfig struct { - Type string - - // If not empty, Alias identifies which non-default (aliased) provider - // configuration this address refers to. - Alias string -} - -// ParseProviderConfigCompact parses the given absolute traversal as a relative -// provider address in compact form. The following are examples of traversals -// that can be successfully parsed as compact relative provider configuration -// addresses: -// -// aws -// aws.foo -// -// This function will panic if given a relative traversal. -// -// If the returned diagnostics contains errors then the result value is invalid -// and must not be used. 
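
ParseProviderConfigCompact (continued below) accepts exactly the two forms listed above, "aws" and "aws.foo". A sketch of the same classification built on HCL's public parser; compact is an illustrative name, and the real function's diagnostics for extra or malformed steps are reduced here to a single error return:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

// compact mirrors ParseProviderConfigCompact as a sketch: the root name is
// the provider type and an optional attribute step is the alias.
func compact(addr string) (typeName, alias string, err error) {
	traversal, diags := hclsyntax.ParseTraversalAbs([]byte(addr), "", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		return "", "", diags
	}
	typeName = traversal.RootName()
	if len(traversal) > 1 {
		if attr, ok := traversal[1].(hcl.TraverseAttr); ok {
			alias = attr.Name
		}
	}
	return typeName, alias, nil
}

func main() {
	fmt.Println(compact("aws"))     // aws  <nil>
	fmt.Println(compact("aws.foo")) // aws foo <nil>
}
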
-func ParseProviderConfigCompact(traversal hcl.Traversal) (ProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := ProviderConfig{ - Type: traversal.RootName(), - } - - if len(traversal) < 2 { - // Just a type name, then. - return ret, diags - } - - aliasStep := traversal[1] - switch ts := aliasStep.(type) { - case hcl.TraverseAttr: - ret.Alias = ts.Name - return ret, diags - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.", - Subject: aliasStep.SourceRange().Ptr(), - }) - } - - if len(traversal) > 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Extraneous extra operators after provider configuration address.", - Subject: traversal[2:].SourceRange().Ptr(), - }) - } - - return ret, diags -} - -// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseProviderConfigCompact. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// then the returned address is invalid. -func ParseProviderConfigCompactStr(str string) (ProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return ProviderConfig{}, diags - } - - addr, addrDiags := ParseProviderConfigCompact(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -// Absolute returns an AbsProviderConfig from the receiver and the given module -// instance address. -func (pc ProviderConfig) Absolute(module ModuleInstance) AbsProviderConfig { - return AbsProviderConfig{ - Module: module, - ProviderConfig: pc, - } -} - -func (pc ProviderConfig) String() string { - if pc.Type == "" { - // Should never happen; always indicates a bug - return "provider." - } - - if pc.Alias != "" { - return fmt.Sprintf("provider.%s.%s", pc.Type, pc.Alias) - } - - return "provider." + pc.Type -} - -// StringCompact is an alternative to String that returns the form that can -// be parsed by ParseProviderConfigCompact, without the "provider." prefix. -func (pc ProviderConfig) StringCompact() string { - if pc.Alias != "" { - return fmt.Sprintf("%s.%s", pc.Type, pc.Alias) - } - return pc.Type -} - -// AbsProviderConfig is the absolute address of a provider configuration -// within a particular module instance. -type AbsProviderConfig struct { - Module ModuleInstance - ProviderConfig ProviderConfig -} - -// ParseAbsProviderConfig parses the given traversal as an absolute provider -// address. 
The following are examples of traversals that can be successfully -// parsed as absolute provider configuration addresses: -// -// provider.aws -// provider.aws.foo -// module.bar.provider.aws -// module.bar.module.baz.provider.aws.foo -// module.foo[1].provider.aws.foo -// -// This type of address is used, for example, to record the relationships -// between resources and provider configurations in the state structure. -// This type of address is not generally used in the UI, except in error -// messages that refer to provider configurations. -func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) { - modInst, remain, diags := parseModuleInstancePrefix(traversal) - ret := AbsProviderConfig{ - Module: modInst, - } - if len(remain) < 2 || remain.RootName() != "provider" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Provider address must begin with \"provider.\", followed by a provider type name.", - Subject: remain.SourceRange().Ptr(), - }) - return ret, diags - } - if len(remain) > 3 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Extraneous operators after provider configuration alias.", - Subject: hcl.Traversal(remain[3:]).SourceRange().Ptr(), - }) - return ret, diags - } - - if tt, ok := remain[1].(hcl.TraverseAttr); ok { - ret.ProviderConfig.Type = tt.Name - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "The prefix \"provider.\" must be followed by a provider type name.", - Subject: remain[1].SourceRange().Ptr(), - }) - return ret, diags - } - - if len(remain) == 3 { - if tt, ok := remain[2].(hcl.TraverseAttr); ok { - ret.ProviderConfig.Alias = tt.Name - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Provider type name must be followed by a configuration alias name.", - Subject: remain[2].SourceRange().Ptr(), - }) - return ret, diags - } - } - - return ret, diags -} - -// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseAbsProviderConfig. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned address is invalid. 
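
The absolute and compact provider address forms shown above compose mechanically. A sketch of the string layout, with absProvider as an illustrative name:

package main

import "fmt"

// absProvider mirrors ProviderConfig.String and AbsProviderConfig.String as
// a sketch: "provider.<type>" plus an optional ".<alias>", prefixed with
// the module instance path for non-root modules.
func absProvider(module, typ, alias string) string {
	addr := "provider." + typ
	if alias != "" {
		addr += "." + alias
	}
	if module != "" {
		addr = module + "." + addr
	}
	return addr
}

func main() {
	fmt.Println(absProvider("", "aws", ""))              // provider.aws
	fmt.Println(absProvider("module.bar", "aws", "foo")) // module.bar.provider.aws.foo
}
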
-func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return AbsProviderConfig{}, diags - } - - addr, addrDiags := ParseAbsProviderConfig(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -// ProviderConfigDefault returns the address of the default provider config -// of the given type inside the recieving module instance. -func (m ModuleInstance) ProviderConfigDefault(name string) AbsProviderConfig { - return AbsProviderConfig{ - Module: m, - ProviderConfig: ProviderConfig{ - Type: name, - }, - } -} - -// ProviderConfigAliased returns the address of an aliased provider config -// of with given type and alias inside the recieving module instance. -func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderConfig { - return AbsProviderConfig{ - Module: m, - ProviderConfig: ProviderConfig{ - Type: name, - Alias: alias, - }, - } -} - -// Inherited returns an address that the receiving configuration address might -// inherit from in a parent module. The second bool return value indicates if -// such inheritance is possible, and thus whether the returned address is valid. -// -// Inheritance is possible only for default (un-aliased) providers in modules -// other than the root module. Even if a valid address is returned, inheritence -// may not be performed for other reasons, such as if the calling module -// provided explicit provider configurations within the call for this module. -// The ProviderTransformer graph transform in the main terraform module has -// the authoritative logic for provider inheritance, and this method is here -// mainly just for its benefit. -func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) { - // Can't inherit if we're already in the root. - if len(pc.Module) == 0 { - return AbsProviderConfig{}, false - } - - // Can't inherit if we have an alias. - if pc.ProviderConfig.Alias != "" { - return AbsProviderConfig{}, false - } - - // Otherwise, we might inherit from a configuration with the same - // provider name in the parent module instance. - parentMod := pc.Module.Parent() - return pc.ProviderConfig.Absolute(parentMod), true -} - -func (pc AbsProviderConfig) String() string { - if len(pc.Module) == 0 { - return pc.ProviderConfig.String() - } - return fmt.Sprintf("%s.%s", pc.Module.String(), pc.ProviderConfig.String()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go deleted file mode 100644 index 64b8ac86..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go +++ /dev/null @@ -1,7 +0,0 @@ -package addrs - -// ProviderType encapsulates a single provider type. 
In the future this will be -// extended to include additional fields including Namespace and SourceHost -type ProviderType struct { - Name string -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go deleted file mode 100644 index 211083a5..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go +++ /dev/null @@ -1,20 +0,0 @@ -package addrs - -// Referenceable is an interface implemented by all address types that can -// appear as references in configuration language expressions. -type Referenceable interface { - // All implementations of this interface must be covered by the type switch - // in lang.Scope.buildEvalContext. - referenceableSigil() - - // String produces a string representation of the address that could be - // parsed as a HCL traversal and passed to ParseRef to produce an identical - // result. - String() string -} - -type referenceable struct { -} - -func (r referenceable) referenceableSigil() { -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go deleted file mode 100644 index 103f8a28..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go +++ /dev/null @@ -1,254 +0,0 @@ -package addrs - -import ( - "fmt" - "strings" -) - -// Resource is an address for a resource block within configuration, which -// contains potentially-multiple resource instances if that configuration -// block uses "count" or "for_each". -type Resource struct { - referenceable - Mode ResourceMode - Type string - Name string -} - -func (r Resource) String() string { - switch r.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", r.Type, r.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", r.Type, r.Name) - default: - // Should never happen, but we'll return a string here rather than - // crashing just in case it does. - return fmt.Sprintf(".%s.%s", r.Type, r.Name) - } -} - -// Instance produces the address for a specific instance of the receiver -// that is idenfied by the given key. -func (r Resource) Instance(key InstanceKey) ResourceInstance { - return ResourceInstance{ - Resource: r, - Key: key, - } -} - -// Absolute returns an AbsResource from the receiver and the given module -// instance address. -func (r Resource) Absolute(module ModuleInstance) AbsResource { - return AbsResource{ - Module: module, - Resource: r, - } -} - -// DefaultProviderConfig returns the address of the provider configuration -// that should be used for the resource identified by the reciever if it -// does not have a provider configuration address explicitly set in -// configuration. -// -// This method is not able to verify that such a configuration exists, nor -// represent the behavior of automatically inheriting certain provider -// configurations from parent modules. It just does a static analysis of the -// receiving address and returns an address to start from, relative to the -// same module that contains the resource. -func (r Resource) DefaultProviderConfig() ProviderConfig { - typeName := r.Type - if under := strings.Index(typeName, "_"); under != -1 { - typeName = typeName[:under] - } - return ProviderConfig{ - Type: typeName, - } -} - -// ResourceInstance is an address for a specific instance of a resource. 
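
The removed DefaultProviderConfig above infers the implied provider type by truncating the resource type at the first underscore. A sketch of that rule:

package main

import (
	"fmt"
	"strings"
)

// defaultProviderType mirrors Resource.DefaultProviderConfig as a sketch:
// everything before the first underscore in the resource type names the
// provider, or the whole name when there is no underscore.
func defaultProviderType(resourceType string) string {
	if under := strings.Index(resourceType, "_"); under != -1 {
		return resourceType[:under]
	}
	return resourceType
}

func main() {
	fmt.Println(defaultProviderType("aws_instance"))           // aws
	fmt.Println(defaultProviderType("pingaccess_virtualhost")) // pingaccess
	fmt.Println(defaultProviderType("http"))                   // http
}
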
-// When a resource is defined in configuration with "count" or "for_each" it -// produces zero or more instances, which can be addressed using this type. -type ResourceInstance struct { - referenceable - Resource Resource - Key InstanceKey -} - -func (r ResourceInstance) ContainingResource() Resource { - return r.Resource -} - -func (r ResourceInstance) String() string { - if r.Key == NoKey { - return r.Resource.String() - } - return r.Resource.String() + r.Key.String() -} - -// Absolute returns an AbsResourceInstance from the receiver and the given module -// instance address. -func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance { - return AbsResourceInstance{ - Module: module, - Resource: r, - } -} - -// AbsResource is an absolute address for a resource under a given module path. -type AbsResource struct { - targetable - Module ModuleInstance - Resource Resource -} - -// Resource returns the address of a particular resource within the receiver. -func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource { - return AbsResource{ - Module: m, - Resource: Resource{ - Mode: mode, - Type: typeName, - Name: name, - }, - } -} - -// Instance produces the address for a specific instance of the receiver -// that is idenfied by the given key. -func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance { - return AbsResourceInstance{ - Module: r.Module, - Resource: r.Resource.Instance(key), - } -} - -// TargetContains implements Targetable by returning true if the given other -// address is either equal to the receiver or is an instance of the -// receiver. -func (r AbsResource) TargetContains(other Targetable) bool { - switch to := other.(type) { - - case AbsResource: - // We'll use our stringification as a cheat-ish way to test for equality. - return to.String() == r.String() - - case AbsResourceInstance: - return r.TargetContains(to.ContainingResource()) - - default: - return false - - } -} - -func (r AbsResource) String() string { - if len(r.Module) == 0 { - return r.Resource.String() - } - return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) -} - -// AbsResourceInstance is an absolute address for a resource instance under a -// given module path. -type AbsResourceInstance struct { - targetable - Module ModuleInstance - Resource ResourceInstance -} - -// ResourceInstance returns the address of a particular resource instance within the receiver. -func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance { - return AbsResourceInstance{ - Module: m, - Resource: ResourceInstance{ - Resource: Resource{ - Mode: mode, - Type: typeName, - Name: name, - }, - Key: key, - }, - } -} - -// ContainingResource returns the address of the resource that contains the -// receving resource instance. In other words, it discards the key portion -// of the address to produce an AbsResource value. -func (r AbsResourceInstance) ContainingResource() AbsResource { - return AbsResource{ - Module: r.Module, - Resource: r.Resource.ContainingResource(), - } -} - -// TargetContains implements Targetable by returning true if the given other -// address is equal to the receiver. -func (r AbsResourceInstance) TargetContains(other Targetable) bool { - switch to := other.(type) { - - case AbsResourceInstance: - // We'll use our stringification as a cheat-ish way to test for equality. 
- return to.String() == r.String() - - default: - return false - - } -} - -func (r AbsResourceInstance) String() string { - if len(r.Module) == 0 { - return r.Resource.String() - } - return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) -} - -// Less returns true if the receiver should sort before the given other value -// in a sorted list of addresses. -func (r AbsResourceInstance) Less(o AbsResourceInstance) bool { - switch { - - case len(r.Module) != len(o.Module): - return len(r.Module) < len(o.Module) - - case r.Module.String() != o.Module.String(): - return r.Module.Less(o.Module) - - case r.Resource.Resource.Mode != o.Resource.Resource.Mode: - return r.Resource.Resource.Mode == DataResourceMode - - case r.Resource.Resource.Type != o.Resource.Resource.Type: - return r.Resource.Resource.Type < o.Resource.Resource.Type - - case r.Resource.Resource.Name != o.Resource.Resource.Name: - return r.Resource.Resource.Name < o.Resource.Resource.Name - - case r.Resource.Key != o.Resource.Key: - return InstanceKeyLess(r.Resource.Key, o.Resource.Key) - - default: - return false - - } -} - -// ResourceMode defines which lifecycle applies to a given resource. Each -// resource lifecycle has a slightly different address format. -type ResourceMode rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type ResourceMode - -const ( - // InvalidResourceMode is the zero value of ResourceMode and is not - // a valid resource mode. - InvalidResourceMode ResourceMode = 0 - - // ManagedResourceMode indicates a managed resource, as defined by - // "resource" blocks in configuration. - ManagedResourceMode ResourceMode = 'M' - - // DataResourceMode indicates a data resource, as defined by - // "data" blocks in configuration. - DataResourceMode ResourceMode = 'D' -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go deleted file mode 100644 index 9bdbdc42..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go +++ /dev/null @@ -1,105 +0,0 @@ -package addrs - -import "fmt" - -// ResourceInstancePhase is a special kind of reference used only internally -// during graph building to represent resource instances that are in a -// non-primary state. -// -// Graph nodes can declare themselves referenceable via an instance phase -// or can declare that they reference an instance phase in order to accommodate -// secondary graph nodes dealing with, for example, destroy actions. -// -// This special reference type cannot be accessed directly by end-users, and -// should never be shown in the UI. -type ResourceInstancePhase struct { - referenceable - ResourceInstance ResourceInstance - Phase ResourceInstancePhaseType -} - -var _ Referenceable = ResourceInstancePhase{} - -// Phase returns a special "phase address" for the receiving instance. See the -// documentation of ResourceInstancePhase for the limited situations where this -// is intended to be used. -func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase { - return ResourceInstancePhase{ - ResourceInstance: r, - Phase: rpt, - } -} - -// ContainingResource returns an address for the same phase of the resource -// that this instance belongs to.
-func (rp ResourceInstancePhase) ContainingResource() ResourcePhase { - return rp.ResourceInstance.Resource.Phase(rp.Phase) -} - -func (rp ResourceInstancePhase) String() string { - // We use a different separator here than usual to ensure that we'll - // never conflict with any non-phased resource instance string. This - // is intentionally something that would fail parsing with ParseRef, - // because this special address type should never be exposed in the UI. - return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase) -} - -// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase. -type ResourceInstancePhaseType string - -const ( - // ResourceInstancePhaseDestroy represents the "destroy" phase of a - // resource instance. - ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy" - - // ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy - // but is used for resources that have "create_before_destroy" set, thus - // requiring a different dependency ordering. - ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd" -) - -func (rpt ResourceInstancePhaseType) String() string { - return string(rpt) -} - -// ResourcePhase is a special kind of reference used only internally -// during graph building to represent resources that are in a -// non-primary state. -// -// Graph nodes can declare themselves referenceable via a resource phase -// or can declare that they reference a resource phase in order to accommodate -// secondary graph nodes dealing with, for example, destroy actions. -// -// Since resources (as opposed to instances) aren't actually phased, this -// address type is used only as an approximation during initial construction -// of the resource-oriented plan graph, under the assumption that resource -// instances with ResourceInstancePhase addresses will be created in dynamic -// subgraphs during the graph walk. -// -// This special reference type cannot be accessed directly by end-users, and -// should never be shown in the UI. -type ResourcePhase struct { - referenceable - Resource Resource - Phase ResourceInstancePhaseType -} - -var _ Referenceable = ResourcePhase{} - -// Phase returns a special "phase address" for the receiving instance. See the -// documentation of ResourceInstancePhase for the limited situations where this -// is intended to be used. -func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase { - return ResourcePhase{ - Resource: r, - Phase: rpt, - } -} - -func (rp ResourcePhase) String() string { - // We use a different separator here than usual to ensure that we'll - // never conflict with any non-phased resource instance string. This - // is intentionally something that would fail parsing with ParseRef, - // because this special address type should never be exposed in the UI. - return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go deleted file mode 100644 index 0b5c33f8..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by "stringer -type ResourceMode"; DO NOT EDIT. - -package addrs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again.
- var x [1]struct{} - _ = x[InvalidResourceMode-0] - _ = x[ManagedResourceMode-77] - _ = x[DataResourceMode-68] -} - -const ( - _ResourceMode_name_0 = "InvalidResourceMode" - _ResourceMode_name_1 = "DataResourceMode" - _ResourceMode_name_2 = "ManagedResourceMode" -) - -func (i ResourceMode) String() string { - switch { - case i == 0: - return _ResourceMode_name_0 - case i == 68: - return _ResourceMode_name_1 - case i == 77: - return _ResourceMode_name_2 - default: - return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go deleted file mode 100644 index 7f24eaf0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go +++ /dev/null @@ -1,14 +0,0 @@ -package addrs - -// Self is the address of the special object "self" that behaves as an alias -// for a containing object currently in scope. -const Self selfT = 0 - -type selfT int - -func (s selfT) referenceableSigil() { -} - -func (s selfT) String() string { - return "self" -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go deleted file mode 100644 index 16819a5a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go +++ /dev/null @@ -1,26 +0,0 @@ -package addrs - -// Targetable is an interface implemented by all address types that can be -// used as "targets" for selecting sub-graphs of a graph. -type Targetable interface { - targetableSigil() - - // TargetContains returns true if the receiver is considered to contain - // the given other address. Containment, for the purpose of targeting, - // means that if a container address is targeted then all of the - // addresses within it are also implicitly targeted. - // - // A targetable address always contains at least itself. - TargetContains(other Targetable) bool - - // String produces a string representation of the address that could be - // parsed as a HCL traversal and passed to ParseTarget to produce an - // identical result. - String() string -} - -type targetable struct { -} - -func (r targetable) targetableSigil() { -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go deleted file mode 100644 index a880182a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// TerraformAttr is the address of an attribute of the "terraform" object in -// the interpolation scope, like "terraform.workspace". -type TerraformAttr struct { - referenceable - Name string -} - -func (ta TerraformAttr) String() string { - return "terraform." 
+ ta.Name -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go deleted file mode 100644 index c054acf0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go +++ /dev/null @@ -1,295 +0,0 @@ -package format - -import ( - "bufio" - "bytes" - "fmt" - "sort" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcled" - "github.com/hashicorp/hcl/v2/hclparse" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/mitchellh/colorstring" - wordwrap "github.com/mitchellh/go-wordwrap" - "github.com/zclconf/go-cty/cty" -) - -// Diagnostic formats a single diagnostic message. -// -// The width argument specifies at what column the diagnostic messages will -// be wrapped. If set to zero, messages will not be wrapped by this function -// at all. Although the long-form text parts of the message are wrapped, -// not all aspects of the message are guaranteed to fit within the specified -// terminal width. -func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string { - if diag == nil { - // No good reason to pass a nil diagnostic in here... - return "" - } - - var buf bytes.Buffer - - switch diag.Severity() { - case tfdiags.Error: - buf.WriteString(color.Color("\n[bold][red]Error: [reset]")) - case tfdiags.Warning: - buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]")) - default: - // Clear out any coloring that might be applied by Terraform's UI helper, - // so our result is not context-sensitive. - buf.WriteString(color.Color("\n[reset]")) - } - - desc := diag.Description() - sourceRefs := diag.Source() - - // We don't wrap the summary, since we expect it to be terse, and since - // this is where we put the text of a native Go error it may not always - // be pure text that lends itself well to word-wrapping. - fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary) - - if sourceRefs.Subject != nil { - // We'll borrow HCL's range implementation here, because it has some - // handy features to help us produce a nice source code snippet. - highlightRange := sourceRefs.Subject.ToHCL() - snippetRange := highlightRange - if sourceRefs.Context != nil { - snippetRange = sourceRefs.Context.ToHCL() - } - - // Make sure the snippet includes the highlight. This should be true - // for any reasonable diagnostic, but we'll make sure. - snippetRange = hcl.RangeOver(snippetRange, highlightRange) - if snippetRange.Empty() { - snippetRange.End.Byte++ - snippetRange.End.Column++ - } - if highlightRange.Empty() { - highlightRange.End.Byte++ - highlightRange.End.Column++ - } - - var src []byte - if sources != nil { - src = sources[snippetRange.Filename] - } - if src == nil { - // This should generally not happen, as long as sources are always - // loaded through the main loader. We may load things in other - // ways in weird cases, so we'll tolerate it at the expense of - // a not-so-helpful error message. 
- fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line) - } else { - file, offset := parseRange(src, highlightRange) - - headerRange := highlightRange - - contextStr := hcled.ContextString(file, offset-1) - if contextStr != "" { - contextStr = ", in " + contextStr - } - - fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr) - - // Config snippet rendering - sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) - for sc.Scan() { - lineRange := sc.Range() - if !lineRange.Overlaps(snippetRange) { - continue - } - beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) - before := beforeRange.SliceBytes(src) - highlighted := highlightedRange.SliceBytes(src) - after := afterRange.SliceBytes(src) - fmt.Fprintf( - &buf, color.Color("%4d: %s[underline]%s[reset]%s\n"), - lineRange.Start.Line, - before, highlighted, after, - ) - } - - } - - if fromExpr := diag.FromExpr(); fromExpr != nil { - // We may also be able to generate information about the dynamic - // values of relevant variables at the point of evaluation, then. - // This is particularly useful for expressions that get evaluated - // multiple times with different values, such as blocks using - // "count" and "for_each", or within "for" expressions. - expr := fromExpr.Expression - ctx := fromExpr.EvalContext - vars := expr.Variables() - stmts := make([]string, 0, len(vars)) - seen := make(map[string]struct{}, len(vars)) - Traversals: - for _, traversal := range vars { - for len(traversal) > 1 { - val, diags := traversal.TraverseAbs(ctx) - if diags.HasErrors() { - // Skip anything that generates errors, since we probably - // already have the same error in our diagnostics set - // already. - traversal = traversal[:len(traversal)-1] - continue - } - - traversalStr := traversalStr(traversal) - if _, exists := seen[traversalStr]; exists { - continue Traversals // don't show duplicates when the same variable is referenced multiple times - } - switch { - case !val.IsKnown(): - // Can't say anything about this yet, then. - continue Traversals - case val.IsNull(): - stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr)) - default: - stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val))) - } - seen[traversalStr] = struct{}{} - } - } - - sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? - - if len(stmts) > 0 { - fmt.Fprint(&buf, color.Color(" [dark_gray]|----------------[reset]\n")) - } - for _, stmt := range stmts { - fmt.Fprintf(&buf, color.Color(" [dark_gray]|[reset] %s\n"), stmt) - } - } - - buf.WriteByte('\n') - } - - if desc.Detail != "" { - detail := desc.Detail - if width != 0 { - detail = wordwrap.WrapString(detail, uint(width)) - } - fmt.Fprintf(&buf, "%s\n", detail) - } - - return buf.String() -} - -func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { - filename := rng.Filename - offset := rng.Start.Byte - - // We need to re-parse here to get a *hcl.File we can interrogate. This - // is not awesome since we presumably already parsed the file earlier too, - // but this re-parsing is architecturally simpler than retaining all of - // the hcl.File objects and we only do this in the case of an error anyway - // so the overhead here is not a big problem. 
- parser := hclparse.NewParser() - var file *hcl.File - var diags hcl.Diagnostics - if strings.HasSuffix(filename, ".json") { - file, diags = parser.ParseJSON(src, filename) - } else { - file, diags = parser.ParseHCL(src, filename) - } - if diags.HasErrors() { - return file, offset - } - - return file, offset -} - -// traversalStr produces a representation of an HCL traversal that is compact, -// resembles HCL native syntax, and is suitable for display in the UI. -func traversalStr(traversal hcl.Traversal) string { - // This is a specialized subset of traversal rendering tailored to - // producing helpful contextual messages in diagnostics. It is not - // comprehensive nor intended to be used for other purposes. - - var buf bytes.Buffer - for _, step := range traversal { - switch tStep := step.(type) { - case hcl.TraverseRoot: - buf.WriteString(tStep.Name) - case hcl.TraverseAttr: - buf.WriteByte('.') - buf.WriteString(tStep.Name) - case hcl.TraverseIndex: - buf.WriteByte('[') - if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { - buf.WriteString(compactValueStr(tStep.Key)) - } else { - // We'll just use a placeholder for more complex values, - // since otherwise our result could grow ridiculously long. - buf.WriteString("...") - } - buf.WriteByte(']') - } - } - return buf.String() -} - -// compactValueStr produces a compact, single-line summary of a given value -// that is suitable for display in the UI. -// -// For primitives it returns a full representation, while for more complex -// types it instead summarizes the type, size, etc to produce something -// that is hopefully still somewhat useful but not as verbose as a rendering -// of the entire data structure. -func compactValueStr(val cty.Value) string { - // This is a specialized subset of value rendering tailored to producing - // helpful but concise messages in diagnostics. It is not comprehensive - // nor intended to be used for other purposes. - - ty := val.Type() - switch { - case val.IsNull(): - return "null" - case !val.IsKnown(): - // Should never happen here because we should filter before we get - // in here, but we'll do something reasonable rather than panic. - return "(not yet known)" - case ty == cty.Bool: - if val.True() { - return "true" - } - return "false" - case ty == cty.Number: - bf := val.AsBigFloat() - return bf.Text('g', 10) - case ty == cty.String: - // Go string syntax is not exactly the same as HCL native string syntax, - // but we'll accept the minor edge-cases where this is different here - // for now, just to get something reasonable here. 
- return fmt.Sprintf("%q", val.AsString()) - case ty.IsCollectionType() || ty.IsTupleType(): - l := val.LengthInt() - switch l { - case 0: - return "empty " + ty.FriendlyName() - case 1: - return ty.FriendlyName() + " with 1 element" - default: - return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) - } - case ty.IsObjectType(): - atys := ty.AttributeTypes() - l := len(atys) - switch l { - case 0: - return "object with no attributes" - case 1: - var name string - for k := range atys { - name = k - } - return fmt.Sprintf("object with 1 attribute %q", name) - default: - return fmt.Sprintf("object with %d attributes", l) - } - default: - return ty.FriendlyName() - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go deleted file mode 100644 index 0a2aa7d0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go +++ /dev/null @@ -1,1192 +0,0 @@ -package format - -import ( - "bufio" - "bytes" - "fmt" - "sort" - "strings" - - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// ResourceChange returns a string representation of a change to a particular -// resource, for inclusion in user-facing plan output. -// -// The resource schema must be provided along with the change so that the -// formatted change can reflect the configuration structure for the associated -// resource. -// -// If "color" is non-nil, it will be used to color the result. Otherwise, -// no color codes will be included. 
-func ResourceChange( - change *plans.ResourceInstanceChangeSrc, - tainted bool, - schema *configschema.Block, - color *colorstring.Colorize, -) string { - addr := change.Addr - var buf bytes.Buffer - - if color == nil { - color = &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, - Reset: false, - } - } - - dispAddr := addr.String() - if change.DeposedKey != states.NotDeposed { - dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey) - } - - switch change.Action { - case plans.Create: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr))) - case plans.Read: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be read during apply\n # (config refers to values not yet known)", dispAddr))) - case plans.Update: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr))) - case plans.CreateThenDelete, plans.DeleteThenCreate: - if tainted { - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced", dispAddr))) - } else { - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced", dispAddr))) - } - case plans.Delete: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed", dispAddr))) - default: - // should never happen, since the above is exhaustive - buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) - } - buf.WriteString(color.Color("[reset]\n")) - - switch change.Action { - case plans.Create: - buf.WriteString(color.Color("[green] +[reset] ")) - case plans.Read: - buf.WriteString(color.Color("[cyan] <=[reset] ")) - case plans.Update: - buf.WriteString(color.Color("[yellow] ~[reset] ")) - case plans.DeleteThenCreate: - buf.WriteString(color.Color("[red]-[reset]/[green]+[reset] ")) - case plans.CreateThenDelete: - buf.WriteString(color.Color("[green]+[reset]/[red]-[reset] ")) - case plans.Delete: - buf.WriteString(color.Color("[red] -[reset] ")) - default: - buf.WriteString(color.Color("??? ")) - } - - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - buf.WriteString(fmt.Sprintf( - "resource %q %q", - addr.Resource.Resource.Type, - addr.Resource.Resource.Name, - )) - case addrs.DataResourceMode: - buf.WriteString(fmt.Sprintf( - "data %q %q ", - addr.Resource.Resource.Type, - addr.Resource.Resource.Name, - )) - default: - // should never happen, since the above is exhaustive - buf.WriteString(addr.String()) - } - - buf.WriteString(" {") - - p := blockBodyDiffPrinter{ - buf: &buf, - color: color, - action: change.Action, - requiredReplace: change.RequiredReplace, - } - - // Most commonly-used resources have nested blocks that result in us - // going at least three traversals deep while we recurse here, so we'll - // start with that much capacity and then grow as needed for deeper - // structures. - path := make(cty.Path, 0, 3) - - changeV, err := change.Decode(schema.ImpliedType()) - if err != nil { - // Should never happen in here, since we've already been through - // loads of layers of encode/decode of the planned changes before now. - panic(fmt.Sprintf("failed to decode plan for %s while rendering diff: %s", addr, err)) - } - - // We currently have an opt-out that permits the legacy SDK to return values - // that defy our usual conventions around handling of nesting blocks. 
To - // avoid the rendering code from needing to handle all of these, we'll - // normalize first. - // (Ideally we'd do this as part of the SDK opt-out implementation in core, - // but we've added it here for now to reduce risk of unexpected impacts - // on other code in core.) - changeV.Change.Before = objchange.NormalizeObjectFromLegacySDK(changeV.Change.Before, schema) - changeV.Change.After = objchange.NormalizeObjectFromLegacySDK(changeV.Change.After, schema) - - bodyWritten := p.writeBlockBodyDiff(schema, changeV.Before, changeV.After, 6, path) - if bodyWritten { - buf.WriteString("\n") - buf.WriteString(strings.Repeat(" ", 4)) - } - buf.WriteString("}\n") - - return buf.String() -} - -type blockBodyDiffPrinter struct { - buf *bytes.Buffer - color *colorstring.Colorize - action plans.Action - requiredReplace cty.PathSet -} - -const forcesNewResourceCaption = " [red]# forces replacement[reset]" - -// writeBlockBodyDiff writes attribute or block differences -// and returns true if any differences were found and written -func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) bool { - path = ctyEnsurePathCapacity(path, 1) - - bodyWritten := false - blankBeforeBlocks := false - { - attrNames := make([]string, 0, len(schema.Attributes)) - attrNameLen := 0 - for name := range schema.Attributes { - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - if oldVal.IsNull() && newVal.IsNull() { - // Skip attributes where both old and new values are null - // (we do this early here so that we'll do our value alignment - // based on the longest attribute name that has a change, rather - // than the longest attribute name in the full set.) - continue - } - - attrNames = append(attrNames, name) - if len(name) > attrNameLen { - attrNameLen = len(name) - } - } - sort.Strings(attrNames) - if len(attrNames) > 0 { - blankBeforeBlocks = true - } - - for _, name := range attrNames { - attrS := schema.Attributes[name] - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - - bodyWritten = true - p.writeAttrDiff(name, attrS, oldVal, newVal, attrNameLen, indent, path) - } - } - - { - blockTypeNames := make([]string, 0, len(schema.BlockTypes)) - for name := range schema.BlockTypes { - blockTypeNames = append(blockTypeNames, name) - } - sort.Strings(blockTypeNames) - - for _, name := range blockTypeNames { - blockS := schema.BlockTypes[name] - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - - bodyWritten = true - p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path) - - // Always include a blank for any subsequent block types. 
- blankBeforeBlocks = true - } - } - - return bodyWritten -} - -func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) { - path = append(path, cty.GetAttrStep{Name: name}) - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - showJustNew := false - var action plans.Action - switch { - case old.IsNull(): - action = plans.Create - showJustNew = true - case new.IsNull(): - action = plans.Delete - case ctyEqualWithUnknown(old, new): - action = plans.NoOp - showJustNew = true - default: - action = plans.Update - } - - p.writeActionSymbol(action) - - p.buf.WriteString(p.color.Color("[bold]")) - p.buf.WriteString(name) - p.buf.WriteString(p.color.Color("[reset]")) - p.buf.WriteString(strings.Repeat(" ", nameLen-len(name))) - p.buf.WriteString(" = ") - - if attrS.Sensitive { - p.buf.WriteString("(sensitive value)") - } else { - switch { - case showJustNew: - p.writeValue(new, action, indent+2) - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - default: - // We show new even if it is null to emphasize the fact - // that it is being unset, since otherwise it is easy to - // misunderstand that the value is still set to the old value. - p.writeValueDiff(old, new, indent+2, path) - } - } -} - -func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) { - path = append(path, cty.GetAttrStep{Name: name}) - if old.IsNull() && new.IsNull() { - // Nothing to do if both old and new are null - return - } - - // Where old/new are collections representing a nesting mode other than - // NestingSingle, we assume the collection value can never be unknown - // since we always produce the container for the nested objects, even if - // the objects within are computed. - - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - var action plans.Action - eqV := new.Equals(old) - switch { - case old.IsNull(): - action = plans.Create - case new.IsNull(): - action = plans.Delete - case !new.IsWhollyKnown() || !old.IsWhollyKnown(): - // "old" should actually always be known due to our contract - // that old values must never be unknown, but we'll allow it - // anyway to be robust. - action = plans.Update - case !eqV.IsKnown() || !eqV.True(): - action = plans.Update - } - - if blankBefore { - p.buf.WriteRune('\n') - } - p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, path) - case configschema.NestingList: - // For the sake of handling nested blocks, we'll treat a null list - // the same as an empty list since the config language doesn't - // distinguish these anyway. - old = ctyNullBlockListAsEmpty(old) - new = ctyNullBlockListAsEmpty(new) - - oldItems := ctyCollectionValues(old) - newItems := ctyCollectionValues(new) - - // Here we intentionally preserve the index-based correspondence - // between old and new, rather than trying to detect insertions - // and removals in the list, because this more accurately reflects - // how Terraform Core and providers will understand the change, - // particularly when the nested block contains computed attributes - // that will themselves maintain correspondence by index. - - // commonLen is the number of elements that exist in both lists, which - // will be presented as updates (~).
Any additional items in one - // of the lists will be presented as either creates (+) or deletes (-) - // depending on which list they belong to. - var commonLen int - switch { - case len(oldItems) < len(newItems): - commonLen = len(oldItems) - default: - commonLen = len(newItems) - } - - if blankBefore && (len(oldItems) > 0 || len(newItems) > 0) { - p.buf.WriteRune('\n') - } - - for i := 0; i < commonLen; i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - oldItem := oldItems[i] - newItem := newItems[i] - action := plans.Update - if oldItem.RawEquals(newItem) { - action = plans.NoOp - } - p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, path) - } - for i := commonLen; i < len(oldItems); i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - oldItem := oldItems[i] - newItem := cty.NullVal(oldItem.Type()) - p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, path) - } - for i := commonLen; i < len(newItems); i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - newItem := newItems[i] - oldItem := cty.NullVal(newItem.Type()) - p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, path) - } - case configschema.NestingSet: - // For the sake of handling nested blocks, we'll treat a null set - // the same as an empty set since the config language doesn't - // distinguish these anyway. - old = ctyNullBlockSetAsEmpty(old) - new = ctyNullBlockSetAsEmpty(new) - - oldItems := ctyCollectionValues(old) - newItems := ctyCollectionValues(new) - - if (len(oldItems) + len(newItems)) == 0 { - // Nothing to do if both sets are empty - return - } - - allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) - allItems = append(allItems, oldItems...) - allItems = append(allItems, newItems...) - all := cty.SetVal(allItems) - - if blankBefore { - p.buf.WriteRune('\n') - } - - for it := all.ElementIterator(); it.Next(); { - _, val := it.Element() - var action plans.Action - var oldValue, newValue cty.Value - switch { - case !val.IsKnown(): - action = plans.Update - newValue = val - case !old.HasElement(val).True(): - action = plans.Create - oldValue = cty.NullVal(val.Type()) - newValue = val - case !new.HasElement(val).True(): - action = plans.Delete - oldValue = val - newValue = cty.NullVal(val.Type()) - default: - action = plans.NoOp - oldValue = val - newValue = val - } - path := append(path, cty.IndexStep{Key: val}) - p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, path) - } - - case configschema.NestingMap: - // For the sake of handling nested blocks, we'll treat a null map - // the same as an empty map since the config language doesn't - // distinguish these anyway. 
- old = ctyNullBlockMapAsEmpty(old) - new = ctyNullBlockMapAsEmpty(new) - - oldItems := old.AsValueMap() - newItems := new.AsValueMap() - if (len(oldItems) + len(newItems)) == 0 { - // Nothing to do if both maps are empty - return - } - - allKeys := make(map[string]bool) - for k := range oldItems { - allKeys[k] = true - } - for k := range newItems { - allKeys[k] = true - } - allKeysOrder := make([]string, 0, len(allKeys)) - for k := range allKeys { - allKeysOrder = append(allKeysOrder, k) - } - sort.Strings(allKeysOrder) - - if blankBefore { - p.buf.WriteRune('\n') - } - - for _, k := range allKeysOrder { - var action plans.Action - oldValue := oldItems[k] - newValue := newItems[k] - switch { - case oldValue == cty.NilVal: - oldValue = cty.NullVal(newValue.Type()) - action = plans.Create - case newValue == cty.NilVal: - newValue = cty.NullVal(oldValue.Type()) - action = plans.Delete - case !newValue.RawEquals(oldValue): - action = plans.Update - default: - action = plans.NoOp - } - - path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) - p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, path) - } - } -} - -func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - p.writeActionSymbol(action) - - if label != nil { - fmt.Fprintf(p.buf, "%s %q {", name, *label) - } else { - fmt.Fprintf(p.buf, "%s {", name) - } - - if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - - bodyWritten := p.writeBlockBodyDiff(blockS, old, new, indent+4, path) - if bodyWritten { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - } - p.buf.WriteString("}") -} - -func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) { - if !val.IsKnown() { - p.buf.WriteString("(known after apply)") - return - } - if val.IsNull() { - p.buf.WriteString(p.color.Color("[dark_gray]null[reset]")) - return - } - - ty := val.Type() - - switch { - case ty.IsPrimitiveType(): - switch ty { - case cty.String: - { - // Special behavior for JSON strings containing array or object - src := []byte(val.AsString()) - ty, err := ctyjson.ImpliedType(src) - // check for the special case of "null", which decodes to nil, - // and just allow it to be printed out directly - if err == nil && !ty.IsPrimitiveType() && val.AsString() != "null" { - jv, err := ctyjson.Unmarshal(src, ty) - if err == nil { - p.buf.WriteString("jsonencode(") - if jv.LengthInt() == 0 { - p.writeValue(jv, action, 0) - } else { - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeValue(jv, action, indent+4) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteByte(')') - break // don't *also* do the normal behavior below - } - } - } - fmt.Fprintf(p.buf, "%q", val.AsString()) - case cty.Bool: - if val.True() { - p.buf.WriteString("true") - } else { - p.buf.WriteString("false") - } - case cty.Number: - bf := val.AsBigFloat() - p.buf.WriteString(bf.Text('f', -1)) - default: - // should never happen, since the above is exhaustive - fmt.Fprintf(p.buf, "%#v", val) - } - case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): - p.buf.WriteString("[") - - it := val.ElementIterator() - for it.Next() { - _, val 
:= it.Element() - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.writeValue(val, action, indent+4) - p.buf.WriteString(",") - } - - if val.LengthInt() > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("]") - case ty.IsMapType(): - p.buf.WriteString("{") - - keyLen := 0 - for it := val.ElementIterator(); it.Next(); { - key, _ := it.Element() - if keyStr := key.AsString(); len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - for it := val.ElementIterator(); it.Next(); { - key, val := it.Element() - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.writeValue(key, action, indent+4) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString()))) - p.buf.WriteString(" = ") - p.writeValue(val, action, indent+4) - } - - if val.LengthInt() > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("}") - case ty.IsObjectType(): - p.buf.WriteString("{") - - atys := ty.AttributeTypes() - attrNames := make([]string, 0, len(atys)) - nameLen := 0 - for attrName := range atys { - attrNames = append(attrNames, attrName) - if len(attrName) > nameLen { - nameLen = len(attrName) - } - } - sort.Strings(attrNames) - - for _, attrName := range attrNames { - val := val.GetAttr(attrName) - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.buf.WriteString(attrName) - p.buf.WriteString(strings.Repeat(" ", nameLen-len(attrName))) - p.buf.WriteString(" = ") - p.writeValue(val, action, indent+4) - } - - if len(attrNames) > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("}") - } -} - -func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) { - ty := old.Type() - typesEqual := ctyTypesEqual(ty, new.Type()) - - // We have some specialized diff implementations for certain complex - // values where it's useful to see a visualization of the diff of - // the nested elements rather than just showing the entire old and - // new values verbatim. - // However, these specialized implementations can apply only if both - // values are known and non-null. - if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual { - switch { - case ty == cty.String: - // We have special behavior for both multi-line strings in general - // and for strings that can parse as JSON. For the JSON handling - // to apply, both old and new must be valid JSON. - // For single-line strings that don't parse as JSON we just fall - // out of this switch block and do the default old -> new rendering. - oldS := old.AsString() - newS := new.AsString() - - { - // Special behavior for JSON strings containing object or - // list values. 
- oldBytes := []byte(oldS) - newBytes := []byte(newS) - oldType, oldErr := ctyjson.ImpliedType(oldBytes) - newType, newErr := ctyjson.ImpliedType(newBytes) - if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) { - oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType) - newJV, newErr := ctyjson.Unmarshal(newBytes, newType) - if oldErr == nil && newErr == nil { - if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace - p.buf.WriteString("jsonencode(") - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(plans.Update) - p.writeValueDiff(oldJV, newJV, indent+4, path) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteByte(')') - } else { - // if they differ only in insignificant whitespace - // then we'll note that but still expand out the - // effective value. - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]")) - } else { - p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]")) - } - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeValue(oldJV, plans.NoOp, indent+4) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteByte(')') - } - return - } - } - } - - if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 { - break - } - - p.buf.WriteString("<<~EOT") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var oldLines, newLines []cty.Value - { - r := strings.NewReader(oldS) - sc := bufio.NewScanner(r) - for sc.Scan() { - oldLines = append(oldLines, cty.StringVal(sc.Text())) - } - } - { - r := strings.NewReader(newS) - sc := bufio.NewScanner(r) - for sc.Scan() { - newLines = append(newLines, cty.StringVal(sc.Text())) - } - } - - diffLines := ctySequenceDiff(oldLines, newLines) - for _, diffLine := range diffLines { - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(diffLine.Action) - - switch diffLine.Action { - case plans.NoOp, plans.Delete: - p.buf.WriteString(diffLine.Before.AsString()) - case plans.Create: - p.buf.WriteString(diffLine.After.AsString()) - default: - // Should never happen since the above covers all - // actions that ctySequenceDiff can return for strings - p.buf.WriteString(diffLine.After.AsString()) - - } - p.buf.WriteString("\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) // +4 here because there's no symbol - p.buf.WriteString("EOT") - - return - - case ty.IsSetType(): - p.buf.WriteString("[") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var addedVals, removedVals, allVals []cty.Value - for it := old.ElementIterator(); it.Next(); { - _, val := it.Element() - allVals = append(allVals, val) - if new.HasElement(val).False() { - removedVals = append(removedVals, val) - } - } - for it := new.ElementIterator(); it.Next(); { - _, val := it.Element() - allVals = append(allVals, val) - if val.IsKnown() && old.HasElement(val).False() { - addedVals = append(addedVals, val) - } - } - - var all, added, removed cty.Value - if len(allVals) > 0 { - all = cty.SetVal(allVals) - } else { - all = cty.SetValEmpty(ty.ElementType()) - } - if len(addedVals) > 0 { - added = cty.SetVal(addedVals) - } else { - added = cty.SetValEmpty(ty.ElementType()) - } -
if len(removedVals) > 0 { - removed = cty.SetVal(removedVals) - } else { - removed = cty.SetValEmpty(ty.ElementType()) - } - - for it := all.ElementIterator(); it.Next(); { - _, val := it.Element() - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - - var action plans.Action - switch { - case !val.IsKnown(): - action = plans.Update - case added.HasElement(val).True(): - action = plans.Create - case removed.HasElement(val).True(): - action = plans.Delete - default: - action = plans.NoOp - } - - p.writeActionSymbol(action) - p.writeValue(val, action, indent+4) - p.buf.WriteString(",\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("]") - return - case ty.IsListType() || ty.IsTupleType(): - p.buf.WriteString("[") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice()) - for _, elemDiff := range elemDiffs { - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(elemDiff.Action) - switch elemDiff.Action { - case plans.NoOp, plans.Delete: - p.writeValue(elemDiff.Before, elemDiff.Action, indent+4) - case plans.Update: - p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path) - case plans.Create: - p.writeValue(elemDiff.After, elemDiff.Action, indent+4) - default: - // Should never happen since the above covers all - // actions that ctySequenceDiff can return. - p.writeValue(elemDiff.After, elemDiff.Action, indent+4) - } - - p.buf.WriteString(",\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("]") - return - - case ty.IsMapType(): - p.buf.WriteString("{") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var allKeys []string - keyLen := 0 - for it := old.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - for it := new.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - sort.Strings(allKeys) - - lastK := "" - for i, k := range allKeys { - if i > 0 && lastK == k { - continue // skip duplicates (list is sorted) - } - lastK = k - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - kV := cty.StringVal(k) - var action plans.Action - if old.HasIndex(kV).False() { - action = plans.Create - } else if new.HasIndex(kV).False() { - action = plans.Delete - } else if eqV := old.Index(kV).Equals(new.Index(kV)); eqV.IsKnown() && eqV.True() { - action = plans.NoOp - } else { - action = plans.Update - } - - path := append(path, cty.IndexStep{Key: kV}) - - p.writeActionSymbol(action) - p.writeValue(kV, action, indent+4) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) - p.buf.WriteString(" = ") - switch action { - case plans.Create, plans.NoOp: - v := new.Index(kV) - p.writeValue(v, action, indent+4) - case plans.Delete: - oldV := old.Index(kV) - newV := cty.NullVal(oldV.Type()) - p.writeValueDiff(oldV, newV, indent+4, path) - default: - oldV := old.Index(kV) - newV := new.Index(kV) - p.writeValueDiff(oldV, newV, indent+4, path) - } - - p.buf.WriteByte('\n') - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("}") - return - case ty.IsObjectType(): - p.buf.WriteString("{") - p.buf.WriteString("\n") - - forcesNewResource := 
p.pathForcesNewResource(path) - - var allKeys []string - keyLen := 0 - for it := old.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - for it := new.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - sort.Strings(allKeys) - - lastK := "" - for i, k := range allKeys { - if i > 0 && lastK == k { - continue // skip duplicates (list is sorted) - } - lastK = k - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - kV := k - var action plans.Action - if !old.Type().HasAttribute(kV) { - action = plans.Create - } else if !new.Type().HasAttribute(kV) { - action = plans.Delete - } else if eqV := old.GetAttr(kV).Equals(new.GetAttr(kV)); eqV.IsKnown() && eqV.True() { - action = plans.NoOp - } else { - action = plans.Update - } - - path := append(path, cty.GetAttrStep{Name: kV}) - - p.writeActionSymbol(action) - p.buf.WriteString(k) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) - p.buf.WriteString(" = ") - - switch action { - case plans.Create, plans.NoOp: - v := new.GetAttr(kV) - p.writeValue(v, action, indent+4) - case plans.Delete: - oldV := old.GetAttr(kV) - newV := cty.NullVal(oldV.Type()) - p.writeValueDiff(oldV, newV, indent+4, path) - default: - oldV := old.GetAttr(kV) - newV := new.GetAttr(kV) - p.writeValueDiff(oldV, newV, indent+4, path) - } - - p.buf.WriteString("\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("}") - - if forcesNewResource { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - return - } - } - - // In all other cases, we just show the new and old values as-is - p.writeValue(old, plans.Delete, indent) - if new.IsNull() { - p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] ")) - } else { - p.buf.WriteString(p.color.Color(" [yellow]->[reset] ")) - } - - p.writeValue(new, plans.Create, indent) - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } -} - -// writeActionSymbol writes a symbol to represent the given action, followed -// by a space. -// -// It only supports the actions that can be represented with a single character: -// Create, Delete, Update and NoAction. -func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) { - switch action { - case plans.Create: - p.buf.WriteString(p.color.Color("[green]+[reset] ")) - case plans.Delete: - p.buf.WriteString(p.color.Color("[red]-[reset] ")) - case plans.Update: - p.buf.WriteString(p.color.Color("[yellow]~[reset] ")) - case plans.NoOp: - p.buf.WriteString(" ") - default: - // Should never happen - p.buf.WriteString(p.color.Color("? ")) - } -} - -func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool { - if !p.action.IsReplace() { - // "requiredReplace" only applies when the instance is being replaced - return false - } - return p.requiredReplace.Has(path) -} - -func ctyEmptyString(value cty.Value) bool { - if !value.IsNull() && value.IsKnown() { - valueType := value.Type() - if valueType == cty.String && value.AsString() == "" { - return true - } - } - return false -} - -func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value { - attrType := val.Type().AttributeType(name) - - if val.IsNull() { - return cty.NullVal(attrType) - } - - // We treat "" as null here - // as existing SDK doesn't support null yet. 
- // This allows us to avoid spurious diffs - // until we introduce null to the SDK. - attrValue := val.GetAttr(name) - if ctyEmptyString(attrValue) { - return cty.NullVal(attrType) - } - - return attrValue -} - -func ctyCollectionValues(val cty.Value) []cty.Value { - if !val.IsKnown() || val.IsNull() { - return nil - } - - ret := make([]cty.Value, 0, val.LengthInt()) - for it := val.ElementIterator(); it.Next(); { - _, value := it.Element() - ret = append(ret, value) - } - return ret -} - -// ctySequenceDiff returns differences between given sequences of cty.Value(s) -// in the form of Create, Delete, or Update actions (for objects). -func ctySequenceDiff(old, new []cty.Value) []*plans.Change { - var ret []*plans.Change - lcs := objchange.LongestCommonSubsequence(old, new) - var oldI, newI, lcsI int - for oldI < len(old) || newI < len(new) || lcsI < len(lcs) { - for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) { - isObjectDiff := old[oldI].Type().IsObjectType() && (newI >= len(new) || new[newI].Type().IsObjectType()) - if isObjectDiff && newI < len(new) { - ret = append(ret, &plans.Change{ - Action: plans.Update, - Before: old[oldI], - After: new[newI], - }) - oldI++ - newI++ // we also consume the next "new" in this case - continue - } - - ret = append(ret, &plans.Change{ - Action: plans.Delete, - Before: old[oldI], - After: cty.NullVal(old[oldI].Type()), - }) - oldI++ - } - for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) { - ret = append(ret, &plans.Change{ - Action: plans.Create, - Before: cty.NullVal(new[newI].Type()), - After: new[newI], - }) - newI++ - } - if lcsI < len(lcs) { - ret = append(ret, &plans.Change{ - Action: plans.NoOp, - Before: lcs[lcsI], - After: lcs[lcsI], - }) - - // All of our indexes advance together now, since the line - // is common to all three sequences. - lcsI++ - oldI++ - newI++ - } - } - return ret -} - -func ctyEqualWithUnknown(old, new cty.Value) bool { - if !old.IsWhollyKnown() || !new.IsWhollyKnown() { - return false - } - return old.Equals(new).True() -} - -// ctyTypesEqual checks equality of two types more loosely -// by avoiding checks of object/tuple elements -// as we render differences on element-by-element basis anyway -func ctyTypesEqual(oldT, newT cty.Type) bool { - if oldT.IsObjectType() && newT.IsObjectType() { - return true - } - if oldT.IsTupleType() && newT.IsTupleType() { - return true - } - return oldT.Equals(newT) -} - -func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path { - if cap(path)-len(path) >= minExtra { - return path - } - newCap := cap(path) * 2 - if newCap < (len(path) + minExtra) { - newCap = len(path) + minExtra - } - newPath := make(cty.Path, len(path), newCap) - copy(newPath, path) - return newPath -} - -// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. -// -// In particular, this function handles the special situation where a "list" is -// actually represented as a tuple type where nested blocks contain -// dynamically-typed values. -func ctyNullBlockListAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - if ty := in.Type(); ty.IsListType() { - return cty.ListValEmpty(ty.ElementType()) - } - return cty.EmptyTupleVal // must need a tuple, then -} - -// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. 
-// -// In particular, this function handles the special situation where a "map" is -// actually represented as an object type where nested blocks contain -// dynamically-typed values. -func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - if ty := in.Type(); ty.IsMapType() { - return cty.MapValEmpty(ty.ElementType()) - } - return cty.EmptyObjectVal // must need an object, then -} - -// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. -func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - // Dynamically-typed attributes are not supported inside blocks backed by - // sets, so our result here is always a set. - return cty.SetValEmpty(in.Type().ElementType()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go deleted file mode 100644 index aa8d7deb..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package format contains helpers for formatting various Terraform -// structures for human-readable output. -// -// This package is used by the official Terraform CLI in formatting any -// output and is exported to encourage non-official frontends to mimic the -// output formatting as much as possible so that text formats of Terraform -// structures have a consistent look and feel. -package format diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go deleted file mode 100644 index 85ebbfec..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go +++ /dev/null @@ -1,123 +0,0 @@ -package format - -import ( - "github.com/zclconf/go-cty/cty" -) - -// ObjectValueID takes a value that is assumed to be an object representation -// of some resource instance object and attempts to heuristically find an -// attribute of it that is likely to be a unique identifier in the remote -// system that it belongs to which will be useful to the user. -// -// If such an attribute is found, its name and string value intended for -// display are returned. Both returned strings are empty if no such attribute -// exists, in which case the caller should assume that the resource instance -// address within the Terraform configuration is the best available identifier. -// -// This is only a best-effort sort of thing, relying on naming conventions in -// our resource type schemas. The result is not guaranteed to be unique, but -// should generally be suitable for display to an end-user anyway. -// -// This function will panic if the given value is not of an object type. -func ObjectValueID(obj cty.Value) (k, v string) { - if obj.IsNull() || !obj.IsKnown() { - return "", "" - } - - atys := obj.Type().AttributeTypes() - - switch { - - case atys["id"] == cty.String: - v := obj.GetAttr("id") - if v.IsKnown() && !v.IsNull() { - return "id", v.AsString() - } - - case atys["name"] == cty.String: - // "name" isn't always globally unique, but if there isn't also an - // "id" then it _often_ is, in practice.
-		v := obj.GetAttr("name")
-		if v.IsKnown() && !v.IsNull() {
-			return "name", v.AsString()
-		}
-	}
-
-	return "", ""
-}
-
-// ObjectValueName takes a value that is assumed to be an object representation
-// of some resource instance object and attempts to heuristically find an
-// attribute of it that is likely to be a human-friendly name in the remote
-// system that it belongs to, which will be useful to the user.
-//
-// If such an attribute is found, its name and string value intended for
-// display are returned. Both returned strings are empty if no such attribute
-// exists, in which case the caller should assume that the resource instance
-// address within the Terraform configuration is the best available identifier.
-//
-// This is only a best-effort sort of thing, relying on naming conventions in
-// our resource type schemas. The result is not guaranteed to be unique, but
-// should generally be suitable for display to an end-user anyway.
-//
-// Callers that use both ObjectValueName and ObjectValueID at the same time
-// should be prepared to get the same attribute key and value from both in
-// some cases, since there is overlap between the id-extraction and
-// name-extraction heuristics.
-//
-// This function will panic if the given value is not of an object type.
-func ObjectValueName(obj cty.Value) (k, v string) {
-	if obj.IsNull() || !obj.IsKnown() {
-		return "", ""
-	}
-
-	atys := obj.Type().AttributeTypes()
-
-	switch {
-
-	case atys["name"] == cty.String:
-		v := obj.GetAttr("name")
-		if v.IsKnown() && !v.IsNull() {
-			return "name", v.AsString()
-		}
-
-	case atys["tags"].IsMapType() && atys["tags"].ElementType() == cty.String:
-		tags := obj.GetAttr("tags")
-		if tags.IsNull() || !tags.IsWhollyKnown() {
-			break
-		}
-
-		switch {
-		case tags.HasIndex(cty.StringVal("name")).RawEquals(cty.True):
-			v := tags.Index(cty.StringVal("name"))
-			if v.IsKnown() && !v.IsNull() {
-				return "tags.name", v.AsString()
-			}
-		case tags.HasIndex(cty.StringVal("Name")).RawEquals(cty.True):
-			// AWS-style naming convention
-			v := tags.Index(cty.StringVal("Name"))
-			if v.IsKnown() && !v.IsNull() {
-				return "tags.Name", v.AsString()
-			}
-		}
-	}
-
-	return "", ""
-}
-
-// ObjectValueIDOrName is a convenience wrapper around both ObjectValueID
-// and ObjectValueName (in that preference order) to try to extract some sort
-// of human-friendly descriptive string value for an object as additional
-// context about an object when it is being displayed in a compact way (where
-// not all of the attributes are visible).
-//
-// Just as with the two functions it wraps, it is a best-effort and may return
-// two empty strings if no suitable attribute can be found for a given object.
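Editorial aside, for orientation while reviewing this deletion (not part of the patch): a minimal sketch of the preference order these heuristics implement, written against the public go-cty API. bestID is a hypothetical stand-in for the deleted helpers; the ObjectValueIDOrName wrapper defined just below simply tries the id heuristic first and falls back to the name heuristic.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// bestID mirrors the deleted helpers' preference order: a string "id"
// attribute wins, then a string "name"; both results are empty when
// neither attribute is present, known, and non-null.
func bestID(obj cty.Value) (k, v string) {
	if obj.IsNull() || !obj.IsKnown() {
		return "", ""
	}
	atys := obj.Type().AttributeTypes()
	switch {
	case atys["id"] == cty.String:
		if av := obj.GetAttr("id"); av.IsKnown() && !av.IsNull() {
			return "id", av.AsString()
		}
	case atys["name"] == cty.String:
		if av := obj.GetAttr("name"); av.IsKnown() && !av.IsNull() {
			return "name", av.AsString()
		}
	}
	return "", ""
}

func main() {
	obj := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.StringVal("i-0abc123"),
		"name": cty.StringVal("web"),
	})
	k, v := bestID(obj)
	fmt.Printf("%s = %s\n", k, v) // prints: id = i-0abc123
}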
-func ObjectValueIDOrName(obj cty.Value) (k, v string) { - k, v = ObjectValueID(obj) - if k != "" { - return - } - k, v = ObjectValueName(obj) - return -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go deleted file mode 100644 index 14869ad3..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go +++ /dev/null @@ -1,208 +0,0 @@ -package format - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" -) - -// StateOpts are the options for formatting a state. -type StateOpts struct { - // State is the state to format. This is required. - State *states.State - - // Schemas are used to decode attributes. This is required. - Schemas *terraform.Schemas - - // Color is the colorizer. This is optional. - Color *colorstring.Colorize -} - -// State takes a state and returns a string -func State(opts *StateOpts) string { - if opts.Color == nil { - panic("colorize not given") - } - - if opts.Schemas == nil { - panic("schemas not given") - } - - s := opts.State - if len(s.Modules) == 0 { - return "The state file is empty. No resources are represented." - } - - buf := bytes.NewBufferString("[reset]") - p := blockBodyDiffPrinter{ - buf: buf, - color: opts.Color, - action: plans.NoOp, - } - - // Format all the modules - for _, m := range s.Modules { - formatStateModule(p, m, opts.Schemas) - } - - // Write the outputs for the root module - m := s.RootModule() - - if m.OutputValues != nil { - if len(m.OutputValues) > 0 { - p.buf.WriteString("Outputs:\n\n") - } - - // Sort the outputs - ks := make([]string, 0, len(m.OutputValues)) - for k := range m.OutputValues { - ks = append(ks, k) - } - sort.Strings(ks) - - // Output each output k/v pair - for _, k := range ks { - v := m.OutputValues[k] - p.buf.WriteString(fmt.Sprintf("%s = ", k)) - p.writeValue(v.Value, plans.NoOp, 0) - p.buf.WriteString("\n") - } - } - - trimmedOutput := strings.TrimSpace(p.buf.String()) - trimmedOutput += "[reset]" - - return opts.Color.Color(trimmedOutput) - -} - -func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) { - // First get the names of all the resources so we can show them - // in alphabetical order. - names := make([]string, 0, len(m.Resources)) - for name := range m.Resources { - names = append(names, name) - } - sort.Strings(names) - - // Go through each resource and begin building up the output. 
- for _, key := range names { - for k, v := range m.Resources[key].Instances { - // keep these in order to keep the current object first, and - // provide deterministic output for the deposed objects - type obj struct { - header string - instance *states.ResourceInstanceObjectSrc - } - instances := []obj{} - - addr := m.Resources[key].Addr - - taintStr := "" - if v.Current != nil && v.Current.Status == 'T' { - taintStr = " (tainted)" - } - - instances = append(instances, - obj{fmt.Sprintf("# %s:%s\n", addr.Absolute(m.Addr).Instance(k), taintStr), v.Current}) - - for dk, v := range v.Deposed { - instances = append(instances, - obj{fmt.Sprintf("# %s: (deposed object %s)\n", addr.Absolute(m.Addr).Instance(k), dk), v}) - } - - // Sort the instances for consistent output. - // Starting the sort from the second index, so the current instance - // is always first. - sort.Slice(instances[1:], func(i, j int) bool { - return instances[i+1].header < instances[j+1].header - }) - - for _, obj := range instances { - header := obj.header - instance := obj.instance - p.buf.WriteString(header) - if instance == nil { - // this shouldn't happen, but there's nothing to do here so - // don't panic below. - continue - } - - var schema *configschema.Block - provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact() - if _, exists := schemas.Providers[provider]; !exists { - // This should never happen in normal use because we should've - // loaded all of the schemas and checked things prior to this - // point. We can't return errors here, but since this is UI code - // we will try to do _something_ reasonable. - p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider)) - continue - } - - switch addr.Mode { - case addrs.ManagedResourceMode: - schema, _ = schemas.ResourceTypeConfig( - provider, - addr.Mode, - addr.Type, - ) - if schema == nil { - p.buf.WriteString(fmt.Sprintf( - "# missing schema for provider %q resource type %s\n\n", provider, addr.Type)) - continue - } - - p.buf.WriteString(fmt.Sprintf( - "resource %q %q {", - addr.Type, - addr.Name, - )) - case addrs.DataResourceMode: - schema, _ = schemas.ResourceTypeConfig( - provider, - addr.Mode, - addr.Type, - ) - if schema == nil { - p.buf.WriteString(fmt.Sprintf( - "# missing schema for provider %q data source %s\n\n", provider, addr.Type)) - continue - } - - p.buf.WriteString(fmt.Sprintf( - "data %q %q {", - addr.Type, - addr.Name, - )) - default: - // should never happen, since the above is exhaustive - p.buf.WriteString(addr.String()) - } - - val, err := instance.Decode(schema.ImpliedType()) - if err != nil { - fmt.Println(err.Error()) - break - } - - path := make(cty.Path, 0, 3) - bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path) - if bodyWritten { - p.buf.WriteString("\n") - } - - p.buf.WriteString("}\n\n") - } - } - } - p.buf.WriteString("\n") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go deleted file mode 100644 index 76d161d7..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go +++ /dev/null @@ -1,24 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -// Backend represents a "backend" block inside a "terraform" block in a module -// or file. 
-type Backend struct { - Type string - Config hcl.Body - - TypeRange hcl.Range - DeclRange hcl.Range -} - -func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) { - return &Backend{ - Type: block.Labels[0], - TypeRange: block.LabelRanges[0], - Config: block.Body, - DeclRange: block.DefRange, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go deleted file mode 100644 index e594ebd4..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go +++ /dev/null @@ -1,116 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// ------------------------------------------------------------------------- -// Functions in this file are compatibility shims intended to ease conversion -// from the old configuration loader. Any use of these functions that makes -// a change should generate a deprecation warning explaining to the user how -// to update their code for new patterns. -// -// Shims are particularly important for any patterns that have been widely -// documented in books, tutorials, etc. Users will still be starting from -// these examples and we want to help them adopt the latest patterns rather -// than leave them stranded. -// ------------------------------------------------------------------------- - -// shimTraversalInString takes any arbitrary expression and checks if it is -// a quoted string in the native syntax. If it _is_, then it is parsed as a -// traversal and re-wrapped into a synthetic traversal expression and a -// warning is generated. Otherwise, the given expression is just returned -// verbatim. -// -// This function has no effect on expressions from the JSON syntax, since -// traversals in strings are the required pattern in that syntax. -// -// If wantKeyword is set, the generated warning diagnostic will talk about -// keywords rather than references. The behavior is otherwise unchanged, and -// the caller remains responsible for checking that the result is indeed -// a keyword, e.g. using hcl.ExprAsKeyword. -func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) { - // ObjectConsKeyExpr is a special wrapper type used for keys on object - // constructors to deal with the fact that naked identifiers are normally - // handled as "bareword" strings rather than as variable references. Since - // we know we're interpreting as a traversal anyway (and thus it won't - // matter whether it's a string or an identifier) we can safely just unwrap - // here and then process whatever we find inside as normal. - if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok { - expr = ocke.Wrapped - } - - if !exprIsNativeQuotedString(expr) { - return expr, nil - } - - strVal, diags := expr.Value(nil) - if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() { - // Since we're not even able to attempt a shim here, we'll discard - // the diagnostics we saw so far and let the caller's own error - // handling take care of reporting the invalid expression. - return expr, nil - } - - // The position handling here isn't _quite_ right because it won't - // take into account any escape sequences in the literal string, but - // it should be close enough for any error reporting to make sense. 
- srcRange := expr.Range() - startPos := srcRange.Start // copy - startPos.Column++ // skip initial quote - startPos.Byte++ // skip initial quote - - traversal, tDiags := hclsyntax.ParseTraversalAbs( - []byte(strVal.AsString()), - srcRange.Filename, - startPos, - ) - diags = append(diags, tDiags...) - - // For initial release our deprecation warnings are disabled to allow - // a period where modules can be compatible with both old and new - // conventions. - // FIXME: Re-enable these deprecation warnings in a release prior to - // Terraform 0.13 and then remove the shims altogether for 0.13. - /* - if wantKeyword { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted keywords are deprecated", - Detail: "In this context, keywords are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this keyword to silence this warning.", - Subject: &srcRange, - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted references are deprecated", - Detail: "In this context, references are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this reference to silence this warning.", - Subject: &srcRange, - }) - } - */ - - return &hclsyntax.ScopeTraversalExpr{ - Traversal: traversal, - SrcRange: srcRange, - }, diags -} - -// shimIsIgnoreChangesStar returns true if the given expression seems to be -// a string literal whose value is "*". This is used to support a legacy -// form of ignore_changes = all . -// -// This function does not itself emit any diagnostics, so it's the caller's -// responsibility to emit a warning diagnostic when this function returns true. -func shimIsIgnoreChangesStar(expr hcl.Expression) bool { - val, valDiags := expr.Value(nil) - if valDiags.HasErrors() { - return false - } - if val.Type() != cty.String || val.IsNull() || !val.IsKnown() { - return false - } - return val.AsString() == "*" -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go deleted file mode 100644 index 82c88a10..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go +++ /dev/null @@ -1,164 +0,0 @@ -package configs - -import ( - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// A Config is a node in the tree of modules within a configuration. -// -// The module tree is constructed by following ModuleCall instances recursively -// through the root module transitively into descendent modules. -// -// A module tree described in *this* package represents the static tree -// represented by configuration. During evaluation a static ModuleNode may -// expand into zero or more module instances depending on the use of count and -// for_each configuration attributes within each call. -type Config struct { - // RootModule points to the Config for the root module within the same - // module tree as this module. If this module _is_ the root module then - // this is self-referential. - Root *Config - - // ParentModule points to the Config for the module that directly calls - // this module. If this is the root module then this field is nil. 
- Parent *Config - - // Path is a sequence of module logical names that traverse from the root - // module to this config. Path is empty for the root module. - // - // This should only be used to display paths to the end-user in rare cases - // where we are talking about the static module tree, before module calls - // have been resolved. In most cases, an addrs.ModuleInstance describing - // a node in the dynamic module tree is better, since it will then include - // any keys resulting from evaluating "count" and "for_each" arguments. - Path addrs.Module - - // ChildModules points to the Config for each of the direct child modules - // called from this module. The keys in this map match the keys in - // Module.ModuleCalls. - Children map[string]*Config - - // Module points to the object describing the configuration for the - // various elements (variables, resources, etc) defined by this module. - Module *Module - - // CallRange is the source range for the header of the module block that - // requested this module. - // - // This field is meaningless for the root module, where its contents are undefined. - CallRange hcl.Range - - // SourceAddr is the source address that the referenced module was requested - // from, as specified in configuration. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddr string - - // SourceAddrRange is the location in the configuration source where the - // SourceAddr value was set, for use in diagnostic messages. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddrRange hcl.Range - - // Version is the specific version that was selected for this module, - // based on version constraints given in configuration. - // - // This field is nil if the module was loaded from a non-registry source, - // since versions are not supported for other sources. - // - // This field is meaningless for the root module, where it will always - // be nil. - Version *version.Version -} - -// NewEmptyConfig constructs a single-node configuration tree with an empty -// root module. This is generally a pretty useless thing to do, so most callers -// should instead use BuildConfig. -func NewEmptyConfig() *Config { - ret := &Config{} - ret.Root = ret - ret.Children = make(map[string]*Config) - ret.Module = &Module{} - return ret -} - -// DeepEach calls the given function once for each module in the tree, starting -// with the receiver. -// -// A parent is always called before its children and children of a particular -// node are visited in lexicographic order by their names. -func (c *Config) DeepEach(cb func(c *Config)) { - cb(c) - - names := make([]string, 0, len(c.Children)) - for name := range c.Children { - names = append(names, name) - } - - for _, name := range names { - c.Children[name].DeepEach(cb) - } -} - -// DescendentForInstance is like Descendent except that it accepts a path -// to a particular module instance in the dynamic module graph, returning -// the node from the static module graph that corresponds to it. -// -// All instances created by a particular module call share the same -// configuration, so the keys within the given path are disregarded. 
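Editorial aside (not part of the patch): a self-contained, hypothetical mirror of the walk performed by the DescendentForInstance method that follows, showing that only each step's Name is consulted; instance keys are carried but deliberately ignored, because every instance of a module call shares one static configuration. The config and step types here are illustrative stand-ins for *configs.Config and an addrs.ModuleInstance step.

package main

import "fmt"

// config stands in for *configs.Config; only the child map matters here.
type config struct {
	children map[string]*config
}

// step stands in for one addrs.ModuleInstance step.
type step struct {
	Name string
	Key  string // instance key; never consulted, as in the real method
}

// descendentForInstance walks the static module tree by call name.
func descendentForInstance(c *config, path []step) *config {
	current := c
	for _, s := range path {
		current = current.children[s.Name]
		if current == nil {
			return nil
		}
	}
	return current
}

func main() {
	root := &config{children: map[string]*config{
		"network": {children: map[string]*config{
			"subnet": {},
		}},
	}}
	hit := descendentForInstance(root, []step{{Name: "network", Key: "a"}, {Name: "subnet"}})
	fmt.Println(hit != nil) // prints: true
}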
-func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { - current := c - for _, step := range path { - current = current.Children[step.Name] - if current == nil { - return nil - } - } - return current -} - -// ProviderTypes returns the names of each distinct provider type referenced -// in the receiving configuration. -// -// This is a helper for easily determining which provider types are required -// to fully interpret the configuration, though it does not include version -// information and so callers are expected to have already dealt with -// provider version selection in an earlier step and have identified suitable -// versions for each provider. -func (c *Config) ProviderTypes() []string { - m := make(map[string]struct{}) - c.gatherProviderTypes(m) - - ret := make([]string, 0, len(m)) - for k := range m { - ret = append(ret, k) - } - sort.Strings(ret) - return ret -} -func (c *Config) gatherProviderTypes(m map[string]struct{}) { - if c == nil { - return - } - - for _, pc := range c.Module.ProviderConfigs { - m[pc.Name] = struct{}{} - } - for _, rc := range c.Module.ManagedResources { - providerAddr := rc.ProviderConfigAddr() - m[providerAddr.Type] = struct{}{} - } - for _, rc := range c.Module.DataResources { - providerAddr := rc.ProviderConfigAddr() - m[providerAddr.Type] = struct{}{} - } - - // Must also visit our child modules, recursively. - for _, cc := range c.Children { - cc.gatherProviderTypes(m) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go deleted file mode 100644 index cb46b65a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go +++ /dev/null @@ -1,160 +0,0 @@ -package configs - -import ( - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// BuildConfig constructs a Config from a root module by loading all of its -// descendent modules via the given ModuleWalker. -// -// The result is a module tree that has so far only had basic module- and -// file-level invariants validated. If the returned diagnostics contains errors, -// the returned module tree may be incomplete but can still be used carefully -// for static analysis. -func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { - var diags hcl.Diagnostics - cfg := &Config{ - Module: root, - } - cfg.Root = cfg // Root module is self-referential. - cfg.Children, diags = buildChildModules(cfg, walker) - return cfg, diags -} - -func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { - var diags hcl.Diagnostics - ret := map[string]*Config{} - - calls := parent.Module.ModuleCalls - - // We'll sort the calls by their local names so that they'll appear in a - // predictable order in any logging that's produced during the walk. 
- callNames := make([]string, 0, len(calls)) - for k := range calls { - callNames = append(callNames, k) - } - sort.Strings(callNames) - - for _, callName := range callNames { - call := calls[callName] - path := make([]string, len(parent.Path)+1) - copy(path, parent.Path) - path[len(path)-1] = call.Name - - req := ModuleRequest{ - Name: call.Name, - Path: path, - SourceAddr: call.SourceAddr, - SourceAddrRange: call.SourceAddrRange, - VersionConstraint: call.Version, - Parent: parent, - CallRange: call.DeclRange, - } - - mod, ver, modDiags := walker.LoadModule(&req) - diags = append(diags, modDiags...) - if mod == nil { - // nil can be returned if the source address was invalid and so - // nothing could be loaded whatsoever. LoadModule should've - // returned at least one error diagnostic in that case. - continue - } - - child := &Config{ - Parent: parent, - Root: parent.Root, - Path: path, - Module: mod, - CallRange: call.DeclRange, - SourceAddr: call.SourceAddr, - SourceAddrRange: call.SourceAddrRange, - Version: ver, - } - - child.Children, modDiags = buildChildModules(child, walker) - diags = append(diags, modDiags...) - - ret[call.Name] = child - } - - return ret, diags -} - -// A ModuleWalker knows how to find and load a child module given details about -// the module to be loaded and a reference to its partially-loaded parent -// Config. -type ModuleWalker interface { - // LoadModule finds and loads a requested child module. - // - // If errors are detected during loading, implementations should return them - // in the diagnostics object. If the diagnostics object contains any errors - // then the caller will tolerate the returned module being nil or incomplete. - // If no errors are returned, it should be non-nil and complete. - // - // Full validation need not have been performed but an implementation should - // ensure that the basic file- and module-validations performed by the - // LoadConfigDir function (valid syntax, no namespace collisions, etc) have - // been performed before returning a module. - LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) -} - -// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps -// a callback function, for more convenient use of that interface. -type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) - -// LoadModule implements ModuleWalker. -func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { - return f(req) -} - -// ModuleRequest is used with the ModuleWalker interface to describe a child -// module that must be loaded. -type ModuleRequest struct { - // Name is the "logical name" of the module call within configuration. - // This is provided in case the name is used as part of a storage key - // for the module, but implementations must otherwise treat it as an - // opaque string. It is guaranteed to have already been validated as an - // HCL identifier and UTF-8 encoded. - Name string - - // Path is a list of logical names that traverse from the root module to - // this module. This can be used, for example, to form a lookup key for - // each distinct module call in a configuration, allowing for multiple - // calls with the same name at different points in the tree. - Path addrs.Module - - // SourceAddr is the source address string provided by the user in - // configuration. - SourceAddr string - - // SourceAddrRange is the source range for the SourceAddr value as it - // was provided in configuration. 
This can and should be used to generate - // diagnostics about the source address having invalid syntax, referring - // to a non-existent object, etc. - SourceAddrRange hcl.Range - - // VersionConstraint is the version constraint applied to the module in - // configuration. This data structure includes the source range for - // the constraint, which can and should be used to generate diagnostics - // about constraint-related issues, such as constraints that eliminate all - // available versions of a module whose source is otherwise valid. - VersionConstraint VersionConstraint - - // Parent is the partially-constructed module tree node that the loaded - // module will be added to. Callers may refer to any field of this - // structure except Children, which is still under construction when - // ModuleRequest objects are created and thus has undefined content. - // The main reason this is provided is so that full module paths can - // be constructed for uniqueness. - Parent *Config - - // CallRange is the source range for the header of the "module" block - // in configuration that prompted this request. This can be used as the - // subject of an error diagnostic that relates to the module call itself, - // rather than to either its source address or its version number. - CallRange hcl.Range -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go deleted file mode 100644 index ebbeb3b6..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go +++ /dev/null @@ -1,125 +0,0 @@ -package configload - -import ( - "io" - "os" - "path/filepath" - "strings" -) - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -func copyDir(dst, src string) error { - src, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if path == src { - return nil - } - - if strings.HasPrefix(filepath.Base(path), ".") { - // Skip any dot files - if info.IsDir() { - return filepath.SkipDir - } else { - return nil - } - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // we don't want to try and copy the same file over itself. - if eq, err := sameFile(path, dstPath); eq { - return nil - } else if err != nil { - return err - } - - // If we have a directory, make that subdirectory, then continue - // the walk. - if info.IsDir() { - if path == filepath.Join(src, dst) { - // dst is in src; don't walk it. - return nil - } - - if err := os.MkdirAll(dstPath, 0755); err != nil { - return err - } - - return nil - } - - // If the current path is a symlink, recreate the symlink relative to - // the dst directory - if info.Mode()&os.ModeSymlink == os.ModeSymlink { - target, err := os.Readlink(path) - if err != nil { - return err - } - - return os.Symlink(target, dstPath) - } - - // If we have a file, copy the contents. 
-		srcF, err := os.Open(path)
-		if err != nil {
-			return err
-		}
-		defer srcF.Close()
-
-		dstF, err := os.Create(dstPath)
-		if err != nil {
-			return err
-		}
-		defer dstF.Close()
-
-		if _, err := io.Copy(dstF, srcF); err != nil {
-			return err
-		}
-
-		// Chmod it
-		return os.Chmod(dstPath, info.Mode())
-	}
-
-	return filepath.Walk(src, walkFn)
-}
-
-// sameFile tries to determine if two paths are the same file.
-// If the paths don't match, we look up the inode on supported systems.
-func sameFile(a, b string) (bool, error) {
-	if a == b {
-		return true, nil
-	}
-
-	aIno, err := inode(a)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return false, nil
-		}
-		return false, err
-	}
-
-	bIno, err := inode(b)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return false, nil
-		}
-		return false, err
-	}
-
-	if aIno > 0 && aIno == bIno {
-		return true, nil
-	}
-
-	return false, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go
deleted file mode 100644
index 8b615f90..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// Package configload knows how to install modules into the .terraform/modules
-// directory and to load modules from those installed locations. It is used
-// in conjunction with the LoadConfig function in the parent package.
-package configload
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go
deleted file mode 100644
index 57df0414..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build linux darwin openbsd netbsd solaris dragonfly
-
-package configload
-
-import (
-	"fmt"
-	"os"
-	"syscall"
-)
-
-// look up the inode of a file on posix systems
-func inode(path string) (uint64, error) {
-	stat, err := os.Stat(path)
-	if err != nil {
-		return 0, err
-	}
-	if st, ok := stat.Sys().(*syscall.Stat_t); ok {
-		return st.Ino, nil
-	}
-	return 0, fmt.Errorf("could not determine file inode")
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go
deleted file mode 100644
index 4dc28eaa..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build freebsd
-
-package configload
-
-import (
-	"fmt"
-	"os"
-	"syscall"
-)
-
-// look up the inode of a file on posix systems
-func inode(path string) (uint64, error) {
-	stat, err := os.Stat(path)
-	if err != nil {
-		return 0, err
-	}
-	if st, ok := stat.Sys().(*syscall.Stat_t); ok {
-		return uint64(st.Ino), nil
-	}
-	return 0, fmt.Errorf("could not determine file inode")
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go
deleted file mode 100644
index 0d22e672..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build windows
-
-package configload
-
-// no syscall.Stat_t on windows, return 0 for inodes
-func inode(path string) (uint64, error) {
-	return 0, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go
deleted file mode 100644
index 0d12d7d2..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package configload
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/registry"
-	"github.com/hashicorp/terraform-svchost/disco"
-	"github.com/spf13/afero"
-)
-
-// A Loader instance is the main entry-point for loading configurations via
-// this package.
-//
-// It extends the general config-loading functionality in the parent package
-// "configs" to support installation of modules from remote sources and
-// loading full configurations using modules that were previously installed.
-type Loader struct {
-	// parser is used to read configuration
-	parser *configs.Parser
-
-	// modules is used to install and locate descendent modules that are
-	// referenced (directly or indirectly) from the root module.
-	modules moduleMgr
-}
-
-// Config is used with NewLoader to specify configuration arguments for the
-// loader.
-type Config struct {
-	// ModulesDir is a path to a directory where descendent modules are
-	// (or should be) installed. (This is usually the
-	// .terraform/modules directory, in the common case where this package
-	// is being loaded from the main Terraform CLI package.)
-	ModulesDir string
-
-	// Services is the service discovery client to use when locating remote
-	// module registry endpoints. If this is nil then registry sources are
-	// not supported, which should be true only in specialized circumstances
-	// such as in tests.
-	Services *disco.Disco
-}
-
-// NewLoader creates and returns a loader that reads configuration from the
-// real OS filesystem.
-//
-// The loader has some internal state about the modules that are currently
-// installed, which is read from disk as part of this function. If that
-// manifest cannot be read then an error will be returned.
-func NewLoader(config *Config) (*Loader, error) {
-	fs := afero.NewOsFs()
-	parser := configs.NewParser(fs)
-	reg := registry.NewClient(config.Services, nil)
-
-	ret := &Loader{
-		parser: parser,
-		modules: moduleMgr{
-			FS:         afero.Afero{Fs: fs},
-			CanInstall: true,
-			Dir:        config.ModulesDir,
-			Services:   config.Services,
-			Registry:   reg,
-		},
-	}
-
-	err := ret.modules.readModuleManifestSnapshot()
-	if err != nil {
-		return nil, fmt.Errorf("failed to read module manifest: %s", err)
-	}
-
-	return ret, nil
-}
-
-// ModulesDir returns the path to the directory where the loader will look for
-// the local cache of remote module packages.
-func (l *Loader) ModulesDir() string {
-	return l.modules.Dir
-}
-
-// RefreshModules updates the in-memory cache of the module manifest from the
-// module manifest file on disk. This is not necessary in normal use because
-// module installation and configuration loading are separate steps, but it
-// can be useful in tests where module installation is done as a part of
-// configuration loading by a helper function.
-//
-// Call this function after any module installation where an existing loader
-// is already alive and may be used again later.
-//
-// An error is returned if the manifest file cannot be read.
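Editorial aside (not part of the patch): a sketch of how this Loader API fits together. Since the package is internal to the SDK, it cannot actually be imported by outside code, so this is illustrative only, assuming the types shown in this file plus the LoadConfig method that appears in loader_load.go below.

package main

import (
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload"
)

func main() {
	// NewLoader reads the module manifest from ModulesDir, as shown above;
	// leaving Services nil disables registry sources, as in tests.
	loader, err := configload.NewLoader(&configload.Config{
		ModulesDir: ".terraform/modules",
	})
	if err != nil {
		log.Fatal(err)
	}

	// LoadConfig builds the static module tree rooted at the given directory,
	// assuming all descendent modules are already installed.
	cfg, diags := loader.LoadConfig(".")
	if diags.HasErrors() {
		log.Fatal(diags.Error())
	}
	log.Printf("loaded root module with %d direct child module calls", len(cfg.Children))
}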
-func (l *Loader) RefreshModules() error { - if l == nil { - // Nothing to do, then. - return nil - } - return l.modules.readModuleManifestSnapshot() -} - -// Sources returns the source code cache for the underlying parser of this -// loader. This is a shorthand for l.Parser().Sources(). -func (l *Loader) Sources() map[string][]byte { - return l.parser.Sources() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go deleted file mode 100644 index bcfa733e..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go +++ /dev/null @@ -1,105 +0,0 @@ -package configload - -import ( - "fmt" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// LoadConfig reads the Terraform module in the given directory and uses it as the -// root module to build the static module tree that represents a configuration, -// assuming that all required descendent modules have already been installed. -// -// If error diagnostics are returned, the returned configuration may be either -// nil or incomplete. In the latter case, cautious static analysis is possible -// in spite of the errors. -// -// LoadConfig performs the basic syntax and uniqueness validations that are -// required to process the individual modules, and also detects -func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) { - rootMod, diags := l.parser.LoadConfigDir(rootDir) - if rootMod == nil { - return nil, diags - } - - cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad)) - diags = append(diags, cDiags...) - - return cfg, diags -} - -// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that -// are presumed to have already been installed. A different function -// (moduleWalkerInstall) is used for installation. -func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { - // Since we're just loading here, we expect that all referenced modules - // will be already installed and described in our manifest. However, we - // do verify that the manifest and the configuration are in agreement - // so that we can prompt the user to run "terraform init" if not. - - key := l.modules.manifest.ModuleKey(req.Path) - record, exists := l.modules.manifest[key] - - if !exists { - return nil, nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Module not installed", - Detail: "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", - Subject: &req.CallRange, - }, - } - } - - var diags hcl.Diagnostics - - // Check for inconsistencies between manifest and config - if req.SourceAddr != record.SourceAddr { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module source has changed", - Detail: "The source address was changed since this module was installed. 
Run \"terraform init\" to install all modules required by this configuration.", - Subject: &req.SourceAddrRange, - }) - } - if len(req.VersionConstraint.Required) > 0 && record.Version == nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module version requirements have changed", - Detail: "The version requirements have changed since this module was installed and the installed version is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", - Subject: &req.SourceAddrRange, - }) - } - if record.Version != nil && !req.VersionConstraint.Required.Check(record.Version) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module version requirements have changed", - Detail: fmt.Sprintf( - "The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", - record.Version, - ), - Subject: &req.SourceAddrRange, - }) - } - - mod, mDiags := l.parser.LoadConfigDir(record.Dir) - diags = append(diags, mDiags...) - if mod == nil { - // nil specifically indicates that the directory does not exist or - // cannot be read, so in this case we'll discard any generic diagnostics - // returned from LoadConfigDir and produce our own context-sensitive - // error message. - return nil, nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Module not installed", - Detail: fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir), - Subject: &req.CallRange, - }, - } - } - - return mod, record.Version, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go deleted file mode 100644 index 0772edc7..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go +++ /dev/null @@ -1,492 +0,0 @@ -package configload - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "time" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" - "github.com/spf13/afero" -) - -// LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously -// creates an in-memory snapshot of the configuration files used, which can -// be later used to create a loader that may read only from this snapshot. -func (l *Loader) LoadConfigWithSnapshot(rootDir string) (*configs.Config, *Snapshot, hcl.Diagnostics) { - rootMod, diags := l.parser.LoadConfigDir(rootDir) - if rootMod == nil { - return nil, nil, diags - } - - snap := &Snapshot{ - Modules: map[string]*SnapshotModule{}, - } - walker := l.makeModuleWalkerSnapshot(snap) - cfg, cDiags := configs.BuildConfig(rootMod, walker) - diags = append(diags, cDiags...) - - addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil) - diags = append(diags, addDiags...) - - return cfg, snap, diags -} - -// NewLoaderFromSnapshot creates a Loader that reads files only from the -// given snapshot. -// -// A snapshot-based loader cannot install modules, so calling InstallModules -// on the return value will cause a panic. 
-// -// A snapshot-based loader also has access only to configuration files. Its -// underlying parser does not have access to other files in the native -// filesystem, such as values files. For those, either use a normal loader -// (created by NewLoader) or use the configs.Parser API directly. -func NewLoaderFromSnapshot(snap *Snapshot) *Loader { - fs := snapshotFS{snap} - parser := configs.NewParser(fs) - - ret := &Loader{ - parser: parser, - modules: moduleMgr{ - FS: afero.Afero{Fs: fs}, - CanInstall: false, - manifest: snap.moduleManifest(), - }, - } - - return ret -} - -// Snapshot is an in-memory representation of the source files from a -// configuration, which can be used as an alternative configurations source -// for a loader with NewLoaderFromSnapshot. -// -// The primary purpose of a Snapshot is to build the configuration portion -// of a plan file (see ../../plans/planfile) so that it can later be reloaded -// and used to recover the exact configuration that the plan was built from. -type Snapshot struct { - // Modules is a map from opaque module keys (suitable for use as directory - // names on all supported operating systems) to the snapshot information - // about each module. - Modules map[string]*SnapshotModule -} - -// SnapshotModule represents a single module within a Snapshot. -type SnapshotModule struct { - // Dir is the path, relative to the root directory given when the - // snapshot was created, where the module appears in the snapshot's - // virtual filesystem. - Dir string - - // Files is a map from each configuration file filename for the - // module to a raw byte representation of the source file contents. - Files map[string][]byte - - // SourceAddr is the source address given for this module in configuration. - SourceAddr string `json:"Source"` - - // Version is the version of the module that is installed, or nil if - // the module is installed from a source that does not support versions. - Version *version.Version `json:"-"` -} - -// moduleManifest constructs a module manifest based on the contents of -// the receiving snapshot. -func (s *Snapshot) moduleManifest() modsdir.Manifest { - ret := make(modsdir.Manifest) - - for k, modSnap := range s.Modules { - ret[k] = modsdir.Record{ - Key: k, - Dir: modSnap.Dir, - SourceAddr: modSnap.SourceAddr, - Version: modSnap.Version, - } - } - - return ret -} - -// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit -// the same lookup behaviors as l.moduleWalkerLoad but will additionally write -// source files from the referenced modules into the given snapshot. -func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker { - return configs.ModuleWalkerFunc( - func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { - mod, v, diags := l.moduleWalkerLoad(req) - if diags.HasErrors() { - return mod, v, diags - } - - key := l.modules.manifest.ModuleKey(req.Path) - record, exists := l.modules.manifest[key] - - if !exists { - // Should never happen, since otherwise moduleWalkerLoader would've - // returned an error and we would've returned already. - panic(fmt.Sprintf("module %s is not present in manifest", key)) - } - - addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version) - diags = append(diags, addDiags...) 
- - return mod, v, diags - }, - ) -} - -func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics { - var diags hcl.Diagnostics - - primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir) - if moreDiags.HasErrors() { - // Any diagnostics we get here should be already present - // in diags, so it's weird if we get here but we'll allow it - // and return a general error message in that case. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Failed to read directory for module", - Detail: fmt.Sprintf("The source directory %s could not be read", dir), - }) - return diags - } - - snapMod := &SnapshotModule{ - Dir: dir, - Files: map[string][]byte{}, - SourceAddr: sourceAddr, - Version: v, - } - - files := make([]string, 0, len(primaryFiles)+len(overrideFiles)) - files = append(files, primaryFiles...) - files = append(files, overrideFiles...) - sources := l.Sources() // should be populated with all the files we need by now - for _, filePath := range files { - filename := filepath.Base(filePath) - src, exists := sources[filePath] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing source file for snapshot", - Detail: fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath), - }) - continue - } - snapMod.Files[filepath.Clean(filename)] = src - } - - snap.Modules[key] = snapMod - - return diags -} - -// snapshotFS is an implementation of afero.Fs that reads from a snapshot. -// -// This is not intended as a general-purpose filesystem implementation. Instead, -// it just supports the minimal functionality required to support the -// configuration loader and parser as an implementation detail of creating -// a loader from a snapshot. -type snapshotFS struct { - snap *Snapshot -} - -var _ afero.Fs = snapshotFS{} - -func (fs snapshotFS) Create(name string) (afero.File, error) { - return nil, fmt.Errorf("cannot create file inside configuration snapshot") -} - -func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error { - return fmt.Errorf("cannot create directory inside configuration snapshot") -} - -func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error { - return fmt.Errorf("cannot create directories inside configuration snapshot") -} - -func (fs snapshotFS) Open(name string) (afero.File, error) { - - // Our "filesystem" is sparsely populated only with the directories - // mentioned by modules in our snapshot, so the high-level process - // for opening a file is: - // - Find the module snapshot corresponding to the containing directory - // - Find the file within that snapshot - // - Wrap the resulting byte slice in a snapshotFile to return - // - // The other possibility handled here is if the given name is for the - // module directory itself, in which case we'll return a snapshotDir - // instead. - // - // This function doesn't try to be incredibly robust in supporting - // different permutations of paths, etc because in practice we only - // need to support the path forms that our own loader and parser will - // generate. - - dir := filepath.Dir(name) - fn := filepath.Base(name) - directDir := filepath.Clean(name) - - // First we'll check to see if this is an exact path for a module directory. 
- // We need to do this first (rather than as part of the next loop below) - // because a module in a child directory of another module can otherwise - // appear to be a file in that parent directory. - for _, candidate := range fs.snap.Modules { - modDir := filepath.Clean(candidate.Dir) - if modDir == directDir { - // We've matched the module directory itself - filenames := make([]string, 0, len(candidate.Files)) - for n := range candidate.Files { - filenames = append(filenames, n) - } - sort.Strings(filenames) - return snapshotDir{ - filenames: filenames, - }, nil - } - } - - // If we get here then the given path isn't a module directory exactly, so - // we'll treat it as a file path and try to find a module directory it - // could be located in. - var modSnap *SnapshotModule - for _, candidate := range fs.snap.Modules { - modDir := filepath.Clean(candidate.Dir) - if modDir == dir { - modSnap = candidate - break - } - } - if modSnap == nil { - return nil, os.ErrNotExist - } - - src, exists := modSnap.Files[fn] - if !exists { - return nil, os.ErrNotExist - } - - return &snapshotFile{ - src: src, - }, nil -} - -func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { - return fs.Open(name) -} - -func (fs snapshotFS) Remove(name string) error { - return fmt.Errorf("cannot remove file inside configuration snapshot") -} - -func (fs snapshotFS) RemoveAll(path string) error { - return fmt.Errorf("cannot remove files inside configuration snapshot") -} - -func (fs snapshotFS) Rename(old, new string) error { - return fmt.Errorf("cannot rename file inside configuration snapshot") -} - -func (fs snapshotFS) Stat(name string) (os.FileInfo, error) { - f, err := fs.Open(name) - if err != nil { - return nil, err - } - _, isDir := f.(snapshotDir) - return snapshotFileInfo{ - name: filepath.Base(name), - isDir: isDir, - }, nil -} - -func (fs snapshotFS) Name() string { - return "ConfigSnapshotFS" -} - -func (fs snapshotFS) Chmod(name string, mode os.FileMode) error { - return fmt.Errorf("cannot set file mode inside configuration snapshot") -} - -func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error { - return fmt.Errorf("cannot set file times inside configuration snapshot") -} - -type snapshotFile struct { - snapshotFileStub - src []byte - at int64 -} - -var _ afero.File = (*snapshotFile)(nil) - -func (f *snapshotFile) Read(p []byte) (n int, err error) { - if len(p) > 0 && f.at == int64(len(f.src)) { - return 0, io.EOF - } - if f.at > int64(len(f.src)) { - return 0, io.ErrUnexpectedEOF - } - if int64(len(f.src))-f.at >= int64(len(p)) { - n = len(p) - } else { - n = int(int64(len(f.src)) - f.at) - } - copy(p, f.src[f.at:f.at+int64(n)]) - f.at += int64(n) - return -} - -func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) { - f.at = off - return f.Read(p) -} - -func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) { - switch whence { - case 0: - f.at = offset - case 1: - f.at += offset - case 2: - f.at = int64(len(f.src)) + offset - } - return f.at, nil -} - -type snapshotDir struct { - snapshotFileStub - filenames []string - at int -} - -var _ afero.File = snapshotDir{} - -func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) { - names, err := f.Readdirnames(count) - if err != nil { - return nil, err - } - ret := make([]os.FileInfo, len(names)) - for i, name := range names { - ret[i] = snapshotFileInfo{ - name: name, - isDir: false, - } - } - return ret, nil -} - -func (f snapshotDir) Readdirnames(count int) 
([]string, error) { - var outLen int - names := f.filenames[f.at:] - if count > 0 { - if len(names) < count { - outLen = len(names) - } else { - outLen = count - } - if len(names) == 0 { - return nil, io.EOF - } - } else { - outLen = len(names) - } - f.at += outLen - - return names[:outLen], nil -} - -// snapshotFileInfo is a minimal implementation of os.FileInfo to support our -// virtual filesystem from snapshots. -type snapshotFileInfo struct { - name string - isDir bool -} - -var _ os.FileInfo = snapshotFileInfo{} - -func (fi snapshotFileInfo) Name() string { - return fi.name -} - -func (fi snapshotFileInfo) Size() int64 { - // In practice, our parser and loader never call Size - return -1 -} - -func (fi snapshotFileInfo) Mode() os.FileMode { - return os.ModePerm -} - -func (fi snapshotFileInfo) ModTime() time.Time { - return time.Now() -} - -func (fi snapshotFileInfo) IsDir() bool { - return fi.isDir -} - -func (fi snapshotFileInfo) Sys() interface{} { - return nil -} - -type snapshotFileStub struct{} - -func (f snapshotFileStub) Close() error { - return nil -} - -func (f snapshotFileStub) Read(p []byte) (n int, err error) { - return 0, fmt.Errorf("cannot read") -} - -func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) { - return 0, fmt.Errorf("cannot read") -} - -func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) { - return 0, fmt.Errorf("cannot seek") -} - -func (f snapshotFileStub) Write(p []byte) (n int, err error) { - return f.WriteAt(p, 0) -} - -func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) { - return 0, fmt.Errorf("cannot write to file in snapshot") -} - -func (f snapshotFileStub) WriteString(s string) (n int, err error) { - return 0, fmt.Errorf("cannot write to file in snapshot") -} - -func (f snapshotFileStub) Name() string { - // in practice, the loader and parser never use this - return "" -} - -func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) { - return nil, fmt.Errorf("cannot use Readdir on a file") -} - -func (f snapshotFileStub) Readdirnames(count int) ([]string, error) { - return nil, fmt.Errorf("cannot use Readdir on a file") -} - -func (f snapshotFileStub) Stat() (os.FileInfo, error) { - return nil, fmt.Errorf("cannot stat") -} - -func (f snapshotFileStub) Sync() error { - return nil -} - -func (f snapshotFileStub) Truncate(size int64) error { - return fmt.Errorf("cannot write to file in snapshot") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go deleted file mode 100644 index 797f50d2..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go +++ /dev/null @@ -1,62 +0,0 @@ -package configload - -import ( - "os" - "path/filepath" - - "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/spf13/afero" -) - -type moduleMgr struct { - FS afero.Afero - - // CanInstall is true for a module manager that can support installation. - // - // This must be set only if FS is an afero.OsFs, because the installer - // (which uses go-getter) is not aware of the virtual filesystem - // abstraction and will always write into the "real" filesystem. 
- CanInstall bool - - // Dir is the path where descendent modules are (or will be) installed. - Dir string - - // Services is a service discovery client that will be used to find - // remote module registry endpoints. This object may be pre-loaded with - // cached discovery information. - Services *disco.Disco - - // Registry is a client for the module registry protocol, which is used - // when a module is requested from a registry source. - Registry *registry.Client - - // manifest tracks the currently-installed modules for this manager. - // - // The loader may read this. Only the installer may write to it, and - // after a set of updates are completed the installer must call - // writeModuleManifestSnapshot to persist a snapshot of the manifest - // to disk for use on subsequent runs. - manifest modsdir.Manifest -} - -func (m *moduleMgr) manifestSnapshotPath() string { - return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename) -} - -// readModuleManifestSnapshot loads a manifest snapshot from the filesystem. -func (m *moduleMgr) readModuleManifestSnapshot() error { - r, err := m.FS.Open(m.manifestSnapshotPath()) - if err != nil { - if os.IsNotExist(err) { - // We'll treat a missing file as an empty manifest - m.manifest = make(modsdir.Manifest) - return nil - } - return err - } - - m.manifest, err = modsdir.ReadManifestSnapshot(r) - return err -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go deleted file mode 100644 index 86ca9d10..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go +++ /dev/null @@ -1,43 +0,0 @@ -package configload - -import ( - "io/ioutil" - "os" - "testing" -) - -// NewLoaderForTests is a variant of NewLoader that is intended to be more -// convenient for unit tests. -// -// The loader's modules directory is a separate temporary directory created -// for each call. Along with the created loader, this function returns a -// cleanup function that should be called before the test completes in order -// to remove that temporary directory. -// -// In the case of any errors, t.Fatal (or similar) will be called to halt -// execution of the test, so the calling test does not need to handle errors -// itself. -func NewLoaderForTests(t *testing.T) (*Loader, func()) { - t.Helper() - - modulesDir, err := ioutil.TempDir("", "tf-configs") - if err != nil { - t.Fatalf("failed to create temporary modules dir: %s", err) - return nil, func() {} - } - - cleanup := func() { - os.RemoveAll(modulesDir) - } - - loader, err := NewLoader(&Config{ - ModulesDir: modulesDir, - }) - if err != nil { - cleanup() - t.Fatalf("failed to create config loader: %s", err) - return nil, func() {} - } - - return loader, cleanup -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go deleted file mode 100644 index 2c21ca5e..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go +++ /dev/null @@ -1,123 +0,0 @@ -package configschema - -import ( - "github.com/hashicorp/hcl/v2/hcldec" -) - -var mapLabelNames = []string{"key"} - -// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body -// using the facilities in the hcldec package. 
-// -// The returned specification is guaranteed to return a value of the same type -// returned by method ImpliedType, but it may contain null values if any of the -// block attributes are defined as optional and/or computed respectively. -func (b *Block) DecoderSpec() hcldec.Spec { - ret := hcldec.ObjectSpec{} - if b == nil { - return ret - } - - for name, attrS := range b.Attributes { - ret[name] = attrS.decoderSpec(name) - } - - for name, blockS := range b.BlockTypes { - if _, exists := ret[name]; exists { - // This indicates an invalid schema, since it's not valid to - // define both an attribute and a block type of the same name. - // However, we don't raise this here since it's checked by - // InternalValidate. - continue - } - - childSpec := blockS.Block.DecoderSpec() - - // We can only validate 0 or 1 for MinItems, because a dynamic block - // may satisfy any number of min items while only having a single - // block in the config. We cannot validate MaxItems because a - // configuration may have any number of dynamic blocks - minItems := 0 - if blockS.MinItems > 1 { - minItems = 1 - } - - switch blockS.Nesting { - case NestingSingle, NestingGroup: - ret[name] = &hcldec.BlockSpec{ - TypeName: name, - Nested: childSpec, - Required: blockS.MinItems == 1, - } - if blockS.Nesting == NestingGroup { - ret[name] = &hcldec.DefaultSpec{ - Primary: ret[name], - Default: &hcldec.LiteralSpec{ - Value: blockS.EmptyValue(), - }, - } - } - case NestingList: - // We prefer to use a list where possible, since it makes our - // implied type more complete, but if there are any - // dynamically-typed attributes inside we must use a tuple - // instead, at the expense of our type then not being predictable. - if blockS.Block.ImpliedType().HasDynamicTypes() { - ret[name] = &hcldec.BlockTupleSpec{ - TypeName: name, - Nested: childSpec, - MinItems: minItems, - } - } else { - ret[name] = &hcldec.BlockListSpec{ - TypeName: name, - Nested: childSpec, - MinItems: minItems, - } - } - case NestingSet: - // We forbid dynamically-typed attributes inside NestingSet in - // InternalValidate, so we don't do anything special to handle - // that here. (There is no set analog to tuple and object types, - // because cty's set implementation depends on knowing the static - // type in order to properly compute its internal hashes.) - ret[name] = &hcldec.BlockSetSpec{ - TypeName: name, - Nested: childSpec, - MinItems: minItems, - } - case NestingMap: - // We prefer to use a list where possible, since it makes our - // implied type more complete, but if there are any - // dynamically-typed attributes inside we must use a tuple - // instead, at the expense of our type then not being predictable. - if blockS.Block.ImpliedType().HasDynamicTypes() { - ret[name] = &hcldec.BlockObjectSpec{ - TypeName: name, - Nested: childSpec, - LabelNames: mapLabelNames, - } - } else { - ret[name] = &hcldec.BlockMapSpec{ - TypeName: name, - Nested: childSpec, - LabelNames: mapLabelNames, - } - } - default: - // Invalid nesting type is just ignored. It's checked by - // InternalValidate. 
- continue
- }
- }
-
- return ret
-}
-
-func (a *Attribute) decoderSpec(name string) hcldec.Spec {
- return &hcldec.AttrSpec{
- Name: name,
- Type: a.Type,
- Required: a.Required,
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go
deleted file mode 100644
index 51f51ceb..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package configschema
-
-import (
- "github.com/hashicorp/hcl/v2/hcldec"
- "github.com/zclconf/go-cty/cty"
-)
-
-// ImpliedType returns the cty.Type that would result from decoding a
-// configuration block using the receiving block schema.
-//
-// ImpliedType always returns a result, even if the given schema is
-// inconsistent. Code that creates configschema.Block objects should be
-// tested using the InternalValidate method to detect any inconsistencies
-// that would cause this method to fall back on defaults and assumptions.
-func (b *Block) ImpliedType() cty.Type {
- if b == nil {
- return cty.EmptyObject
- }
-
- return hcldec.ImpliedType(b.DecoderSpec())
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go
deleted file mode 100644
index ebf1abba..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package configschema
-
-import (
- "fmt"
- "regexp"
-
- "github.com/zclconf/go-cty/cty"
-
- multierror "github.com/hashicorp/go-multierror"
-)
-
-var validName = regexp.MustCompile(`^[a-z0-9_]+$`)
-
-// InternalValidate returns an error if the receiving block and its child
-// schema definitions have any inconsistencies with the documented rules for
-// valid schema.
-//
-// This is intended to be used within unit tests to detect when a given
-// schema is invalid.
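// (Illustrative sketch, not part of the deleted file.) InternalValidate is
// meant for unit tests, per the comment above; a minimal example, assuming a
// *configschema.Block test fixture named schema:
//
//	func TestSchemaIsValid(t *testing.T) {
//	    if err := schema.InternalValidate(); err != nil {
//	        t.Fatal(err) // any schema inconsistency fails the test
//	    }
//	}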
-func (b *Block) InternalValidate() error {
- if b == nil {
- return fmt.Errorf("top-level block schema is nil")
- }
- return b.internalValidate("", nil)
-
-}
-
-func (b *Block) internalValidate(prefix string, err error) error {
- for name, attrS := range b.Attributes {
- if attrS == nil {
- err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name))
- continue
- }
- if !validName.MatchString(name) {
- err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
- }
- if attrS.Optional == false && attrS.Required == false && attrS.Computed == false {
- err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name))
- }
- if attrS.Optional && attrS.Required {
- err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name))
- }
- if attrS.Computed && attrS.Required {
- err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name))
- }
- if attrS.Type == cty.NilType {
- err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name))
- }
- }
-
- for name, blockS := range b.BlockTypes {
- if blockS == nil {
- err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name))
- continue
- }
-
- if _, isAttr := b.Attributes[name]; isAttr {
- err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name))
- } else if !validName.MatchString(name) {
- err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
- }
-
- if blockS.MinItems < 0 || blockS.MaxItems < 0 {
- err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems may not be negative", prefix, name))
- }
-
- switch blockS.Nesting {
- case NestingSingle:
- switch {
- case blockS.MinItems != blockS.MaxItems:
- err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name))
- case blockS.MinItems < 0 || blockS.MinItems > 1:
- err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name))
- }
- case NestingGroup:
- if blockS.MinItems != 0 || blockS.MaxItems != 0 {
- err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name))
- }
- case NestingList, NestingSet:
- if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 {
- err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting))
- }
- if blockS.Nesting == NestingSet {
- ety := blockS.Block.ImpliedType()
- if ety.HasDynamicTypes() {
- // This is not permitted because the HCL (cty) set implementation
- // needs to know the exact type of set elements in order to
- // properly hash them, and so can't support mixed types.
- err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name))
- }
- }
- case NestingMap:
- if blockS.MinItems != 0 || blockS.MaxItems != 0 {
- err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name))
- }
- default:
- err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting))
- }
-
- subPrefix := prefix + name + "."
- err = blockS.Block.internalValidate(subPrefix, err)
- }
-
- return err
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go
deleted file mode 100644
index 0be3b8fa..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package configschema
-
-// NoneRequired returns a deep copy of the receiver with any required
-// attributes translated to optional.
-func (b *Block) NoneRequired() *Block {
- ret := &Block{}
-
- if b.Attributes != nil {
- ret.Attributes = make(map[string]*Attribute, len(b.Attributes))
- }
- for name, attrS := range b.Attributes {
- ret.Attributes[name] = attrS.forceOptional()
- }
-
- if b.BlockTypes != nil {
- ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes))
- }
- for name, blockS := range b.BlockTypes {
- ret.BlockTypes[name] = blockS.noneRequired()
- }
-
- return ret
-}
-
-func (b *NestedBlock) noneRequired() *NestedBlock {
- ret := *b
- ret.Block = *(ret.Block.NoneRequired())
- ret.MinItems = 0
- ret.MaxItems = 0
- return &ret
-}
-
-func (a *Attribute) forceOptional() *Attribute {
- ret := *a
- ret.Optional = true
- ret.Required = false
- return &ret
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go
deleted file mode 100644
index 446705ba..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package configschema
-
-import (
- "fmt"
- "sort"
-
- "github.com/hashicorp/hcl/v2"
- "github.com/hashicorp/hcl/v2/hclsyntax"
- "github.com/zclconf/go-cty/cty"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean"
- "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-// StaticValidateTraversal checks whether the given traversal (which must be
-// relative) refers to a construct in the receiving schema, returning error
-// diagnostics if any problems are found.
-//
-// This method is "optimistic" in that it will not return errors for possible
-// problems that cannot be detected statically. It is possible that a
-// traversal which passed static validation will still fail when evaluated.
-func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics {
- if !traversal.IsRelative() {
- panic("StaticValidateTraversal on absolute traversal")
- }
- if len(traversal) == 0 {
- return nil
- }
-
- var diags tfdiags.Diagnostics
-
- next := traversal[0]
- after := traversal[1:]
-
- var name string
- switch step := next.(type) {
- case hcl.TraverseAttr:
- name = step.Name
- case hcl.TraverseIndex:
- // No other traversal step types are allowed directly at a block.
- // If it looks like the user was trying to use index syntax to
- // access an attribute then we'll produce a specialized message.
- key := step.Key
- if key.Type() == cty.String && key.IsKnown() && !key.IsNull() {
- maybeName := key.AsString()
- if hclsyntax.ValidIdentifier(maybeName) {
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: `Invalid index operation`,
- Detail: fmt.Sprintf(`Only attribute access is allowed here.
Did you mean to access attribute %q using the dot operator?`, maybeName), - Subject: &step.SrcRange, - }) - return diags - } - } - // If it looks like some other kind of index then we'll use a generic error. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid index operation`, - Detail: `Only attribute access is allowed here, using the dot operator.`, - Subject: &step.SrcRange, - }) - return diags - default: - // No other traversal types should appear in a normal valid traversal, - // but we'll handle this with a generic error anyway to be robust. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid operation`, - Detail: `Only attribute access is allowed here, using the dot operator.`, - Subject: next.SourceRange().Ptr(), - }) - return diags - } - - if attrS, exists := b.Attributes[name]; exists { - // For attribute validation we will just apply the rest of the - // traversal to an unknown value of the attribute type and pass - // through HCL's own errors, since we don't want to replicate all of - // HCL's type checking rules here. - val := cty.UnknownVal(attrS.Type) - _, hclDiags := after.TraverseRel(val) - diags = diags.Append(hclDiags) - return diags - } - - if blockS, exists := b.BlockTypes[name]; exists { - moreDiags := blockS.staticValidateTraversal(name, after) - diags = diags.Append(moreDiags) - return diags - } - - // If we get here then the name isn't valid at all. We'll collect up - // all of the names that _are_ valid to use as suggestions. - var suggestions []string - for name := range b.Attributes { - suggestions = append(suggestions, name) - } - for name := range b.BlockTypes { - suggestions = append(suggestions, name) - } - sort.Strings(suggestions) - suggestion := didyoumean.NameSuggestion(name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Unsupported attribute`, - Detail: fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion), - Subject: next.SourceRange().Ptr(), - }) - - return diags -} - -func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics { - if b.Nesting == NestingSingle || b.Nesting == NestingGroup { - // Single blocks are easy: just pass right through. - return b.Block.StaticValidateTraversal(traversal) - } - - if len(traversal) == 0 { - // It's always valid to access a nested block's attribute directly. - return nil - } - - var diags tfdiags.Diagnostics - next := traversal[0] - after := traversal[1:] - - switch b.Nesting { - - case NestingSet: - // Can't traverse into a set at all, since it does not have any keys - // to index with. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Cannot index a set value`, - Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. 
To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName), - Subject: next.SourceRange().Ptr(), - }) - return diags - - case NestingList: - if _, ok := next.(hcl.TraverseIndex); ok { - moreDiags := b.Block.StaticValidateTraversal(after) - diags = diags.Append(moreDiags) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid operation`, - Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName), - Subject: next.SourceRange().Ptr(), - }) - } - return diags - - case NestingMap: - // Both attribute and index steps are valid for maps, so we'll just - // pass through here and let normal evaluation catch an - // incorrectly-typed index key later, if present. - moreDiags := b.Block.StaticValidateTraversal(after) - diags = diags.Append(moreDiags) - return diags - - default: - // Invalid nesting type is just ignored. It's checked by - // InternalValidate. (Note that we handled NestingSingle separately - // back at the start of this function.) - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go deleted file mode 100644 index 036c2d6c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go +++ /dev/null @@ -1,23 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) { - var ret []hcl.Traversal - exprs, diags := hcl.ExprList(attr.Expr) - - for _, expr := range exprs { - expr, shimDiags := shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.AbsTraversalForExpr(expr) - diags = append(diags, travDiags...) - if len(traversal) != 0 { - ret = append(ret, traversal) - } - } - - return ret, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go deleted file mode 100644 index f01eb79f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Package configs contains types that represent Terraform configurations and -// the different elements thereof. -// -// The functionality in this package can be used for some static analyses of -// Terraform configurations, but this package generally exposes representations -// of the configuration source code rather than the result of evaluating these -// objects. The sibling package "lang" deals with evaluation of structures -// and expressions in the configuration. -// -// Due to its close relationship with HCL, this package makes frequent use -// of types from the HCL API, including raw HCL diagnostic messages. Such -// diagnostics can be converted into Terraform-flavored diagnostics, if needed, -// using functions in the sibling package tfdiags. -// -// The Parser type is the main entry-point into this package. The LoadConfigDir -// method can be used to load a single module directory, and then a full -// configuration (including any descendent modules) can be produced using -// the top-level BuildConfig method. 
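// (Illustrative sketch, not part of the deleted file.) A hedged example of
// the package entry points named in the comment above; the "./infra" path and
// the walker value are assumptions, and diagnostics handling is elided:
//
//	p := configs.NewParser(nil) // nil selects the real OS filesystem
//	mod, diags := p.LoadConfigDir("./infra")
//	cfg, moreDiags := configs.BuildConfig(mod, walker) // walker resolves child modules
//	_, _, _ = cfg, diags, moreDiags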
-package configs diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go deleted file mode 100644 index 78223c3b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go +++ /dev/null @@ -1,404 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Module is a container for a set of configuration constructs that are -// evaluated within a common namespace. -type Module struct { - // SourceDir is the filesystem directory that the module was loaded from. - // - // This is populated automatically only for configurations loaded with - // LoadConfigDir. If the parser is using a virtual filesystem then the - // path here will be in terms of that virtual filesystem. - - // Any other caller that constructs a module directly with NewModule may - // assign a suitable value to this attribute before using it for other - // purposes. It should be treated as immutable by all consumers of Module - // values. - SourceDir string - - CoreVersionConstraints []VersionConstraint - - Backend *Backend - ProviderConfigs map[string]*Provider - ProviderRequirements map[string][]VersionConstraint - - Variables map[string]*Variable - Locals map[string]*Local - Outputs map[string]*Output - - ModuleCalls map[string]*ModuleCall - - ManagedResources map[string]*Resource - DataResources map[string]*Resource -} - -// File describes the contents of a single configuration file. -// -// Individual files are not usually used alone, but rather combined together -// with other files (conventionally, those in the same directory) to produce -// a *Module, using NewModule. -// -// At the level of an individual file we represent directly the structural -// elements present in the file, without any attempt to detect conflicting -// declarations. A File object can therefore be used for some basic static -// analysis of individual elements, but must be built into a Module to detect -// duplicate declarations. -type File struct { - CoreVersionConstraints []VersionConstraint - - Backends []*Backend - ProviderConfigs []*Provider - ProviderRequirements []*ProviderRequirement - - Variables []*Variable - Locals []*Local - Outputs []*Output - - ModuleCalls []*ModuleCall - - ManagedResources []*Resource - DataResources []*Resource -} - -// NewModule takes a list of primary files and a list of override files and -// produces a *Module by combining the files together. -// -// If there are any conflicting declarations in the given files -- for example, -// if the same variable name is defined twice -- then the resulting module -// will be incomplete and error diagnostics will be returned. Careful static -// analysis of the returned Module is still possible in this case, but the -// module will probably not be semantically valid. 
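// (Illustrative sketch, not part of the deleted file.) Combining parsed files
// as described above, assuming primary and override *File values already
// produced by the parser:
//
//	mod, diags := NewModule([]*File{primary}, []*File{override})
//	if diags.HasErrors() {
//	    // mod may be incomplete here, but is still safe to inspect statically
//	}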
-func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { - var diags hcl.Diagnostics - mod := &Module{ - ProviderConfigs: map[string]*Provider{}, - ProviderRequirements: map[string][]VersionConstraint{}, - Variables: map[string]*Variable{}, - Locals: map[string]*Local{}, - Outputs: map[string]*Output{}, - ModuleCalls: map[string]*ModuleCall{}, - ManagedResources: map[string]*Resource{}, - DataResources: map[string]*Resource{}, - } - - for _, file := range primaryFiles { - fileDiags := mod.appendFile(file) - diags = append(diags, fileDiags...) - } - - for _, file := range overrideFiles { - fileDiags := mod.mergeFile(file) - diags = append(diags, fileDiags...) - } - - return mod, diags -} - -// ResourceByAddr returns the configuration for the resource with the given -// address, or nil if there is no such resource. -func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { - key := addr.String() - switch addr.Mode { - case addrs.ManagedResourceMode: - return m.ManagedResources[key] - case addrs.DataResourceMode: - return m.DataResources[key] - default: - return nil - } -} - -func (m *Module) appendFile(file *File) hcl.Diagnostics { - var diags hcl.Diagnostics - - for _, constraint := range file.CoreVersionConstraints { - // If there are any conflicting requirements then we'll catch them - // when we actually check these constraints. - m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) - } - - for _, b := range file.Backends { - if m.Backend != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate backend configuration", - Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), - Subject: &b.DeclRange, - }) - continue - } - m.Backend = b - } - - for _, pc := range file.ProviderConfigs { - key := pc.moduleUniqueKey() - if existing, exists := m.ProviderConfigs[key]; exists { - if existing.Alias == "" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider configuration", - Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), - Subject: &pc.DeclRange, - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider configuration", - Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), - Subject: &pc.DeclRange, - }) - } - continue - } - m.ProviderConfigs[key] = pc - } - - for _, reqd := range file.ProviderRequirements { - m.ProviderRequirements[reqd.Name] = append(m.ProviderRequirements[reqd.Name], reqd.Requirement) - } - - for _, v := range file.Variables { - if existing, exists := m.Variables[v.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate variable declaration", - Detail: fmt.Sprintf("A variable named %q was already declared at %s. 
Variable names must be unique within a module.", existing.Name, existing.DeclRange),
- Subject: &v.DeclRange,
- })
- }
- m.Variables[v.Name] = v
- }
-
- for _, l := range file.Locals {
- if existing, exists := m.Locals[l.Name]; exists {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Duplicate local value definition",
- Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange),
- Subject: &l.DeclRange,
- })
- }
- m.Locals[l.Name] = l
- }
-
- for _, o := range file.Outputs {
- if existing, exists := m.Outputs[o.Name]; exists {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Duplicate output definition",
- Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange),
- Subject: &o.DeclRange,
- })
- }
- m.Outputs[o.Name] = o
- }
-
- for _, mc := range file.ModuleCalls {
- if existing, exists := m.ModuleCalls[mc.Name]; exists {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Duplicate module call",
- Detail: fmt.Sprintf("A module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange),
- Subject: &mc.DeclRange,
- })
- }
- m.ModuleCalls[mc.Name] = mc
- }
-
- for _, r := range file.ManagedResources {
- key := r.moduleUniqueKey()
- if existing, exists := m.ManagedResources[key]; exists {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type),
- Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange),
- Subject: &r.DeclRange,
- })
- continue
- }
- m.ManagedResources[key] = r
- }
-
- for _, r := range file.DataResources {
- key := r.moduleUniqueKey()
- if existing, exists := m.DataResources[key]; exists {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type),
- Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange),
- Subject: &r.DeclRange,
- })
- continue
- }
- m.DataResources[key] = r
- }
-
- return diags
-}
-
-func (m *Module) mergeFile(file *File) hcl.Diagnostics {
- var diags hcl.Diagnostics
-
- if len(file.CoreVersionConstraints) != 0 {
- // This is a bit of a strange case for overriding since we normally
- // would union together across multiple files anyway, but we'll
- // allow it and have each override file clobber any existing list.
- m.CoreVersionConstraints = nil
- for _, constraint := range file.CoreVersionConstraints {
- m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint)
- }
- }
-
- if len(file.Backends) != 0 {
- switch len(file.Backends) {
- case 1:
- m.Backend = file.Backends[0]
- default:
- // An override file with multiple backends is still invalid, even
- // though it can override backends from _other_ files.
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Duplicate backend configuration",
- Detail: fmt.Sprintf("Each override file may have only one backend configuration.
A backend was previously configured at %s.", file.Backends[0].DeclRange), - Subject: &file.Backends[1].DeclRange, - }) - } - } - - for _, pc := range file.ProviderConfigs { - key := pc.moduleUniqueKey() - existing, exists := m.ProviderConfigs[key] - if pc.Alias == "" { - // We allow overriding a non-existing _default_ provider configuration - // because the user model is that an absent provider configuration - // implies an empty provider configuration, which is what the user - // is therefore overriding here. - if exists { - mergeDiags := existing.merge(pc) - diags = append(diags, mergeDiags...) - } else { - m.ProviderConfigs[key] = pc - } - } else { - // For aliased providers, there must be a base configuration to - // override. This allows us to detect and report alias typos - // that might otherwise cause the override to not apply. - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base provider configuration for override", - Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), - Subject: &pc.DeclRange, - }) - continue - } - mergeDiags := existing.merge(pc) - diags = append(diags, mergeDiags...) - } - } - - if len(file.ProviderRequirements) != 0 { - mergeProviderVersionConstraints(m.ProviderRequirements, file.ProviderRequirements) - } - - for _, v := range file.Variables { - existing, exists := m.Variables[v.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base variable declaration to override", - Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name), - Subject: &v.DeclRange, - }) - continue - } - mergeDiags := existing.merge(v) - diags = append(diags, mergeDiags...) - } - - for _, l := range file.Locals { - existing, exists := m.Locals[l.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base local value definition to override", - Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), - Subject: &l.DeclRange, - }) - continue - } - mergeDiags := existing.merge(l) - diags = append(diags, mergeDiags...) - } - - for _, o := range file.Outputs { - existing, exists := m.Outputs[o.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base output definition to override", - Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name), - Subject: &o.DeclRange, - }) - continue - } - mergeDiags := existing.merge(o) - diags = append(diags, mergeDiags...) - } - - for _, mc := range file.ModuleCalls { - existing, exists := m.ModuleCalls[mc.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing module call to override", - Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), - Subject: &mc.DeclRange, - }) - continue - } - mergeDiags := existing.merge(mc) - diags = append(diags, mergeDiags...) 
- } - - for _, r := range file.ManagedResources { - key := r.moduleUniqueKey() - existing, exists := m.ManagedResources[key] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing resource to override", - Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), - Subject: &r.DeclRange, - }) - continue - } - mergeDiags := existing.merge(r) - diags = append(diags, mergeDiags...) - } - - for _, r := range file.DataResources { - key := r.moduleUniqueKey() - existing, exists := m.DataResources[key] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing data resource to override", - Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), - Subject: &r.DeclRange, - }) - continue - } - mergeDiags := existing.merge(r) - diags = append(diags, mergeDiags...) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go deleted file mode 100644 index a484ffef..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go +++ /dev/null @@ -1,188 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// ModuleCall represents a "module" block in a module or file. -type ModuleCall struct { - Name string - - SourceAddr string - SourceAddrRange hcl.Range - SourceSet bool - - Config hcl.Body - - Version VersionConstraint - - Count hcl.Expression - ForEach hcl.Expression - - Providers []PassedProviderConfig - - DependsOn []hcl.Traversal - - DeclRange hcl.Range -} - -func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) { - mc := &ModuleCall{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - schema := moduleBlockSchema - if override { - schema = schemaForOverrides(schema) - } - - content, remain, diags := block.Body.PartialContent(schema) - mc.Config = remain - - if !hclsyntax.ValidIdentifier(mc.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - if attr, exists := content.Attributes["source"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr) - diags = append(diags, valDiags...) - mc.SourceAddrRange = attr.Expr.Range() - mc.SourceSet = true - } - - if attr, exists := content.Attributes["version"]; exists { - var versionDiags hcl.Diagnostics - mc.Version, versionDiags = decodeVersionConstraint(attr) - diags = append(diags, versionDiags...) - } - - if attr, exists := content.Attributes["count"]; exists { - mc.Count = attr.Expr - - // We currently parse this, but don't yet do anything with it. 
- diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in module block", - Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), - Subject: &attr.NameRange, - }) - } - - if attr, exists := content.Attributes["for_each"]; exists { - mc.ForEach = attr.Expr - - // We currently parse this, but don't yet do anything with it. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in module block", - Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), - Subject: &attr.NameRange, - }) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - mc.DependsOn = append(mc.DependsOn, deps...) - - // We currently parse this, but don't yet do anything with it. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in module block", - Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), - Subject: &attr.NameRange, - }) - } - - if attr, exists := content.Attributes["providers"]; exists { - seen := make(map[string]hcl.Range) - pairs, pDiags := hcl.ExprMap(attr.Expr) - diags = append(diags, pDiags...) - for _, pair := range pairs { - key, keyDiags := decodeProviderConfigRef(pair.Key, "providers") - diags = append(diags, keyDiags...) - value, valueDiags := decodeProviderConfigRef(pair.Value, "providers") - diags = append(diags, valueDiags...) - if keyDiags.HasErrors() || valueDiags.HasErrors() { - continue - } - - matchKey := key.String() - if prev, exists := seen[matchKey]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider address", - Detail: fmt.Sprintf("A provider configuration was already passed to %s at %s. Each child provider configuration can be assigned only once.", matchKey, prev), - Subject: pair.Value.Range().Ptr(), - }) - continue - } - - rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range()) - seen[matchKey] = rng - mc.Providers = append(mc.Providers, PassedProviderConfig{ - InChild: key, - InParent: value, - }) - } - } - - // Reserved block types (all of them) - for _, block := range content.Blocks { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in module block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - - return mc, diags -} - -// PassedProviderConfig represents a provider config explicitly passed down to -// a child module, possibly giving it a new local address in the process. -type PassedProviderConfig struct { - InChild *ProviderConfigRef - InParent *ProviderConfigRef -} - -var moduleBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "source", - Required: true, - }, - { - Name: "version", - }, - { - Name: "count", - }, - { - Name: "for_each", - }, - { - Name: "depends_on", - }, - { - Name: "providers", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - // These are all reserved for future use. 
- {Type: "lifecycle"}, - {Type: "locals"}, - {Type: "provider", LabelNames: []string{"type"}}, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go deleted file mode 100644 index 6fb82acf..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go +++ /dev/null @@ -1,247 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// The methods in this file are used by Module.mergeFile to apply overrides -// to our different configuration elements. These methods all follow the -// pattern of mutating the receiver to incorporate settings from the parameter, -// returning error diagnostics if any aspect of the parameter cannot be merged -// into the receiver for some reason. -// -// User expectation is that anything _explicitly_ set in the given object -// should take precedence over the corresponding settings in the receiver, -// but that anything omitted in the given object should be left unchanged. -// In some cases it may be reasonable to do a "deep merge" of certain nested -// features, if it is possible to unambiguously correlate the nested elements -// and their behaviors are orthogonal to each other. - -func (p *Provider) merge(op *Provider) hcl.Diagnostics { - var diags hcl.Diagnostics - - if op.Version.Required != nil { - p.Version = op.Version - } - - p.Config = MergeBodies(p.Config, op.Config) - - return diags -} - -func mergeProviderVersionConstraints(recv map[string][]VersionConstraint, ovrd []*ProviderRequirement) { - // Any provider name that's mentioned in the override gets nilled out in - // our map so that we'll rebuild it below. Any provider not mentioned is - // left unchanged. - for _, reqd := range ovrd { - delete(recv, reqd.Name) - } - for _, reqd := range ovrd { - recv[reqd.Name] = append(recv[reqd.Name], reqd.Requirement) - } -} - -func (v *Variable) merge(ov *Variable) hcl.Diagnostics { - var diags hcl.Diagnostics - - if ov.DescriptionSet { - v.Description = ov.Description - v.DescriptionSet = ov.DescriptionSet - } - if ov.Default != cty.NilVal { - v.Default = ov.Default - } - if ov.Type != cty.NilType { - v.Type = ov.Type - } - if ov.ParsingMode != 0 { - v.ParsingMode = ov.ParsingMode - } - - // If the override file overrode type without default or vice-versa then - // it may have created an invalid situation, which we'll catch now by - // attempting to re-convert the value. - // - // Note that here we may be re-converting an already-converted base value - // from the base config. This will be a no-op if the type was not changed, - // but in particular might be user-observable in the edge case where the - // literal value in config could've been converted to the overridden type - // constraint but the converted value cannot. In practice, this situation - // should be rare since most of our conversions are interchangable. - if v.Default != cty.NilVal { - val, err := convert.Convert(v.Default, v.Type) - if err != nil { - // What exactly we'll say in the error message here depends on whether - // it was Default or Type that was overridden here. 
- switch { - case ov.Type != cty.NilType && ov.Default == cty.NilVal: - // If only the type was overridden - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err), - Subject: &ov.DeclRange, - }) - case ov.Type == cty.NilType && ov.Default != cty.NilVal: - // Only the default was overridden - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err), - Subject: &ov.DeclRange, - }) - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err), - Subject: &ov.DeclRange, - }) - } - } else { - v.Default = val - } - } - - return diags -} - -func (l *Local) merge(ol *Local) hcl.Diagnostics { - var diags hcl.Diagnostics - - // Since a local is just a single expression in configuration, the - // override definition entirely replaces the base definition, including - // the source range so that we'll send the user to the right place if - // there is an error. - l.Expr = ol.Expr - l.DeclRange = ol.DeclRange - - return diags -} - -func (o *Output) merge(oo *Output) hcl.Diagnostics { - var diags hcl.Diagnostics - - if oo.Description != "" { - o.Description = oo.Description - } - if oo.Expr != nil { - o.Expr = oo.Expr - } - if oo.SensitiveSet { - o.Sensitive = oo.Sensitive - o.SensitiveSet = oo.SensitiveSet - } - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. - if len(oo.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} - -func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { - var diags hcl.Diagnostics - - if omc.SourceSet { - mc.SourceAddr = omc.SourceAddr - mc.SourceAddrRange = omc.SourceAddrRange - mc.SourceSet = omc.SourceSet - } - - if omc.Count != nil { - mc.Count = omc.Count - } - - if omc.ForEach != nil { - mc.ForEach = omc.ForEach - } - - if len(omc.Version.Required) != 0 { - mc.Version = omc.Version - } - - mc.Config = MergeBodies(mc.Config, omc.Config) - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. - if len(mc.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: mc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} - -func (r *Resource) merge(or *Resource) hcl.Diagnostics { - var diags hcl.Diagnostics - - if r.Mode != or.Mode { - // This is always a programming error, since managed and data resources - // are kept in separate maps in the configuration structures. 
- panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode)) - } - - if or.Count != nil { - r.Count = or.Count - } - if or.ForEach != nil { - r.ForEach = or.ForEach - } - if or.ProviderConfigRef != nil { - r.ProviderConfigRef = or.ProviderConfigRef - } - if r.Mode == addrs.ManagedResourceMode { - // or.Managed is always non-nil for managed resource mode - - if or.Managed.Connection != nil { - r.Managed.Connection = or.Managed.Connection - } - if or.Managed.CreateBeforeDestroySet { - r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy - r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet - } - if len(or.Managed.IgnoreChanges) != 0 { - r.Managed.IgnoreChanges = or.Managed.IgnoreChanges - } - if or.Managed.PreventDestroySet { - r.Managed.PreventDestroy = or.Managed.PreventDestroy - r.Managed.PreventDestroySet = or.Managed.PreventDestroySet - } - if len(or.Managed.Provisioners) != 0 { - r.Managed.Provisioners = or.Managed.Provisioners - } - } - - r.Config = MergeBodies(r.Config, or.Config) - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. - if len(or.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go deleted file mode 100644 index 7b51eae8..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go +++ /dev/null @@ -1,143 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -// MergeBodies creates a new HCL body that contains a combination of the -// given base and override bodies. Attributes and blocks defined in the -// override body take precedence over those of the same name defined in -// the base body. -// -// If any block of a particular type appears in "override" then it will -// replace _all_ of the blocks of the same type in "base" in the new -// body. -func MergeBodies(base, override hcl.Body) hcl.Body { - return mergeBody{ - Base: base, - Override: override, - } -} - -// mergeBody is a hcl.Body implementation that wraps a pair of other bodies -// and allows attributes and blocks within the override to take precedence -// over those defined in the base body. -// -// This is used to deal with dynamically-processed bodies in Module.mergeFile. -// It uses a shallow-only merging strategy where direct attributes defined -// in Override will override attributes of the same name in Base, while any -// blocks defined in Override will hide all blocks of the same type in Base. -// -// This cannot possibly "do the right thing" in all cases, because we don't -// have enough information about user intent. However, this behavior is intended -// to be reasonable for simple overriding use-cases. 
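// (Illustrative sketch, not part of the deleted file.) The shallow-merge rule
// described above in practice, assuming two hcl.Body values and a schema:
//
//	merged := MergeBodies(baseBody, overrideBody)
//	content, diags := merged.Content(schema)
//	// content keeps base attributes unless overridden by name, and any block
//	// type present in overrideBody hides all baseBody blocks of that type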
-type mergeBody struct { - Base hcl.Body - Override hcl.Body -} - -var _ hcl.Body = mergeBody{} - -func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - var diags hcl.Diagnostics - baseSchema := schemaWithDynamic(schema) - overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) - - baseContent, _, cDiags := b.Base.PartialContent(baseSchema) - diags = append(diags, cDiags...) - overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema) - diags = append(diags, cDiags...) - - content := b.prepareContent(baseContent, overrideContent) - - return content, diags -} - -func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - baseSchema := schemaWithDynamic(schema) - overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) - - baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema) - diags = append(diags, cDiags...) - overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema) - diags = append(diags, cDiags...) - - content := b.prepareContent(baseContent, overrideContent) - - remain := MergeBodies(baseRemain, overrideRemain) - - return content, remain, diags -} - -func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent { - content := &hcl.BodyContent{ - Attributes: make(hcl.Attributes), - } - - // For attributes we just assign from each map in turn and let the override - // map clobber any matching entries from base. - for k, a := range base.Attributes { - content.Attributes[k] = a - } - for k, a := range override.Attributes { - content.Attributes[k] = a - } - - // Things are a little more interesting for blocks because they arrive - // as a flat list. Our merging semantics call for us to suppress blocks - // from base if at least one block of the same type appears in override. - // We explicitly do not try to correlate and deeply merge nested blocks, - // since we don't have enough context here to infer user intent. - - overriddenBlockTypes := make(map[string]bool) - for _, block := range override.Blocks { - if block.Type == "dynamic" { - overriddenBlockTypes[block.Labels[0]] = true - continue - } - overriddenBlockTypes[block.Type] = true - } - for _, block := range base.Blocks { - // We skip over dynamic blocks whose type label is an overridden type - // but note that below we do still leave them as dynamic blocks in - // the result because expanding the dynamic blocks that are left is - // done much later during the core graph walks, where we can safely - // evaluate the expressions. - if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] { - continue - } - if overriddenBlockTypes[block.Type] { - continue - } - content.Blocks = append(content.Blocks, block) - } - for _, block := range override.Blocks { - content.Blocks = append(content.Blocks, block) - } - - return content -} - -func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - var diags hcl.Diagnostics - ret := make(hcl.Attributes) - - baseAttrs, aDiags := b.Base.JustAttributes() - diags = append(diags, aDiags...) - overrideAttrs, aDiags := b.Override.JustAttributes() - diags = append(diags, aDiags...) 
- - for k, a := range baseAttrs { - ret[k] = a - } - for k, a := range overrideAttrs { - ret[k] = a - } - - return ret, diags -} - -func (b mergeBody) MissingItemRange() hcl.Range { - return b.Base.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go deleted file mode 100644 index 8c8398e0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go +++ /dev/null @@ -1,354 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/typeexpr" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// A consistent detail message for all "not a valid identifier" diagnostics. -const badIdentifierDetail = "A name must start with a letter and may contain only letters, digits, underscores, and dashes." - -// Variable represents a "variable" block in a module or file. -type Variable struct { - Name string - Description string - Default cty.Value - Type cty.Type - ParsingMode VariableParsingMode - - DescriptionSet bool - - DeclRange hcl.Range -} - -func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) { - v := &Variable{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - // Unless we're building an override, we'll set some defaults - // which we might override with attributes below. We leave these - // as zero-value in the override case so we can recognize whether - // or not they are set when we merge. - if !override { - v.Type = cty.DynamicPseudoType - v.ParsingMode = VariableParseLiteral - } - - content, diags := block.Body.Content(variableBlockSchema) - - if !hclsyntax.ValidIdentifier(v.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - // Don't allow declaration of variables that would conflict with the - // reserved attribute and block type names in a "module" block, since - // these won't be usable for child modules. - for _, attr := range moduleBlockSchema.Attributes { - if attr.Name == v.Name { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name), - Subject: &block.LabelRanges[0], - }) - } - } - for _, blockS := range moduleBlockSchema.Blocks { - if blockS.Type == v.Name { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type), - Subject: &block.LabelRanges[0], - }) - } - } - - if attr, exists := content.Attributes["description"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description) - diags = append(diags, valDiags...) - v.DescriptionSet = true - } - - if attr, exists := content.Attributes["type"]; exists { - ty, parseMode, tyDiags := decodeVariableType(attr.Expr) - diags = append(diags, tyDiags...) 
- v.Type = ty
- v.ParsingMode = parseMode
- }
-
- if attr, exists := content.Attributes["default"]; exists {
- val, valDiags := attr.Expr.Value(nil)
- diags = append(diags, valDiags...)
-
- // Convert the default to the expected type so we can catch invalid
- // defaults early and allow later code to assume validity.
- // Note that this depends on us having already processed any "type"
- // attribute above.
- // However, we can't do this if we're in an override file where
- // the type might not be set; we'll catch that during merge.
- if v.Type != cty.NilType {
- var err error
- val, err = convert.Convert(val, v.Type)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid default value for variable",
- Detail: fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err),
- Subject: attr.Expr.Range().Ptr(),
- })
- val = cty.DynamicVal
- }
- }
-
- v.Default = val
- }
-
- return v, diags
-}
-
-func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) {
- if exprIsNativeQuotedString(expr) {
- // Here we're accepting the pre-0.12 form of variable type argument where
- // the string values "string", "list" and "map" are accepted as a hint
- // about the type used primarily for deciding how to parse values
- // given on the command line and in environment variables.
- // Only the native syntax ends up in this codepath; we handle the
- // JSON syntax (which is, of course, quoted even in the new format)
- // in the normal codepath below.
- val, diags := expr.Value(nil)
- if diags.HasErrors() {
- return cty.DynamicPseudoType, VariableParseHCL, diags
- }
- str := val.AsString()
- switch str {
- case "string":
- return cty.String, VariableParseLiteral, diags
- case "list":
- return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags
- case "map":
- return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags
- default:
- return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{
- Severity: hcl.DiagError,
- Summary: "Invalid legacy variable type hint",
- Detail: `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`,
- Subject: expr.Range().Ptr(),
- }}
- }
- }
-
- // First we'll deal with some shorthand forms that the HCL-level type
- // expression parser doesn't include. These both emulate pre-0.12 behavior
- // of allowing a list or map of any element type as long as all of the
- // elements are consistent. This is the same as list(any) or map(any).
- switch hcl.ExprAsKeyword(expr) {
- case "list":
- return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil
- case "map":
- return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil
- }
-
- ty, diags := typeexpr.TypeConstraint(expr)
- if diags.HasErrors() {
- return cty.DynamicPseudoType, VariableParseHCL, diags
- }
-
- switch {
- case ty.IsPrimitiveType():
- // Primitive types use literal parsing.
- return ty, VariableParseLiteral, diags
- default:
- // Everything else uses HCL parsing
- return ty, VariableParseHCL, diags
- }
-}
-
-// VariableParsingMode defines how values of a particular variable given by
-// text-only mechanisms (command line arguments and environment variables)
-// should be parsed to produce the final value.
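// (Illustrative sketch, not part of the deleted file.) The two parsing modes
// applied to caller-supplied strings; the variable names are assumptions:
//
//	v1, _ := VariableParseLiteral.Parse("region", "us-east-1") // cty.StringVal("us-east-1")
//	v2, diags := VariableParseHCL.Parse("tags", `{ env = "dev" }`)
//	// v2 is an object value if parsing succeeded; per the doc comment above,
//	// the caller must still convert the result to the declared type and
//	// surface diags to the user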
-type VariableParsingMode rune - -// VariableParseLiteral is a variable parsing mode that just takes the given -// string directly as a cty.String value. -const VariableParseLiteral VariableParsingMode = 'L' - -// VariableParseHCL is a variable parsing mode that attempts to parse the given -// string as an HCL expression and returns the result. -const VariableParseHCL VariableParsingMode = 'H' - -// Parse uses the receiving parsing mode to process the given variable value -// string, returning the result along with any diagnostics. -// -// A VariableParsingMode does not know the expected type of the corresponding -// variable, so it's the caller's responsibility to attempt to convert the -// result to the appropriate type and return to the user any diagnostics that -// conversion may produce. -// -// The given name is used to create a synthetic filename in case any diagnostics -// must be generated about the given string value. This should be the name -// of the root module variable whose value will be populated from the given -// string. -// -// If the returned diagnostics has errors, the returned value may not be -// valid. -func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) { - switch m { - case VariableParseLiteral: - return cty.StringVal(value), nil - case VariableParseHCL: - fakeFilename := fmt.Sprintf("<value for var.%s>", name) - expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1}) - if diags.HasErrors() { - return cty.DynamicVal, diags - } - val, valDiags := expr.Value(nil) - diags = append(diags, valDiags...) - return val, diags - default: - // Should never happen - panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m)) - } -} - -// Output represents an "output" block in a module or file. -type Output struct { - Name string - Description string - Expr hcl.Expression - DependsOn []hcl.Traversal - Sensitive bool - - DescriptionSet bool - SensitiveSet bool - - DeclRange hcl.Range -} - -func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) { - o := &Output{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - schema := outputBlockSchema - if override { - schema = schemaForOverrides(schema) - } - - content, diags := block.Body.Content(schema) - - if !hclsyntax.ValidIdentifier(o.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid output name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - if attr, exists := content.Attributes["description"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description) - diags = append(diags, valDiags...) - o.DescriptionSet = true - } - - if attr, exists := content.Attributes["value"]; exists { - o.Expr = attr.Expr - } - - if attr, exists := content.Attributes["sensitive"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive) - diags = append(diags, valDiags...) - o.SensitiveSet = true - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - o.DependsOn = append(o.DependsOn, deps...) - } - - return o, diags -} - -// Local represents a single entry from a "locals" block in a module or file. -// The "locals" block itself is not represented, because it serves only to -// provide context for us to interpret its contents.
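The Parse method above is where the two modes diverge; a minimal sketch, assuming the cty and hclsyntax imports already used in this file:

    litVal, _ := VariableParseLiteral.Parse("region", "us-east-1")
    // litVal == cty.StringVal("us-east-1")

    hclVal, diags := VariableParseHCL.Parse("subnets", `["a", "b"]`)
    if !diags.HasErrors() {
        // hclVal is the tuple cty.TupleVal([]cty.Value{
        //     cty.StringVal("a"), cty.StringVal("b"),
        // })
    }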
-type Local struct { - Name string - Expr hcl.Expression - - DeclRange hcl.Range -} - -func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) { - attrs, diags := block.Body.JustAttributes() - if len(attrs) == 0 { - return nil, diags - } - - locals := make([]*Local, 0, len(attrs)) - for name, attr := range attrs { - if !hclsyntax.ValidIdentifier(name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid local value name", - Detail: badIdentifierDetail, - Subject: &attr.NameRange, - }) - } - - locals = append(locals, &Local{ - Name: name, - Expr: attr.Expr, - DeclRange: attr.Range, - }) - } - return locals, diags -} - -var variableBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - { - Name: "default", - }, - { - Name: "type", - }, - }, -} - -var outputBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - { - Name: "value", - Required: true, - }, - { - Name: "depends_on", - }, - { - Name: "sensitive", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go deleted file mode 100644 index 2a621b57..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go +++ /dev/null @@ -1,100 +0,0 @@ -package configs - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclparse" - "github.com/spf13/afero" -) - -// Parser is the main interface to read configuration files and other related -// files from disk. -// -// It retains a cache of all files that are loaded so that they can be used -// to create source code snippets in diagnostics, etc. -type Parser struct { - fs afero.Afero - p *hclparse.Parser -} - -// NewParser creates and returns a new Parser that reads files from the given -// filesystem. If a nil filesystem is passed then the system's "real" filesystem -// will be used, via afero.OsFs. -func NewParser(fs afero.Fs) *Parser { - if fs == nil { - fs = afero.OsFs{} - } - - return &Parser{ - fs: afero.Afero{Fs: fs}, - p: hclparse.NewParser(), - } -} - -// LoadHCLFile is a low-level method that reads the file at the given path, -// parses it, and returns the hcl.Body representing its root. In many cases -// it is better to use one of the other Load*File methods on this type, -// which additionally decode the root body in some way and return a higher-level -// construct. -// -// If the file cannot be read at all -- e.g. because it does not exist -- then -// this method will return a nil body and error diagnostics. In this case -// callers may wish to ignore the provided error diagnostics and produce -// a more context-sensitive error instead. -// -// The file will be parsed using the HCL native syntax unless the filename -// ends with ".json", in which case the HCL JSON syntax will be used. 
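A minimal sketch of the parser entry points just described (afero.NewMemMapFs and afero.WriteFile belong to the vendored spf13/afero API; the filename and contents are hypothetical, and the package being internal to the SDK makes this purely illustrative):

    fs := afero.NewMemMapFs()
    _ = afero.WriteFile(fs, "main.tf",
        []byte(`locals { greeting = "hello" }`), 0644)

    p := configs.NewParser(fs)
    body, diags := p.LoadHCLFile("main.tf")
    if diags.HasErrors() {
        // report diagnostics; body may be hcl.EmptyBody()
    }
    _ = body // the hcl.Body for the file's root, also cached in p.Sources()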
-func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) { - src, err := p.fs.ReadFile(path) - - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to read file", - Detail: fmt.Sprintf("The file %q could not be read.", path), - }, - } - } - - var file *hcl.File - var diags hcl.Diagnostics - switch { - case strings.HasSuffix(path, ".json"): - file, diags = p.p.ParseJSON(src, path) - default: - file, diags = p.p.ParseHCL(src, path) - } - - // If the returned file or body is nil, then we'll return a non-nil empty - // body so we'll meet our contract that nil means an error reading the file. - if file == nil || file.Body == nil { - return hcl.EmptyBody(), diags - } - - return file.Body, diags -} - -// Sources returns a map of the cached source buffers for all files that -// have been loaded through this parser, with source filenames (as requested -// when each file was opened) as the keys. -func (p *Parser) Sources() map[string][]byte { - return p.p.Sources() -} - -// ForceFileSource artificially adds source code to the cache of file sources, -// as if it had been loaded from the given filename. -// -// This should be used only in special situations where configuration is loaded -// some other way. Most callers should load configuration via methods of -// Parser, which will update the sources cache automatically. -func (p *Parser) ForceFileSource(filename string, src []byte) { - // We'll make a synthetic hcl.File here just so we can reuse the - // existing cache. - p.p.AddFile(filename, &hcl.File{ - Body: hcl.EmptyBody(), - Bytes: src, - }) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go deleted file mode 100644 index d4cbc945..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go +++ /dev/null @@ -1,247 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -// LoadConfigFile reads the file at the given path and parses it as a config -// file. -// -// If the file cannot be read -- for example, if it does not exist -- then -// a nil *File will be returned along with error diagnostics. Callers may wish -// to disregard the returned diagnostics in this case and instead generate -// their own error message(s) with additional context. -// -// If the returned diagnostics has errors when a non-nil map is returned -// then the map may be incomplete but should be valid enough for careful -// static analysis. -// -// This method wraps LoadHCLFile, and so it inherits the syntax selection -// behaviors documented for that method. -func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) { - return p.loadConfigFile(path, false) -} - -// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes -// certain required attribute constraints in order to interpret the given -// file as an overrides file. -func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) { - return p.loadConfigFile(path, true) -} - -func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) { - - body, diags := p.LoadHCLFile(path) - if body == nil { - return nil, diags - } - - file := &File{} - - var reqDiags hcl.Diagnostics - file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body) - diags = append(diags, reqDiags...) 
- - content, contentDiags := body.Content(configFileSchema) - diags = append(diags, contentDiags...) - - for _, block := range content.Blocks { - switch block.Type { - - case "terraform": - content, contentDiags := block.Body.Content(terraformBlockSchema) - diags = append(diags, contentDiags...) - - // We ignore the "terraform_version" attribute here because - // sniffCoreVersionRequirements already dealt with that above. - - for _, innerBlock := range content.Blocks { - switch innerBlock.Type { - - case "backend": - backendCfg, cfgDiags := decodeBackendBlock(innerBlock) - diags = append(diags, cfgDiags...) - if backendCfg != nil { - file.Backends = append(file.Backends, backendCfg) - } - - case "required_providers": - reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) - diags = append(diags, reqsDiags...) - file.ProviderRequirements = append(file.ProviderRequirements, reqs...) - - default: - // Should never happen because the above cases should be exhaustive - // for all block type names in our schema. - continue - - } - } - - case "provider": - cfg, cfgDiags := decodeProviderBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ProviderConfigs = append(file.ProviderConfigs, cfg) - } - - case "variable": - cfg, cfgDiags := decodeVariableBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.Variables = append(file.Variables, cfg) - } - - case "locals": - defs, defsDiags := decodeLocalsBlock(block) - diags = append(diags, defsDiags...) - file.Locals = append(file.Locals, defs...) - - case "output": - cfg, cfgDiags := decodeOutputBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.Outputs = append(file.Outputs, cfg) - } - - case "module": - cfg, cfgDiags := decodeModuleBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ModuleCalls = append(file.ModuleCalls, cfg) - } - - case "resource": - cfg, cfgDiags := decodeResourceBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ManagedResources = append(file.ManagedResources, cfg) - } - - case "data": - cfg, cfgDiags := decodeDataBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.DataResources = append(file.DataResources, cfg) - } - - default: - // Should never happen because the above cases should be exhaustive - // for all block type names in our schema. - continue - - } - } - - return file, diags -} - -// sniffCoreVersionRequirements does minimal parsing of the given body for -// "terraform" blocks with "required_version" attributes, returning the -// requirements found. -// -// This is intended to maximize the chance that we'll be able to read the -// requirements (syntax errors notwithstanding) even if the config file contains -// constructs that might've been added in future Terraform versions -// -// This is a "best effort" sort of method which will return constraints it is -// able to find, but may return no constraints at all if the given body is -// so invalid that it cannot be decoded at all. -func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) { - rootContent, _, diags := body.PartialContent(configFileVersionSniffRootSchema) - - var constraints []VersionConstraint - - for _, block := range rootContent.Blocks { - content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema) - diags = append(diags, blockDiags...) 
- - attr, exists := content.Attributes["required_version"] - if !exists { - continue - } - - constraint, constraintDiags := decodeVersionConstraint(attr) - diags = append(diags, constraintDiags...) - if !constraintDiags.HasErrors() { - constraints = append(constraints, constraint) - } - } - - return constraints, diags -} - -// configFileSchema is the schema for the top-level of a config file. We use -// the low-level HCL API for this level so we can easily deal with each -// block type separately with its own decoding logic. -var configFileSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - }, - { - Type: "provider", - LabelNames: []string{"name"}, - }, - { - Type: "variable", - LabelNames: []string{"name"}, - }, - { - Type: "locals", - }, - { - Type: "output", - LabelNames: []string{"name"}, - }, - { - Type: "module", - LabelNames: []string{"name"}, - }, - { - Type: "resource", - LabelNames: []string{"type", "name"}, - }, - { - Type: "data", - LabelNames: []string{"type", "name"}, - }, - }, -} - -// terraformBlockSchema is the schema for a top-level "terraform" block in -// a configuration file. -var terraformBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "required_version", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "backend", - LabelNames: []string{"type"}, - }, - { - Type: "required_providers", - }, - }, -} - -// configFileVersionSniffRootSchema is a schema for sniffCoreVersionRequirements -var configFileVersionSniffRootSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - }, - }, -} - -// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements -var configFileVersionSniffBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "required_version", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go deleted file mode 100644 index afdd6983..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go +++ /dev/null @@ -1,163 +0,0 @@ -package configs - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/hashicorp/hcl/v2" -) - -// LoadConfigDir reads the .tf and .tf.json files in the given directory -// as config files (using LoadConfigFile) and then combines these files into -// a single Module. -// -// If this method returns nil, that indicates that the given directory does not -// exist at all or could not be opened for some reason. Callers may wish to -// detect this case and ignore the returned diagnostics so that they can -// produce a more context-aware error message in that case. -// -// If this method returns a non-nil module while error diagnostics are returned -// then the module may be incomplete but can be used carefully for static -// analysis. -// -// This file does not consider a directory with no files to be an error, and -// will simply return an empty module in that case. Callers should first call -// Parser.IsConfigDir if they wish to recognize that situation. -// -// .tf files are parsed using the HCL native syntax while .tf.json files are -// parsed using the HCL JSON syntax. 
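The sniffing above is deliberately tolerant: it uses PartialContent with the two narrow sniff schemas, so constructs from future Terraform versions do not block reading required_version. A minimal sketch, written as if inside this package with hypothetical source text:

    src := `
    terraform {
      required_version = ">= 0.12"
    }
    some_future_block "x" {}
    `
    f, _ := hclsyntax.ParseConfig([]byte(src), "versions.tf",
        hcl.Pos{Line: 1, Column: 1})
    constraints, diags := sniffCoreVersionRequirements(f.Body)
    _ = diags
    // constraints holds the single ">= 0.12" VersionConstraint even though
    // some_future_block would be rejected by the full configFileSchema.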
-func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) { - primaryPaths, overridePaths, diags := p.dirFiles(path) - if diags.HasErrors() { - return nil, diags - } - - primary, fDiags := p.loadFiles(primaryPaths, false) - diags = append(diags, fDiags...) - override, fDiags := p.loadFiles(overridePaths, true) - diags = append(diags, fDiags...) - - mod, modDiags := NewModule(primary, override) - diags = append(diags, modDiags...) - - mod.SourceDir = path - - return mod, diags -} - -// ConfigDirFiles returns lists of the primary and override files configuration -// files in the given directory. -// -// If the given directory does not exist or cannot be read, error diagnostics -// are returned. If errors are returned, the resulting lists may be incomplete. -func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { - return p.dirFiles(dir) -} - -// IsConfigDir determines whether the given path refers to a directory that -// exists and contains at least one Terraform config file (with a .tf or -// .tf.json extension.) -func (p *Parser) IsConfigDir(path string) bool { - primaryPaths, overridePaths, _ := p.dirFiles(path) - return (len(primaryPaths) + len(overridePaths)) > 0 -} - -func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) { - var files []*File - var diags hcl.Diagnostics - - for _, path := range paths { - var f *File - var fDiags hcl.Diagnostics - if override { - f, fDiags = p.LoadConfigFileOverride(path) - } else { - f, fDiags = p.LoadConfigFile(path) - } - diags = append(diags, fDiags...) - if f != nil { - files = append(files, f) - } - } - - return files, diags -} - -func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { - infos, err := p.fs.ReadDir(dir) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Failed to read module directory", - Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), - }) - return - } - - for _, info := range infos { - if info.IsDir() { - // We only care about files - continue - } - - name := info.Name() - ext := fileExt(name) - if ext == "" || IsIgnoredFile(name) { - continue - } - - baseName := name[:len(name)-len(ext)] // strip extension - isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") - - fullPath := filepath.Join(dir, name) - if isOverride { - override = append(override, fullPath) - } else { - primary = append(primary, fullPath) - } - } - - return -} - -// fileExt returns the Terraform configuration extension of the given -// path, or a blank string if it is not a recognized extension. -func fileExt(path string) string { - if strings.HasSuffix(path, ".tf") { - return ".tf" - } else if strings.HasSuffix(path, ".tf.json") { - return ".tf.json" - } else { - return "" - } -} - -// IsIgnoredFile returns true if the given filename (which must not have a -// directory path ahead of it) should be ignored as e.g. an editor swap file. -func IsIgnoredFile(name string) bool { - return strings.HasPrefix(name, ".") || // Unix-like hidden files - strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs -} - -// IsEmptyDir returns true if the given filesystem path contains no Terraform -// configuration files. 
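A minimal sketch of the directory-level entry points above, plus how dirFiles would classify a directory listing (the path and filenames are hypothetical):

    p := configs.NewParser(nil) // nil selects the real OS filesystem
    if p.IsConfigDir("./modules/network") {
        mod, diags := p.LoadConfigDir("./modules/network")
        _ = diags
        _ = mod // merged from primary files, then override files
    }

    // main.tf           -> primary
    // outputs.tf.json   -> primary
    // override.tf       -> override
    // prod_override.tf  -> override
    // .main.tf.swp      -> skipped by IsIgnoredFile (leading dot)
    // notes.txt         -> skipped by fileExt (not .tf / .tf.json)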
-// -// Unlike the methods of the Parser type, this function always consults the -// real filesystem, and thus it isn't appropriate to use when working with -// configuration loaded from a plan file. -func IsEmptyDir(path string) (bool, error) { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - return true, nil - } - - p := NewParser(nil) - fs, os, err := p.dirFiles(path) - if err != nil { - return false, err - } - - return len(fs) == 0 && len(os) == 0, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go deleted file mode 100644 index 10d98e5b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go +++ /dev/null @@ -1,43 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// LoadValuesFile reads the file at the given path and parses it as a "values -// file", which is an HCL config file whose top-level attributes are treated -// as arbitrary key.value pairs. -// -// If the file cannot be read -- for example, if it does not exist -- then -// a nil map will be returned along with error diagnostics. Callers may wish -// to disregard the returned diagnostics in this case and instead generate -// their own error message(s) with additional context. -// -// If the returned diagnostics has errors when a non-nil map is returned -// then the map may be incomplete but should be valid enough for careful -// static analysis. -// -// This method wraps LoadHCLFile, and so it inherits the syntax selection -// behaviors documented for that method. -func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) { - body, diags := p.LoadHCLFile(path) - if body == nil { - return nil, diags - } - - vals := make(map[string]cty.Value) - attrs, attrDiags := body.JustAttributes() - diags = append(diags, attrDiags...) - if attrs == nil { - return vals, diags - } - - for name, attr := range attrs { - val, valDiags := attr.Expr.Value(nil) - diags = append(diags, valDiags...) - vals[name] = val - } - - return vals, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go deleted file mode 100644 index cb9ba1f3..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go +++ /dev/null @@ -1,144 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Provider represents a "provider" block in a module or file. A provider -// block is a provider configuration, and there can be zero or more -// configurations for each actual provider. 
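A minimal sketch of decoding such a block (written as if inside this package; the configuration text is hypothetical, and region is deliberately outside the schema so it stays in Config via PartialContent):

    src := `
    provider "aws" {
      alias   = "west"
      version = "~> 2.0"
      region  = "us-west-2"
    }
    `
    f, _ := hclsyntax.ParseConfig([]byte(src), "providers.tf",
        hcl.Pos{Line: 1, Column: 1})
    schema := &hcl.BodySchema{Blocks: []hcl.BlockHeaderSchema{
        {Type: "provider", LabelNames: []string{"name"}},
    }}
    content, _ := f.Body.Content(schema)
    pc, diags := decodeProviderBlock(content.Blocks[0])
    _ = diags
    // pc.Name == "aws", pc.Alias == "west"
    // pc.Addr() == addrs.ProviderConfig{Type: "aws", Alias: "west"}
    // pc.moduleUniqueKey() == "aws.west"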
-type Provider struct { - Name string - NameRange hcl.Range - Alias string - AliasRange *hcl.Range // nil if no alias set - - Version VersionConstraint - - Config hcl.Body - - DeclRange hcl.Range -} - -func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { - content, config, diags := block.Body.PartialContent(providerBlockSchema) - - provider := &Provider{ - Name: block.Labels[0], - NameRange: block.LabelRanges[0], - Config: config, - DeclRange: block.DefRange, - } - - if attr, exists := content.Attributes["alias"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) - diags = append(diags, valDiags...) - provider.AliasRange = attr.Expr.Range().Ptr() - - if !hclsyntax.ValidIdentifier(provider.Alias) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration alias", - Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), - }) - } - } - - if attr, exists := content.Attributes["version"]; exists { - var versionDiags hcl.Diagnostics - provider.Version, versionDiags = decodeVersionConstraint(attr) - diags = append(diags, versionDiags...) - } - - // Reserved attribute names - for _, name := range []string{"count", "depends_on", "for_each", "source"} { - if attr, exists := content.Attributes[name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in provider block", - Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), - Subject: &attr.NameRange, - }) - } - } - - // Reserved block types (all of them) - for _, block := range content.Blocks { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in provider block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - - return provider, diags -} - -// Addr returns the address of the receiving provider configuration, relative -// to its containing module. -func (p *Provider) Addr() addrs.ProviderConfig { - return addrs.ProviderConfig{ - Type: p.Name, - Alias: p.Alias, - } -} - -func (p *Provider) moduleUniqueKey() string { - if p.Alias != "" { - return fmt.Sprintf("%s.%s", p.Name, p.Alias) - } - return p.Name -} - -// ProviderRequirement represents a declaration of a dependency on a particular -// provider version without actually configuring that provider. This is used in -// child modules that expect a provider to be passed in from their parent. -type ProviderRequirement struct { - Name string - Requirement VersionConstraint -} - -func decodeRequiredProvidersBlock(block *hcl.Block) ([]*ProviderRequirement, hcl.Diagnostics) { - attrs, diags := block.Body.JustAttributes() - var reqs []*ProviderRequirement - for name, attr := range attrs { - req, reqDiags := decodeVersionConstraint(attr) - diags = append(diags, reqDiags...) - if !diags.HasErrors() { - reqs = append(reqs, &ProviderRequirement{ - Name: name, - Requirement: req, - }) - } - } - return reqs, diags -} - -var providerBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "alias", - }, - { - Name: "version", - }, - - // Attribute names reserved for future expansion. - {Name: "count"}, - {Name: "depends_on"}, - {Name: "for_each"}, - {Name: "source"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - // _All_ of these are reserved for future expansion. 
- {Type: "lifecycle"}, - {Type: "locals"}, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go deleted file mode 100644 index 47b65679..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go +++ /dev/null @@ -1,150 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" -) - -// Provisioner represents a "provisioner" block when used within a -// "resource" block in a module or file. -type Provisioner struct { - Type string - Config hcl.Body - Connection *Connection - When ProvisionerWhen - OnFailure ProvisionerOnFailure - - DeclRange hcl.Range - TypeRange hcl.Range -} - -func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { - pv := &Provisioner{ - Type: block.Labels[0], - TypeRange: block.LabelRanges[0], - DeclRange: block.DefRange, - When: ProvisionerWhenCreate, - OnFailure: ProvisionerOnFailureFail, - } - - content, config, diags := block.Body.PartialContent(provisionerBlockSchema) - pv.Config = config - - if attr, exists := content.Attributes["when"]; exists { - expr, shimDiags := shimTraversalInString(attr.Expr, true) - diags = append(diags, shimDiags...) - - switch hcl.ExprAsKeyword(expr) { - case "create": - pv.When = ProvisionerWhenCreate - case "destroy": - pv.When = ProvisionerWhenDestroy - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid \"when\" keyword", - Detail: "The \"when\" argument requires one of the following keywords: create or destroy.", - Subject: expr.Range().Ptr(), - }) - } - } - - if attr, exists := content.Attributes["on_failure"]; exists { - expr, shimDiags := shimTraversalInString(attr.Expr, true) - diags = append(diags, shimDiags...) - - switch hcl.ExprAsKeyword(expr) { - case "continue": - pv.OnFailure = ProvisionerOnFailureContinue - case "fail": - pv.OnFailure = ProvisionerOnFailureFail - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid \"on_failure\" keyword", - Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - - var seenConnection *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - - case "connection": - if seenConnection != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate connection block", - Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenConnection = block - - //conn, connDiags := decodeConnectionBlock(block) - //diags = append(diags, connDiags...) - pv.Connection = &Connection{ - Config: block.Body, - DeclRange: block.DefRange, - } - - default: - // Any other block types are ones we've reserved for future use, - // so they get a generic message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in provisioner block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return pv, diags -} - -// Connection represents a "connection" block when used within either a -// "resource" or "provisioner" block in a module or file. 
-type Connection struct { - Config hcl.Body - - DeclRange hcl.Range -} - -// ProvisionerWhen is an enum for valid values for when to run provisioners. -type ProvisionerWhen int - -//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerWhen - -const ( - ProvisionerWhenInvalid ProvisionerWhen = iota - ProvisionerWhenCreate - ProvisionerWhenDestroy -) - -// ProvisionerOnFailure is an enum for valid values for on_failure options -// for provisioners. -type ProvisionerOnFailure int - -//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerOnFailure - -const ( - ProvisionerOnFailureInvalid ProvisionerOnFailure = iota - ProvisionerOnFailureContinue - ProvisionerOnFailureFail -) - -var provisionerBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - {Name: "when"}, - {Name: "on_failure"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "connection"}, - {Type: "lifecycle"}, // reserved for future use - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go deleted file mode 100644 index 7ff5a6e0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ProvisionerOnFailureInvalid-0] - _ = x[ProvisionerOnFailureContinue-1] - _ = x[ProvisionerOnFailureFail-2] -} - -const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail" - -var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79} - -func (i ProvisionerOnFailure) String() string { - if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) { - return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go deleted file mode 100644 index 9f21b3ac..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[ProvisionerWhenInvalid-0] - _ = x[ProvisionerWhenCreate-1] - _ = x[ProvisionerWhenDestroy-2] -} - -const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy" - -var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65} - -func (i ProvisionerWhen) String() string { - if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) { - return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go deleted file mode 100644 index cd9991a3..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go +++ /dev/null @@ -1,490 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Resource represents a "resource" or "data" block in a module or file. -type Resource struct { - Mode addrs.ResourceMode - Name string - Type string - Config hcl.Body - Count hcl.Expression - ForEach hcl.Expression - - ProviderConfigRef *ProviderConfigRef - - DependsOn []hcl.Traversal - - // Managed is populated only for Mode = addrs.ManagedResourceMode, - // containing the additional fields that apply to managed resources. - // For all other resource modes, this field is nil. - Managed *ManagedResource - - DeclRange hcl.Range - TypeRange hcl.Range -} - -// ManagedResource represents a "resource" block in a module or file. -type ManagedResource struct { - Connection *Connection - Provisioners []*Provisioner - - CreateBeforeDestroy bool - PreventDestroy bool - IgnoreChanges []hcl.Traversal - IgnoreAllChanges bool - - CreateBeforeDestroySet bool - PreventDestroySet bool -} - -func (r *Resource) moduleUniqueKey() string { - return r.Addr().String() -} - -// Addr returns a resource address for the receiver that is relative to the -// resource's containing module. -func (r *Resource) Addr() addrs.Resource { - return addrs.Resource{ - Mode: r.Mode, - Type: r.Type, - Name: r.Name, - } -} - -// ProviderConfigAddr returns the address for the provider configuration -// that should be used for this resource. This function implements the -// default behavior of extracting the type from the resource type name if -// an explicit "provider" argument was not provided. 
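For the default-provider behavior documented above, a minimal sketch (the resource values are hypothetical; the prefix rule comes from addrs.Resource.DefaultProviderConfig):

    r := &Resource{
        Mode: addrs.ManagedResourceMode,
        Type: "aws_instance",
        Name: "web",
    }
    // r.Addr().String()           == "aws_instance.web"
    // r.ProviderConfigAddr().Type == "aws" (the resource type name up to
    //                                       the first underscore)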
-func (r *Resource) ProviderConfigAddr() addrs.ProviderConfig { - if r.ProviderConfigRef == nil { - return r.Addr().DefaultProviderConfig() - } - - return addrs.ProviderConfig{ - Type: r.ProviderConfigRef.Name, - Alias: r.ProviderConfigRef.Alias, - } -} - -func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { - r := &Resource{ - Mode: addrs.ManagedResourceMode, - Type: block.Labels[0], - Name: block.Labels[1], - DeclRange: block.DefRange, - TypeRange: block.LabelRanges[0], - Managed: &ManagedResource{}, - } - - content, remain, diags := block.Body.PartialContent(resourceBlockSchema) - r.Config = remain - - if !hclsyntax.ValidIdentifier(r.Type) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - if !hclsyntax.ValidIdentifier(r.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[1], - }) - } - - if attr, exists := content.Attributes["count"]; exists { - r.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - r.ForEach = attr.Expr - // Cannot have count and for_each on the same resource block - if r.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - } - - if attr, exists := content.Attributes["provider"]; exists { - var providerDiags hcl.Diagnostics - r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") - diags = append(diags, providerDiags...) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - r.DependsOn = append(r.DependsOn, deps...) - } - - var seenLifecycle *hcl.Block - var seenConnection *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - case "lifecycle": - if seenLifecycle != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate lifecycle block", - Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenLifecycle = block - - lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) - diags = append(diags, lcDiags...) - - if attr, exists := lcContent.Attributes["create_before_destroy"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy) - diags = append(diags, valDiags...) - r.Managed.CreateBeforeDestroySet = true - } - - if attr, exists := lcContent.Attributes["prevent_destroy"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy) - diags = append(diags, valDiags...) - r.Managed.PreventDestroySet = true - } - - if attr, exists := lcContent.Attributes["ignore_changes"]; exists { - - // ignore_changes can either be a list of relative traversals - // or it can be just the keyword "all" to ignore changes to this - // resource entirely. 
- // ignore_changes = [ami, instance_type] - // ignore_changes = all - // We also allow two legacy forms for compatibility with earlier - // versions: - // ignore_changes = ["ami", "instance_type"] - // ignore_changes = ["*"] - - kw := hcl.ExprAsKeyword(attr.Expr) - - switch { - case kw == "all": - r.Managed.IgnoreAllChanges = true - default: - exprs, listDiags := hcl.ExprList(attr.Expr) - diags = append(diags, listDiags...) - - var ignoreAllRange hcl.Range - - for _, expr := range exprs { - - // our expr might be the literal string "*", which - // we accept as a deprecated way of saying "all". - if shimIsIgnoreChangesStar(expr) { - r.Managed.IgnoreAllChanges = true - ignoreAllRange = expr.Range() - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Deprecated ignore_changes wildcard", - Detail: "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.", - Subject: attr.Expr.Range().Ptr(), - }) - continue - } - - expr, shimDiags := shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.RelTraversalForExpr(expr) - diags = append(diags, travDiags...) - if len(traversal) != 0 { - r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal) - } - } - - if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid ignore_changes ruleset", - Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.", - Subject: &ignoreAllRange, - Context: attr.Expr.Range().Ptr(), - }) - } - - } - - } - - case "connection": - if seenConnection != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate connection block", - Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenConnection = block - - r.Managed.Connection = &Connection{ - Config: block.Body, - DeclRange: block.DefRange, - } - - case "provisioner": - pv, pvDiags := decodeProvisionerBlock(block) - diags = append(diags, pvDiags...) - if pv != nil { - r.Managed.Provisioners = append(r.Managed.Provisioners, pv) - } - - default: - // Any other block types are ones we've reserved for future use, - // so they get a generic message. 
- diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in resource block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return r, diags -} - -func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { - r := &Resource{ - Mode: addrs.DataResourceMode, - Type: block.Labels[0], - Name: block.Labels[1], - DeclRange: block.DefRange, - TypeRange: block.LabelRanges[0], - } - - content, remain, diags := block.Body.PartialContent(dataBlockSchema) - r.Config = remain - - if !hclsyntax.ValidIdentifier(r.Type) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - if !hclsyntax.ValidIdentifier(r.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data resource name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[1], - }) - } - - if attr, exists := content.Attributes["count"]; exists { - r.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - r.ForEach = attr.Expr - // Cannot have count and for_each on the same data block - if r.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - } - - if attr, exists := content.Attributes["provider"]; exists { - var providerDiags hcl.Diagnostics - r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") - diags = append(diags, providerDiags...) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - r.DependsOn = append(r.DependsOn, deps...) - } - - for _, block := range content.Blocks { - // All of the block types we accept are just reserved for future use, but some get a specialized error message. - switch block.Type { - case "lifecycle": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported lifecycle block", - Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.", - Subject: &block.DefRange, - }) - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in data block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return r, diags -} - -type ProviderConfigRef struct { - Name string - NameRange hcl.Range - Alias string - AliasRange *hcl.Range // nil if alias not set -} - -func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var shimDiags hcl.Diagnostics - expr, shimDiags = shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.AbsTraversalForExpr(expr) - - // AbsTraversalForExpr produces only generic errors, so we'll discard - // the errors given and produce our own with extra context. If we didn't - // get any errors then we might still have warnings, though. 
- if !travDiags.HasErrors() { - diags = append(diags, travDiags...) - } - - if len(traversal) < 1 || len(traversal) > 2 { - // A provider reference was given as a string literal in the legacy - // configuration language and there are lots of examples out there - // showing that usage, so we'll sniff for that situation here and - // produce a specialized error message for it to help users find - // the new correct form. - if exprIsNativeQuotedString(expr) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: "A provider configuration reference must not be given in quotes.", - Subject: expr.Range().Ptr(), - }) - return nil, diags - } - - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), - Subject: expr.Range().Ptr(), - }) - return nil, diags - } - - ret := &ProviderConfigRef{ - Name: traversal.RootName(), - NameRange: traversal[0].SourceRange(), - } - - if len(traversal) > 1 { - aliasStep, ok := traversal[1].(hcl.TraverseAttr) - if !ok { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", - Subject: traversal[1].SourceRange().Ptr(), - }) - return ret, diags - } - - ret.Alias = aliasStep.Name - ret.AliasRange = aliasStep.SourceRange().Ptr() - } - - return ret, diags -} - -// Addr returns the provider config address corresponding to the receiving -// config reference. -// -// This is a trivial conversion, essentially just discarding the source -// location information and keeping just the addressing information. 
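Concretely, the reference decoding above accepts these forms (a hypothetical summary, not exhaustive):

    provider = aws         -> ProviderConfigRef{Name: "aws"}
    provider = aws.west    -> ProviderConfigRef{Name: "aws", Alias: "west"}
    provider = "aws.west"  -> same result plus a deprecation warning, via
                              the quoted-traversal shim
    provider = aws.we.st   -> error: a type name plus at most one ".alias"
                              attribute step is accepted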
-func (r *ProviderConfigRef) Addr() addrs.ProviderConfig { - return addrs.ProviderConfig{ - Type: r.Name, - Alias: r.Alias, - } -} - -func (r *ProviderConfigRef) String() string { - if r == nil { - return "" - } - if r.Alias != "" { - return fmt.Sprintf("%s.%s", r.Name, r.Alias) - } - return r.Name -} - -var commonResourceAttributes = []hcl.AttributeSchema{ - { - Name: "count", - }, - { - Name: "for_each", - }, - { - Name: "provider", - }, - { - Name: "depends_on", - }, -} - -var resourceBlockSchema = &hcl.BodySchema{ - Attributes: commonResourceAttributes, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "locals"}, // reserved for future use - {Type: "lifecycle"}, - {Type: "connection"}, - {Type: "provisioner", LabelNames: []string{"type"}}, - }, -} - -var dataBlockSchema = &hcl.BodySchema{ - Attributes: commonResourceAttributes, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "lifecycle"}, // reserved for future use - {Type: "locals"}, // reserved for future use - }, -} - -var resourceLifecycleBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "create_before_destroy", - }, - { - Name: "prevent_destroy", - }, - { - Name: "ignore_changes", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go deleted file mode 100644 index cd914e5d..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go +++ /dev/null @@ -1,118 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes -// corresponding to the elements given in the values map. -// -// This is useful in situations where, for example, values provided on the -// command line can override values given in configuration, using MergeBodies. -// -// The given filename is used in case any diagnostics are returned. Since -// the created body is synthetic, it is likely that this will not be a "real" -// filename. For example, if from a command line argument it could be -// a representation of that argument's name, such as "-var=...". 
-func SynthBody(filename string, values map[string]cty.Value) hcl.Body { - return synthBody{ - Filename: filename, - Values: values, - } -} - -type synthBody struct { - Filename string - Values map[string]cty.Value -} - -func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, remain, diags := b.PartialContent(schema) - remainS := remain.(synthBody) - for name := range remainS.Values { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported attribute", - Detail: fmt.Sprintf("An attribute named %q is not expected here.", name), - Subject: b.synthRange().Ptr(), - }) - } - return content, diags -} - -func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - content := &hcl.BodyContent{ - Attributes: make(hcl.Attributes), - MissingItemRange: b.synthRange(), - } - - remainValues := make(map[string]cty.Value) - for attrName, val := range b.Values { - remainValues[attrName] = val - } - - for _, attrS := range schema.Attributes { - delete(remainValues, attrS.Name) - val, defined := b.Values[attrS.Name] - if !defined { - if attrS.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing required attribute", - Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), - Subject: b.synthRange().Ptr(), - }) - } - continue - } - content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val) - } - - // We just ignore blocks altogether, because this body type never has - // nested blocks. - - remain := synthBody{ - Filename: b.Filename, - Values: remainValues, - } - - return content, remain, diags -} - -func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - ret := make(hcl.Attributes) - for name, val := range b.Values { - ret[name] = b.synthAttribute(name, val) - } - return ret, nil -} - -func (b synthBody) MissingItemRange() hcl.Range { - return b.synthRange() -} - -func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute { - rng := b.synthRange() - return &hcl.Attribute{ - Name: name, - Expr: &hclsyntax.LiteralValueExpr{ - Val: val, - SrcRange: rng, - }, - NameRange: rng, - Range: rng, - } -} - -func (b synthBody) synthRange() hcl.Range { - return hcl.Range{ - Filename: b.Filename, - Start: hcl.Pos{Line: 1, Column: 1}, - End: hcl.Pos{Line: 1, Column: 1}, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go deleted file mode 100644 index e135546f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go +++ /dev/null @@ -1,63 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// exprIsNativeQuotedString determines whether the given expression looks like -// it's a quoted string in the HCL native syntax. -// -// This should be used sparingly only for situations where our legacy HCL -// decoding would've expected a keyword or reference in quotes but our new -// decoding expects the keyword or reference to be provided directly as -// an identifier-based expression. 
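A minimal sketch of SynthBody in the role described above (the "-var=..." filename and values are hypothetical; MergeBodies is this package's merge helper, whose attribute-override behavior appears at the top of this change):

    var fileBody hcl.Body // e.g. obtained from Parser.LoadHCLFile

    cliBody := configs.SynthBody("-var=...", map[string]cty.Value{
        "region": cty.StringVal("us-east-1"),
    })
    merged := configs.MergeBodies(fileBody, cliBody)
    // Attributes in cliBody now override same-named attributes in fileBody.
    _ = merged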
-func exprIsNativeQuotedString(expr hcl.Expression) bool { - _, ok := expr.(*hclsyntax.TemplateExpr) - return ok -} - -// schemaForOverrides takes a *hcl.BodySchema and produces a new one that is -// equivalent except that any required attributes are forced to not be required. -// -// This is useful for dealing with "override" config files, which are allowed -// to omit things that they don't wish to override from the main configuration. -// -// The returned schema may have some pointers in common with the given schema, -// so neither the given schema nor the returned schema should be modified after -// using this function in order to avoid confusion. -// -// Overrides are rarely used, so it's recommended to just create the override -// schema on the fly only when it's needed, rather than storing it in a global -// variable as we tend to do for a primary schema. -func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), - Blocks: schema.Blocks, - } - - for i, attrS := range schema.Attributes { - ret.Attributes[i] = attrS - ret.Attributes[i].Required = false - } - - return ret -} - -// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that -// is equivalent except that it accepts an additional block type "dynamic" with -// a single label, used to recognize usage of the HCL dynamic block extension. -func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), - } - - copy(ret.Blocks, schema.Blocks) - ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ - Type: "dynamic", - LabelNames: []string{"type"}, - }) - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go deleted file mode 100644 index c02ad4b5..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go +++ /dev/null @@ -1,45 +0,0 @@ -package configs - -// VariableTypeHint is an enumeration used for the Variable.TypeHint field, -// which is an incompletely-specified type for the variable which is used -// as a hint for whether a value provided in an ambiguous context (on the -// command line or in an environment variable) should be taken literally as a -// string or parsed as an HCL expression to produce a data structure. -// -// The type hint is applied to runtime values as well, but since it does not -// accurately describe a precise type it is not fully-sufficient to infer -// the dynamic type of a value passed through a variable. -// -// These hints use inaccurate terminology for historical reasons. Full details -// are in the documentation for each constant in this enumeration, but in -// summary: -// -// TypeHintString requires a primitive type -// TypeHintList requires a type that could be converted to a tuple -// TypeHintMap requires a type that could be converted to an object -type VariableTypeHint rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type VariableTypeHint - -// TypeHintNone indicates the absence of a type hint. Values specified in -// ambiguous contexts will be treated as literal strings, as if TypeHintString -// were selected, but no runtime value checks will be applied. 
This is reasonable -// type hint for a module that is never intended to be used at the top-level -// of a configuration, since descendent modules never receive values from -// ambiguous contexts. -const TypeHintNone VariableTypeHint = 0 - -// TypeHintString spec indicates that a value provided in an ambiguous context -// should be treated as a literal string, and additionally requires that the -// runtime value for the variable is of a primitive type (string, number, bool). -const TypeHintString VariableTypeHint = 'S' - -// TypeHintList indicates that a value provided in an ambiguous context should -// be treated as an HCL expression, and additionally requires that the -// runtime value for the variable is of an tuple, list, or set type. -const TypeHintList VariableTypeHint = 'L' - -// TypeHintMap indicates that a value provided in an ambiguous context should -// be treated as an HCL expression, and additionally requires that the -// runtime value for the variable is of an object or map type. -const TypeHintMap VariableTypeHint = 'M' diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go deleted file mode 100644 index 2b50428c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go +++ /dev/null @@ -1,39 +0,0 @@ -// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[TypeHintNone-0] - _ = x[TypeHintString-83] - _ = x[TypeHintList-76] - _ = x[TypeHintMap-77] -} - -const ( - _VariableTypeHint_name_0 = "TypeHintNone" - _VariableTypeHint_name_1 = "TypeHintListTypeHintMap" - _VariableTypeHint_name_2 = "TypeHintString" -) - -var ( - _VariableTypeHint_index_1 = [...]uint8{0, 12, 23} -) - -func (i VariableTypeHint) String() string { - switch { - case i == 0: - return _VariableTypeHint_name_0 - case 76 <= i && i <= 77: - i -= 76 - return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]] - case i == 83: - return _VariableTypeHint_name_2 - default: - return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go deleted file mode 100644 index 0f541dc7..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go +++ /dev/null @@ -1,71 +0,0 @@ -package configs - -import ( - "fmt" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// VersionConstraint represents a version constraint on some resource -// (e.g. Terraform Core, a provider, a module, ...) that carries with it -// a source range so that a helpful diagnostic can be printed in the event -// that a particular constraint does not match. 
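The constraint matching itself comes from the vendored hashicorp/go-version package; a small self-contained sketch of the syntax decodeVersionConstraint expects:

    c, err := version.NewConstraint(">= 1.2.0, < 2.0.0")
    if err != nil {
        // the string did not use valid constraint syntax
    }
    ok := c.Check(version.Must(version.NewVersion("1.4.5"))) // true
    ok = c.Check(version.Must(version.NewVersion("2.0.0")))  // false
    _ = ok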
-type VersionConstraint struct { - Required version.Constraints - DeclRange hcl.Range -} - -func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) { - ret := VersionConstraint{ - DeclRange: attr.Range, - } - - val, diags := attr.Expr.Value(nil) - if diags.HasErrors() { - return ret, diags - } - var err error - val, err = convert.Convert(val, cty.String) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: fmt.Sprintf("A string value is required for %s.", attr.Name), - Subject: attr.Expr.Range().Ptr(), - }) - return ret, diags - } - - if val.IsNull() { - // A null version constraint is strange, but we'll just treat it - // like an empty constraint set. - return ret, diags - } - - if !val.IsWhollyKnown() { - // If there is a syntax error, HCL sets the value of the given attribute - // to cty.DynamicVal. A diagnostic for the syntax error will already - // bubble up, so we will move forward gracefully here. - return ret, diags - } - - constraintStr := val.AsString() - constraints, err := version.NewConstraint(constraintStr) - if err != nil { - // NewConstraint doesn't return user-friendly errors, so we'll just - // ignore the provided error and produce our own generic one. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: "This string does not use correct version constraint syntax.", // Not very actionable :( - Subject: attr.Expr.Range().Ptr(), - }) - return ret, diags - } - - ret.Required = constraints - return ret, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go deleted file mode 100644 index a150af96..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go +++ /dev/null @@ -1,301 +0,0 @@ -package dag - -import ( - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/go-multierror" -) - -// AcyclicGraph is a specialization of Graph that cannot have cycles. With -// this property, we get the property of sane graph traversal. -type AcyclicGraph struct { - Graph -} - -// WalkFunc is the callback used for walking the graph. -type WalkFunc func(Vertex) tfdiags.Diagnostics - -// DepthWalkFunc is a walk function that also receives the current depth of the -// walk as an argument -type DepthWalkFunc func(Vertex, int) error - -func (g *AcyclicGraph) DirectedGraph() Grapher { - return g -} - -// Returns a Set that includes every Vertex yielded by walking down from the -// provided starting Vertex v. -func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) { - s := new(Set) - start := AsVertexList(g.DownEdges(v)) - memoFunc := func(v Vertex, d int) error { - s.Add(v) - return nil - } - - if err := g.DepthFirstWalk(start, memoFunc); err != nil { - return nil, err - } - - return s, nil -} - -// Returns a Set that includes every Vertex yielded by walking up from the -// provided starting Vertex v. -func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) { - s := new(Set) - start := AsVertexList(g.UpEdges(v)) - memoFunc := func(v Vertex, d int) error { - s.Add(v) - return nil - } - - if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil { - return nil, err - } - - return s, nil -} - -// Root returns the root of the DAG, or an error. 
-// -// Complexity: O(V) -func (g *AcyclicGraph) Root() (Vertex, error) { - roots := make([]Vertex, 0, 1) - for _, v := range g.Vertices() { - if g.UpEdges(v).Len() == 0 { - roots = append(roots, v) - } - } - - if len(roots) > 1 { - // TODO(mitchellh): make this error message a lot better - return nil, fmt.Errorf("multiple roots: %#v", roots) - } - - if len(roots) == 0 { - return nil, fmt.Errorf("no roots found") - } - - return roots[0], nil -} - -// TransitiveReduction performs the transitive reduction of graph g in place. -// The transitive reduction of a graph is a graph with as few edges as -// possible with the same reachability as the original graph. This means -// that if there are three nodes A => B => C, and A connects to both -// B and C, and B connects to C, then the transitive reduction is the -// same graph with only a single edge between A and B, and a single edge -// between B and C. -// -// The graph must be valid for this operation to behave properly. If -// Validate() returns an error, the behavior is undefined and the results -// will likely be unexpected. -// -// Complexity: O(V(V+E)), or asymptotically O(VE) -func (g *AcyclicGraph) TransitiveReduction() { - // For each vertex u in graph g, do a DFS starting from each vertex - // v such that the edge (u,v) exists (v is a direct descendant of u). - // - // For each v-prime reachable from v, remove the edge (u, v-prime). - defer g.debug.BeginOperation("TransitiveReduction", "").End("") - - for _, u := range g.Vertices() { - uTargets := g.DownEdges(u) - vs := AsVertexList(g.DownEdges(u)) - - g.depthFirstWalk(vs, false, func(v Vertex, d int) error { - shared := uTargets.Intersection(g.DownEdges(v)) - for _, vPrime := range AsVertexList(shared) { - g.RemoveEdge(BasicEdge(u, vPrime)) - } - - return nil - }) - } -} - -// Validate validates the DAG. A DAG is valid if it has a single root -// with no cycles. -func (g *AcyclicGraph) Validate() error { - if _, err := g.Root(); err != nil { - return err - } - - // Look for cycles of more than 1 component - var err error - cycles := g.Cycles() - if len(cycles) > 0 { - for _, cycle := range cycles { - cycleStr := make([]string, len(cycle)) - for j, vertex := range cycle { - cycleStr[j] = VertexName(vertex) - } - - err = multierror.Append(err, fmt.Errorf( - "Cycle: %s", strings.Join(cycleStr, ", "))) - } - } - - // Look for cycles to self - for _, e := range g.Edges() { - if e.Source() == e.Target() { - err = multierror.Append(err, fmt.Errorf( - "Self reference: %s", VertexName(e.Source()))) - } - } - - return err -} - -func (g *AcyclicGraph) Cycles() [][]Vertex { - var cycles [][]Vertex - for _, cycle := range StronglyConnected(&g.Graph) { - if len(cycle) > 1 { - cycles = append(cycles, cycle) - } - } - return cycles -} - -// Walk walks the graph, calling your callback as each node is visited. -// This will walk nodes in parallel if it can. The resulting diagnostics -// contains problems from all graphs visited, in no particular order. 
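A minimal usage sketch of the Walk method defined below, assuming only the dag and tfdiags packages as they appear in this vendor tree; the string vertices and printed output are illustrative, and since these packages are internal to the SDK this is for reading along with the deleted code rather than for importing:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
)

func main() {
	var g dag.AcyclicGraph
	g.Add("a")
	g.Add("b")
	// In this package an edge points from the dependent to its dependency,
	// so "a" depends on "b" here, and the walk visits "b" before "a".
	g.Connect(dag.BasicEdge("a", "b"))

	diags := g.Walk(func(v dag.Vertex) tfdiags.Diagnostics {
		fmt.Println("visit:", dag.VertexName(v))
		return nil // returning error diagnostics would skip dependents
	})
	if diags.HasErrors() {
		fmt.Println(diags.Err())
	}
}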
-func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics { - defer g.debug.BeginOperation(typeWalk, "").End("") - - w := &Walker{Callback: cb, Reverse: true} - w.Update(g) - return w.Wait() -} - -// simple convenience helper for converting a dag.Set to a []Vertex -func AsVertexList(s *Set) []Vertex { - rawList := s.List() - vertexList := make([]Vertex, len(rawList)) - for i, raw := range rawList { - vertexList[i] = raw.(Vertex) - } - return vertexList -} - -type vertexAtDepth struct { - Vertex Vertex - Depth int -} - -// depthFirstWalk does a depth-first walk of the graph starting from -// the vertices in start. -func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { - return g.depthFirstWalk(start, true, f) -} - -// This internal method provides the option of not sorting the vertices during -// the walk, which we use for the Transitive reduction. -// Some configurations can lead to fully-connected subgraphs, which makes our -// transitive reduction algorithm O(n^3). This is still passable for the size -// of our graphs, but the additional n^2 sort operations would make this -// uncomputable in a reasonable amount of time. -func (g *AcyclicGraph) depthFirstWalk(start []Vertex, sorted bool, f DepthWalkFunc) error { - defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("") - - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, len(start)) - for i, v := range start { - frontier[i] = &vertexAtDepth{ - Vertex: v, - Depth: 0, - } - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - - // Visit targets of this in a consistent order. - targets := AsVertexList(g.DownEdges(current.Vertex)) - - if sorted { - sort.Sort(byVertexName(targets)) - } - - for _, t := range targets { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - } - - return nil -} - -// reverseDepthFirstWalk does a depth-first walk _up_ the graph starting from -// the vertices in start. -func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error { - defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("") - - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, len(start)) - for i, v := range start { - frontier[i] = &vertexAtDepth{ - Vertex: v, - Depth: 0, - } - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Add next set of targets in a consistent order. 
- targets := AsVertexList(g.UpEdges(current.Vertex)) - sort.Sort(byVertexName(targets)) - for _, t := range targets { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - } - - return nil -} - -// byVertexName implements sort.Interface so a list of Vertices can be sorted -// consistently by their VertexName -type byVertexName []Vertex - -func (b byVertexName) Len() int { return len(b) } -func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byVertexName) Less(i, j int) bool { - return VertexName(b[i]) < VertexName(b[j]) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go deleted file mode 100644 index 65a351b6..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go +++ /dev/null @@ -1,278 +0,0 @@ -package dag - -import ( - "bytes" - "fmt" - "sort" - "strings" -) - -// DotOpts are the options for generating a dot formatted Graph. -type DotOpts struct { - // Allows some nodes to decide to only show themselves when the user has - // requested the "verbose" graph. - Verbose bool - - // Highlight Cycles - DrawCycles bool - - // How many levels to expand modules as we draw - MaxDepth int - - // use this to keep the cluster_ naming convention from the previous dot writer - cluster bool -} - -// GraphNodeDotter can be implemented by a node to cause it to be included -// in the dot graph. The Dot method will be called which is expected to -// return a representation of this node. -type GraphNodeDotter interface { - // Dot is called to return the dot formatting for the node. - // The first parameter is the title of the node. - // The second parameter includes user-specified options that affect the dot - // graph. See GraphDotOpts below for details. - DotNode(string, *DotOpts) *DotNode -} - -// DotNode provides a structure for Vertices to return in order to specify their -// dot format. -type DotNode struct { - Name string - Attrs map[string]string -} - -// Returns the DOT representation of this Graph. 
-func (g *marshalGraph) Dot(opts *DotOpts) []byte {
- if opts == nil {
- opts = &DotOpts{
- DrawCycles: true,
- MaxDepth: -1,
- Verbose: true,
- }
- }
-
- var w indentWriter
- w.WriteString("digraph {\n")
- w.Indent()
-
- // some dot defaults
- w.WriteString(`compound = "true"` + "\n")
- w.WriteString(`newrank = "true"` + "\n")
-
- // the top level graph is written as the first subgraph
- w.WriteString(`subgraph "root" {` + "\n")
- g.writeBody(opts, &w)
-
- // cluster isn't really used other than for naming purposes in some graphs
- opts.cluster = opts.MaxDepth != 0
- maxDepth := opts.MaxDepth
- if maxDepth == 0 {
- maxDepth = -1
- }
-
- for _, s := range g.Subgraphs {
- g.writeSubgraph(s, opts, maxDepth, &w)
- }
-
- w.Unindent()
- w.WriteString("}\n")
- return w.Bytes()
-}
-
-func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
- var buf bytes.Buffer
- graphName := g.Name
- if graphName == "" {
- graphName = "root"
- }
-
- name := v.Name
- attrs := v.Attrs
- if v.graphNodeDotter != nil {
- node := v.graphNodeDotter.DotNode(name, opts)
- if node == nil {
- return []byte{}
- }
-
- newAttrs := make(map[string]string)
- for k, v := range attrs {
- newAttrs[k] = v
- }
- for k, v := range node.Attrs {
- newAttrs[k] = v
- }
-
- name = node.Name
- attrs = newAttrs
- }
-
- buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name))
- writeAttrs(&buf, attrs)
- buf.WriteByte('\n')
-
- return buf.Bytes()
-}
-
-func (e *marshalEdge) dot(g *marshalGraph) string {
- var buf bytes.Buffer
- graphName := g.Name
- if graphName == "" {
- graphName = "root"
- }
-
- sourceName := g.vertexByID(e.Source).Name
- targetName := g.vertexByID(e.Target).Name
- s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName)
- buf.WriteString(s)
- writeAttrs(&buf, e.Attrs)
-
- return buf.String()
-}
-
-func cycleDot(e *marshalEdge, g *marshalGraph) string {
- return e.dot(g) + ` [color = "red", penwidth = "2.0"]`
-}
-
-// Write the subgraph body. This is recursive, and the depth argument is used to
-// record the current depth of iteration.
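As the writeBody implementation below shows, only vertices implementing GraphNodeDotter are rendered, so a sketch of producing DOT text through the exported Graph.Dot wrapper needs a small opted-in vertex type; the node type and its attributes here are invented for illustration:

package main

import (
	"os"

	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
)

// node is a hypothetical vertex type; implementing DotNode opts it in
// to DOT rendering.
type node string

func (n node) Name() string { return string(n) }

func (n node) DotNode(title string, _ *dag.DotOpts) *dag.DotNode {
	return &dag.DotNode{Name: title, Attrs: map[string]string{"shape": "box"}}
}

func main() {
	var g dag.Graph
	g.Add(node("app"))
	g.Add(node("db"))
	g.Connect(dag.BasicEdge(node("app"), node("db")))

	// Emits a digraph with a "root" subgraph, two nodes, and an
	// "[root] app" -> "[root] db" edge.
	os.Stdout.Write(g.Dot(nil))
}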
-func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) {
- if depth == 0 {
- return
- }
- depth--
-
- name := sg.Name
- if opts.cluster {
- // we prefix with cluster_ to match the old dot output
- name = "cluster_" + name
- sg.Attrs["label"] = sg.Name
- }
- w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
- sg.writeBody(opts, w)
-
- for _, sg := range sg.Subgraphs {
- g.writeSubgraph(sg, opts, depth, w)
- }
-}
-
-func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
- w.Indent()
-
- for _, as := range attrStrings(g.Attrs) {
- w.WriteString(as + "\n")
- }
-
- // list of Vertices that aren't to be included in the dot output
- skip := map[string]bool{}
-
- for _, v := range g.Vertices {
- if v.graphNodeDotter == nil {
- skip[v.ID] = true
- continue
- }
-
- w.Write(v.dot(g, opts))
- }
-
- var dotEdges []string
-
- if opts.DrawCycles {
- for _, c := range g.Cycles {
- if len(c) < 2 {
- continue
- }
-
- for i, j := 0, 1; i < len(c); i, j = i+1, j+1 {
- if j >= len(c) {
- j = 0
- }
- src := c[i]
- tgt := c[j]
-
- if skip[src.ID] || skip[tgt.ID] {
- continue
- }
-
- e := &marshalEdge{
- Name: fmt.Sprintf("%s|%s", src.Name, tgt.Name),
- Source: src.ID,
- Target: tgt.ID,
- Attrs: make(map[string]string),
- }
-
- dotEdges = append(dotEdges, cycleDot(e, g))
- src = tgt
- }
- }
- }
-
- for _, e := range g.Edges {
- dotEdges = append(dotEdges, e.dot(g))
- }
-
- // sort these again to match the old output
- sort.Strings(dotEdges)
-
- for _, e := range dotEdges {
- w.WriteString(e + "\n")
- }
-
- w.Unindent()
- w.WriteString("}\n")
-}
-
-func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
- if len(attrs) > 0 {
- buf.WriteString(" [")
- buf.WriteString(strings.Join(attrStrings(attrs), ", "))
- buf.WriteString("]")
- }
-}
-
-func attrStrings(attrs map[string]string) []string {
- strings := make([]string, 0, len(attrs))
- for k, v := range attrs {
- strings = append(strings, fmt.Sprintf("%s = %q", k, v))
- }
- sort.Strings(strings)
- return strings
-}
-
-// Provide a bytes.Buffer like structure, which will indent when starting a
-// newline.
-type indentWriter struct {
- bytes.Buffer
- level int
-}
-
-func (w *indentWriter) indent() {
- newline := []byte("\n")
- if !bytes.HasSuffix(w.Bytes(), newline) {
- return
- }
- for i := 0; i < w.level; i++ {
- w.Buffer.WriteString("\t")
- }
-}
-
-// Indent increases indentation by 1
-func (w *indentWriter) Indent() { w.level++ }
-
-// Unindent decreases indentation by 1
-func (w *indentWriter) Unindent() { w.level-- }
-
-// the following methods intercept the bytes.Buffer writes and insert the
-// indentation when starting a new line.
-func (w *indentWriter) Write(b []byte) (int, error) {
- w.indent()
- return w.Buffer.Write(b)
-}
-
-func (w *indentWriter) WriteString(s string) (int, error) {
- w.indent()
- return w.Buffer.WriteString(s)
-}
-func (w *indentWriter) WriteByte(b byte) error {
- w.indent()
- return w.Buffer.WriteByte(b)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go
deleted file mode 100644
index f0d99ee3..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package dag
-
-import (
- "fmt"
-)
-
-// Edge represents an edge in the graph, with a source and target vertex.
-type Edge interface {
- Source() Vertex
- Target() Vertex
-
- Hashable
-}
-
-// BasicEdge returns an Edge implementation that simply tracks the source
-// and target given as-is.
-func BasicEdge(source, target Vertex) Edge {
- return &basicEdge{S: source, T: target}
-}
-
-// basicEdge is a basic implementation of Edge that has the source and
-// target vertex.
-type basicEdge struct {
- S, T Vertex
-}
-
-func (e *basicEdge) Hashcode() interface{} {
- return fmt.Sprintf("%p-%p", e.S, e.T)
-}
-
-func (e *basicEdge) Source() Vertex {
- return e.S
-}
-
-func (e *basicEdge) Target() Vertex {
- return e.T
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go
deleted file mode 100644
index e7517a20..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go
+++ /dev/null
@@ -1,391 +0,0 @@
-package dag
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "sort"
-)
-
-// Graph is used to represent a dependency graph.
-type Graph struct {
- vertices *Set
- edges *Set
- downEdges map[interface{}]*Set
- upEdges map[interface{}]*Set
-
- // JSON encoder for recording debug information
- debug *encoder
-}
-
-// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher.
-type Subgrapher interface {
- Subgraph() Grapher
-}
-
-// A Grapher is any type that returns a Grapher, mainly used to identify
-// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they
-// return themselves.
-type Grapher interface {
- DirectedGraph() Grapher
-}
-
-// Vertex of the graph.
-type Vertex interface{}
-
-// NamedVertex is an optional interface that can be implemented by Vertex
-// to give it a human-friendly name that is used for outputting the graph.
-type NamedVertex interface {
- Vertex
- Name() string
-}
-
-func (g *Graph) DirectedGraph() Grapher {
- return g
-}
-
-// Vertices returns the list of all the vertices in the graph.
-func (g *Graph) Vertices() []Vertex {
- list := g.vertices.List()
- result := make([]Vertex, len(list))
- for i, v := range list {
- result[i] = v.(Vertex)
- }
-
- return result
-}
-
-// Edges returns the list of all the edges in the graph.
-func (g *Graph) Edges() []Edge {
- list := g.edges.List()
- result := make([]Edge, len(list))
- for i, v := range list {
- result[i] = v.(Edge)
- }
-
- return result
-}
-
-// EdgesFrom returns the list of edges from the given source.
-func (g *Graph) EdgesFrom(v Vertex) []Edge {
- var result []Edge
- from := hashcode(v)
- for _, e := range g.Edges() {
- if hashcode(e.Source()) == from {
- result = append(result, e)
- }
- }
-
- return result
-}
-
-// EdgesTo returns the list of edges to the given target.
-func (g *Graph) EdgesTo(v Vertex) []Edge {
- var result []Edge
- search := hashcode(v)
- for _, e := range g.Edges() {
- if hashcode(e.Target()) == search {
- result = append(result, e)
- }
- }
-
- return result
-}
-
-// HasVertex checks if the given Vertex is present in the graph.
-func (g *Graph) HasVertex(v Vertex) bool {
- return g.vertices.Include(v)
-}
-
-// HasEdge checks if the given Edge is present in the graph.
-func (g *Graph) HasEdge(e Edge) bool {
- return g.edges.Include(e)
-}
-
-// Add adds a vertex to the graph. This is safe to call multiple times with
-// the same Vertex.
-func (g *Graph) Add(v Vertex) Vertex {
- g.init()
- g.vertices.Add(v)
- g.debug.Add(v)
- return v
-}
-
-// Remove removes a vertex from the graph.
This will also remove any -// edges with this vertex as a source or target. -func (g *Graph) Remove(v Vertex) Vertex { - // Delete the vertex itself - g.vertices.Delete(v) - g.debug.Remove(v) - - // Delete the edges to non-existent things - for _, target := range g.DownEdges(v).List() { - g.RemoveEdge(BasicEdge(v, target)) - } - for _, source := range g.UpEdges(v).List() { - g.RemoveEdge(BasicEdge(source, v)) - } - - return nil -} - -// Replace replaces the original Vertex with replacement. If the original -// does not exist within the graph, then false is returned. Otherwise, true -// is returned. -func (g *Graph) Replace(original, replacement Vertex) bool { - // If we don't have the original, we can't do anything - if !g.vertices.Include(original) { - return false - } - - defer g.debug.BeginOperation("Replace", "").End("") - - // If they're the same, then don't do anything - if original == replacement { - return true - } - - // Add our new vertex, then copy all the edges - g.Add(replacement) - for _, target := range g.DownEdges(original).List() { - g.Connect(BasicEdge(replacement, target)) - } - for _, source := range g.UpEdges(original).List() { - g.Connect(BasicEdge(source, replacement)) - } - - // Remove our old vertex, which will also remove all the edges - g.Remove(original) - - return true -} - -// RemoveEdge removes an edge from the graph. -func (g *Graph) RemoveEdge(edge Edge) { - g.init() - g.debug.RemoveEdge(edge) - - // Delete the edge from the set - g.edges.Delete(edge) - - // Delete the up/down edges - if s, ok := g.downEdges[hashcode(edge.Source())]; ok { - s.Delete(edge.Target()) - } - if s, ok := g.upEdges[hashcode(edge.Target())]; ok { - s.Delete(edge.Source()) - } -} - -// DownEdges returns the outward edges from the source Vertex v. -func (g *Graph) DownEdges(v Vertex) *Set { - g.init() - return g.downEdges[hashcode(v)] -} - -// UpEdges returns the inward edges to the destination Vertex v. -func (g *Graph) UpEdges(v Vertex) *Set { - g.init() - return g.upEdges[hashcode(v)] -} - -// Connect adds an edge with the given source and target. This is safe to -// call multiple times with the same value. Note that the same value is -// verified through pointer equality of the vertices, not through the -// value of the edge itself. -func (g *Graph) Connect(edge Edge) { - g.init() - g.debug.Connect(edge) - - source := edge.Source() - target := edge.Target() - sourceCode := hashcode(source) - targetCode := hashcode(target) - - // Do we have this already? If so, don't add it again. - if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) { - return - } - - // Add the edge to the set - g.edges.Add(edge) - - // Add the down edge - s, ok := g.downEdges[sourceCode] - if !ok { - s = new(Set) - g.downEdges[sourceCode] = s - } - s.Add(target) - - // Add the up edge - s, ok = g.upEdges[targetCode] - if !ok { - s = new(Set) - g.upEdges[targetCode] = s - } - s.Add(source) -} - -// String outputs some human-friendly output for the graph structure. -func (g *Graph) StringWithNodeTypes() string { - var buf bytes.Buffer - - // Build the list of node names and a mapping so that we can more - // easily alphabetize the output to remain deterministic. - vertices := g.Vertices() - names := make([]string, 0, len(vertices)) - mapping := make(map[string]Vertex, len(vertices)) - for _, v := range vertices { - name := VertexName(v) - names = append(names, name) - mapping[name] = v - } - sort.Strings(names) - - // Write each node in order... 
- for _, name := range names { - v := mapping[name] - targets := g.downEdges[hashcode(v)] - - buf.WriteString(fmt.Sprintf("%s - %T\n", name, v)) - - // Alphabetize dependencies - deps := make([]string, 0, targets.Len()) - targetNodes := make(map[string]Vertex) - for _, target := range targets.List() { - dep := VertexName(target) - deps = append(deps, dep) - targetNodes[dep] = target - } - sort.Strings(deps) - - // Write dependencies - for _, d := range deps { - buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d])) - } - } - - return buf.String() -} - -// String outputs some human-friendly output for the graph structure. -func (g *Graph) String() string { - var buf bytes.Buffer - - // Build the list of node names and a mapping so that we can more - // easily alphabetize the output to remain deterministic. - vertices := g.Vertices() - names := make([]string, 0, len(vertices)) - mapping := make(map[string]Vertex, len(vertices)) - for _, v := range vertices { - name := VertexName(v) - names = append(names, name) - mapping[name] = v - } - sort.Strings(names) - - // Write each node in order... - for _, name := range names { - v := mapping[name] - targets := g.downEdges[hashcode(v)] - - buf.WriteString(fmt.Sprintf("%s\n", name)) - - // Alphabetize dependencies - deps := make([]string, 0, targets.Len()) - for _, target := range targets.List() { - deps = append(deps, VertexName(target)) - } - sort.Strings(deps) - - // Write dependencies - for _, d := range deps { - buf.WriteString(fmt.Sprintf(" %s\n", d)) - } - } - - return buf.String() -} - -func (g *Graph) init() { - if g.vertices == nil { - g.vertices = new(Set) - } - if g.edges == nil { - g.edges = new(Set) - } - if g.downEdges == nil { - g.downEdges = make(map[interface{}]*Set) - } - if g.upEdges == nil { - g.upEdges = make(map[interface{}]*Set) - } -} - -// Dot returns a dot-formatted representation of the Graph. -func (g *Graph) Dot(opts *DotOpts) []byte { - return newMarshalGraph("", g).Dot(opts) -} - -// MarshalJSON returns a JSON representation of the entire Graph. -func (g *Graph) MarshalJSON() ([]byte, error) { - dg := newMarshalGraph("root", g) - return json.MarshalIndent(dg, "", " ") -} - -// SetDebugWriter sets the io.Writer where the Graph will record debug -// information. After this is set, the graph will immediately encode itself to -// the stream, and continue to record all subsequent operations. -func (g *Graph) SetDebugWriter(w io.Writer) { - g.debug = &encoder{w: w} - g.debug.Encode(newMarshalGraph("root", g)) -} - -// DebugVertexInfo encodes arbitrary information about a vertex in the graph -// debug logs. -func (g *Graph) DebugVertexInfo(v Vertex, info string) { - va := newVertexInfo(typeVertexInfo, v, info) - g.debug.Encode(va) -} - -// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug -// logs. -func (g *Graph) DebugEdgeInfo(e Edge, info string) { - ea := newEdgeInfo(typeEdgeInfo, e, info) - g.debug.Encode(ea) -} - -// DebugVisitInfo records a visit to a Vertex during a walk operation. -func (g *Graph) DebugVisitInfo(v Vertex, info string) { - vi := newVertexInfo(typeVisitInfo, v, info) - g.debug.Encode(vi) -} - -// DebugOperation marks the start of a set of graph transformations in -// the debug log, and returns a DebugOperationEnd func, which marks the end of -// the operation in the log. Additional information can be added to the log via -// the info parameter. 
-// -// The returned func's End method allows this method to be called from a single -// defer statement: -// defer g.DebugOperationBegin("OpName", "operating").End("") -// -// The returned function must be called to properly close the logical operation -// in the logs. -func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd { - return g.debug.BeginOperation(operation, info) -} - -// VertexName returns the name of a vertex. -func VertexName(raw Vertex) string { - switch v := raw.(type) { - case NamedVertex: - return v.Name() - case fmt.Stringer: - return fmt.Sprintf("%s", v) - default: - return fmt.Sprintf("%v", v) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go deleted file mode 100644 index 7b23ea9c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go +++ /dev/null @@ -1,460 +0,0 @@ -package dag - -import ( - "encoding/json" - "fmt" - "io" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -const ( - typeOperation = "Operation" - typeTransform = "Transform" - typeWalk = "Walk" - typeDepthFirstWalk = "DepthFirstWalk" - typeReverseDepthFirstWalk = "ReverseDepthFirstWalk" - typeTransitiveReduction = "TransitiveReduction" - typeEdgeInfo = "EdgeInfo" - typeVertexInfo = "VertexInfo" - typeVisitInfo = "VisitInfo" -) - -// the marshal* structs are for serialization of the graph data. -type marshalGraph struct { - // Type is always "Graph", for identification as a top level object in the - // JSON stream. - Type string - - // Each marshal structure requires a unique ID so that it can be referenced - // by other structures. - ID string `json:",omitempty"` - - // Human readable name for this graph. - Name string `json:",omitempty"` - - // Arbitrary attributes that can be added to the output. - Attrs map[string]string `json:",omitempty"` - - // List of graph vertices, sorted by ID. - Vertices []*marshalVertex `json:",omitempty"` - - // List of edges, sorted by Source ID. - Edges []*marshalEdge `json:",omitempty"` - - // Any number of subgraphs. A subgraph itself is considered a vertex, and - // may be referenced by either end of an edge. - Subgraphs []*marshalGraph `json:",omitempty"` - - // Any lists of vertices that are included in cycles. - Cycles [][]*marshalVertex `json:",omitempty"` -} - -// The add, remove, connect, removeEdge methods mirror the basic Graph -// manipulations to reconstruct a marshalGraph from a debug log. -func (g *marshalGraph) add(v *marshalVertex) { - g.Vertices = append(g.Vertices, v) - sort.Sort(vertices(g.Vertices)) -} - -func (g *marshalGraph) remove(v *marshalVertex) { - for i, existing := range g.Vertices { - if v.ID == existing.ID { - g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...) - return - } - } -} - -func (g *marshalGraph) connect(e *marshalEdge) { - g.Edges = append(g.Edges, e) - sort.Sort(edges(g.Edges)) -} - -func (g *marshalGraph) removeEdge(e *marshalEdge) { - for i, existing := range g.Edges { - if e.Source == existing.Source && e.Target == existing.Target { - g.Edges = append(g.Edges[:i], g.Edges[i+1:]...) - return - } - } -} - -func (g *marshalGraph) vertexByID(id string) *marshalVertex { - for _, v := range g.Vertices { - if id == v.ID { - return v - } - } - return nil -} - -type marshalVertex struct { - // Unique ID, used to reference this vertex from other structures. 
- ID string - - // Human readable name - Name string `json:",omitempty"` - - Attrs map[string]string `json:",omitempty"` - - // This is to help transition from the old Dot interfaces. We record if the - // node was a GraphNodeDotter here, so we can call it to get attributes. - graphNodeDotter GraphNodeDotter -} - -func newMarshalVertex(v Vertex) *marshalVertex { - dn, ok := v.(GraphNodeDotter) - if !ok { - dn = nil - } - - return &marshalVertex{ - ID: marshalVertexID(v), - Name: VertexName(v), - Attrs: make(map[string]string), - graphNodeDotter: dn, - } -} - -// vertices is a sort.Interface implementation for sorting vertices by ID -type vertices []*marshalVertex - -func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name } -func (v vertices) Len() int { return len(v) } -func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] } - -type marshalEdge struct { - // Human readable name - Name string - - // Source and Target Vertices by ID - Source string - Target string - - Attrs map[string]string `json:",omitempty"` -} - -func newMarshalEdge(e Edge) *marshalEdge { - return &marshalEdge{ - Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())), - Source: marshalVertexID(e.Source()), - Target: marshalVertexID(e.Target()), - Attrs: make(map[string]string), - } -} - -// edges is a sort.Interface implementation for sorting edges by Source ID -type edges []*marshalEdge - -func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name } -func (e edges) Len() int { return len(e) } -func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } - -// build a marshalGraph structure from a *Graph -func newMarshalGraph(name string, g *Graph) *marshalGraph { - mg := &marshalGraph{ - Type: "Graph", - Name: name, - Attrs: make(map[string]string), - } - - for _, v := range g.Vertices() { - id := marshalVertexID(v) - if sg, ok := marshalSubgrapher(v); ok { - smg := newMarshalGraph(VertexName(v), sg) - smg.ID = id - mg.Subgraphs = append(mg.Subgraphs, smg) - } - - mv := newMarshalVertex(v) - mg.Vertices = append(mg.Vertices, mv) - } - - sort.Sort(vertices(mg.Vertices)) - - for _, e := range g.Edges() { - mg.Edges = append(mg.Edges, newMarshalEdge(e)) - } - - sort.Sort(edges(mg.Edges)) - - for _, c := range (&AcyclicGraph{*g}).Cycles() { - var cycle []*marshalVertex - for _, v := range c { - mv := newMarshalVertex(v) - cycle = append(cycle, mv) - } - mg.Cycles = append(mg.Cycles, cycle) - } - - return mg -} - -// Attempt to return a unique ID for any vertex. -func marshalVertexID(v Vertex) string { - val := reflect.ValueOf(v) - switch val.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return strconv.Itoa(int(val.Pointer())) - case reflect.Interface: - return strconv.Itoa(int(val.InterfaceData()[1])) - } - - if v, ok := v.(Hashable); ok { - h := v.Hashcode() - if h, ok := h.(string); ok { - return h - } - } - - // fallback to a name, which we hope is unique. - return VertexName(v) - - // we could try harder by attempting to read the arbitrary value from the - // interface, but we shouldn't get here from terraform right now. -} - -// check for a Subgrapher, and return the underlying *Graph. 
-func marshalSubgrapher(v Vertex) (*Graph, bool) {
- sg, ok := v.(Subgrapher)
- if !ok {
- return nil, false
- }
-
- switch g := sg.Subgraph().DirectedGraph().(type) {
- case *Graph:
- return g, true
- case *AcyclicGraph:
- return &g.Graph, true
- }
-
- return nil, false
-}
-
-// The DebugOperationEnd func type provides a way to call an End function via a
-// method call, allowing for the chaining of methods in a defer statement.
-type DebugOperationEnd func(string)
-
-// End calls function e with the info parameter, marking the end of this
-// operation in the logs.
-func (e DebugOperationEnd) End(info string) { e(info) }
-
-// encoder provides methods to write debug data to an io.Writer, and is a noop
-// when no writer is present
-type encoder struct {
- sync.Mutex
- w io.Writer
-}
-
-// Encode is analogous to json.Encoder.Encode
-func (e *encoder) Encode(i interface{}) {
- if e == nil || e.w == nil {
- return
- }
- e.Lock()
- defer e.Unlock()
-
- js, err := json.Marshal(i)
- if err != nil {
- log.Println("[ERROR] dag:", err)
- return
- }
- js = append(js, '\n')
-
- _, err = e.w.Write(js)
- if err != nil {
- log.Println("[ERROR] dag:", err)
- return
- }
-}
-
-func (e *encoder) Add(v Vertex) {
- if e == nil {
- return
- }
- e.Encode(marshalTransform{
- Type: typeTransform,
- AddVertex: newMarshalVertex(v),
- })
-}
-
-// Remove records the removal of Vertex v.
-func (e *encoder) Remove(v Vertex) {
- if e == nil {
- return
- }
- e.Encode(marshalTransform{
- Type: typeTransform,
- RemoveVertex: newMarshalVertex(v),
- })
-}
-
-func (e *encoder) Connect(edge Edge) {
- if e == nil {
- return
- }
- e.Encode(marshalTransform{
- Type: typeTransform,
- AddEdge: newMarshalEdge(edge),
- })
-}
-
-func (e *encoder) RemoveEdge(edge Edge) {
- if e == nil {
- return
- }
- e.Encode(marshalTransform{
- Type: typeTransform,
- RemoveEdge: newMarshalEdge(edge),
- })
-}
-
-// BeginOperation marks the start of a set of graph transformations, and returns
-// a DebugOperationEnd func to be called once the operation is complete.
-func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd {
- if e == nil {
- return func(string) {}
- }
-
- e.Encode(marshalOperation{
- Type: typeOperation,
- Begin: op,
- Info: info,
- })
-
- return func(info string) {
- e.Encode(marshalOperation{
- Type: typeOperation,
- End: op,
- Info: info,
- })
- }
-}
-
-// structure for recording graph transformations
-type marshalTransform struct {
- // Type: "Transform"
- Type string
- AddEdge *marshalEdge `json:",omitempty"`
- RemoveEdge *marshalEdge `json:",omitempty"`
- AddVertex *marshalVertex `json:",omitempty"`
- RemoveVertex *marshalVertex `json:",omitempty"`
-}
-
-func (t marshalTransform) Transform(g *marshalGraph) {
- switch {
- case t.AddEdge != nil:
- g.connect(t.AddEdge)
- case t.RemoveEdge != nil:
- g.removeEdge(t.RemoveEdge)
- case t.AddVertex != nil:
- g.add(t.AddVertex)
- case t.RemoveVertex != nil:
- g.remove(t.RemoveVertex)
- }
-}
-
-// this structure allows us to decode any object in the json stream for
-// inspection, then re-decode it into a proper struct if needed.
-type streamDecode struct {
- Type string
- Map map[string]interface{}
- JSON []byte
-}
-
-func (s *streamDecode) UnmarshalJSON(d []byte) error {
- s.JSON = d
- err := json.Unmarshal(d, &s.Map)
- if err != nil {
- return err
- }
-
- if t, ok := s.Map["Type"]; ok {
- s.Type, _ = t.(string)
- }
- return nil
-}
-
-// structure for recording the beginning and end of any multi-step
-// transformations.
These are informational, and not required to reproduce the
-// graph state.
-type marshalOperation struct {
- Type string
- Begin string `json:",omitempty"`
- End string `json:",omitempty"`
- Info string `json:",omitempty"`
-}
-
-// decodeGraph decodes a marshalGraph from an encoded graph stream.
-func decodeGraph(r io.Reader) (*marshalGraph, error) {
- dec := json.NewDecoder(r)
-
- // a stream should always start with a graph
- g := &marshalGraph{}
-
- err := dec.Decode(g)
- if err != nil {
- return nil, err
- }
-
- // now replay any operations that occurred on the original graph
- for dec.More() {
- s := &streamDecode{}
- err := dec.Decode(s)
- if err != nil {
- return g, err
- }
-
- // the only Type we're concerned with here is Transform to complete the
- // Graph
- if s.Type != typeTransform {
- continue
- }
-
- t := &marshalTransform{}
- err = json.Unmarshal(s.JSON, t)
- if err != nil {
- return g, err
- }
- t.Transform(g)
- }
- return g, nil
-}
-
-// marshalVertexInfo allows encoding arbitrary information about a single
-// Vertex in the logs. These are accumulated for informational display while
-// rebuilding the graph.
-type marshalVertexInfo struct {
- Type string
- Vertex *marshalVertex
- Info string
-}
-
-func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo {
- return &marshalVertexInfo{
- Type: infoType,
- Vertex: newMarshalVertex(v),
- Info: info,
- }
-}
-
-// marshalEdgeInfo allows encoding arbitrary information about a single
-// Edge in the logs. These are accumulated for informational display while
-// rebuilding the graph.
-type marshalEdgeInfo struct {
- Type string
- Edge *marshalEdge
- Info string
-}
-
-func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo {
- return &marshalEdgeInfo{
- Type: infoType,
- Edge: newMarshalEdge(e),
- Info: info,
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go
deleted file mode 100644
index 92b42151..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package dag
-
-import (
- "sync"
-)
-
-// Set is a set data structure.
-type Set struct {
- m map[interface{}]interface{}
- once sync.Once
-}
-
-// Hashable is the interface used by set to get the hash code of a value.
-// If this isn't given, then the value of the item being added to the set
-// itself is used as the comparison value.
-type Hashable interface {
- Hashcode() interface{}
-}
-
-// hashcode returns the hashcode used for set elements.
-func hashcode(v interface{}) interface{} {
- if h, ok := v.(Hashable); ok {
- return h.Hashcode()
- }
-
- return v
-}
-
-// Add adds an item to the set
-func (s *Set) Add(v interface{}) {
- s.once.Do(s.init)
- s.m[hashcode(v)] = v
-}
-
-// Delete removes an item from the set.
-func (s *Set) Delete(v interface{}) {
- s.once.Do(s.init)
- delete(s.m, hashcode(v))
-}
-
-// Include returns true/false of whether a value is in the set.
-func (s *Set) Include(v interface{}) bool {
- s.once.Do(s.init)
- _, ok := s.m[hashcode(v)]
- return ok
-}
-
-// Intersection computes the set intersection with other.
-func (s *Set) Intersection(other *Set) *Set {
- result := new(Set)
- if s == nil {
- return result
- }
- if other != nil {
- for _, v := range s.m {
- if other.Include(v) {
- result.Add(v)
- }
- }
- }
-
- return result
-}
-
-// Difference returns a set with the elements that s has but
-// other doesn't.
-func (s *Set) Difference(other *Set) *Set {
- result := new(Set)
- if s != nil {
- for k, v := range s.m {
- var ok bool
- if other != nil {
- _, ok = other.m[k]
- }
- if !ok {
- result.Add(v)
- }
- }
- }
-
- return result
-}
-
-// Filter returns a set that contains the elements from the receiver
-// where the given callback returns true.
-func (s *Set) Filter(cb func(interface{}) bool) *Set {
- result := new(Set)
-
- for _, v := range s.m {
- if cb(v) {
- result.Add(v)
- }
- }
-
- return result
-}
-
-// Len is the number of items in the set.
-func (s *Set) Len() int {
- if s == nil {
- return 0
- }
-
- return len(s.m)
-}
-
-// List returns the list of set elements.
-func (s *Set) List() []interface{} {
- if s == nil {
- return nil
- }
-
- r := make([]interface{}, 0, len(s.m))
- for _, v := range s.m {
- r = append(r, v)
- }
-
- return r
-}
-
-func (s *Set) init() {
- s.m = make(map[interface{}]interface{})
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go
deleted file mode 100644
index 9d8b25ce..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package dag
-
-// StronglyConnected returns the list of strongly connected components
-// within the Graph g. This information is primarily used by this package
-// for cycle detection, but strongly connected components have widespread
-// use.
-func StronglyConnected(g *Graph) [][]Vertex {
- vs := g.Vertices()
- acct := sccAcct{
- NextIndex: 1,
- VertexIndex: make(map[Vertex]int, len(vs)),
- }
- for _, v := range vs {
- // Recurse on any non-visited nodes
- if acct.VertexIndex[v] == 0 {
- stronglyConnected(&acct, g, v)
- }
- }
- return acct.SCC
-}
-
-func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int {
- // Initial vertex visit
- index := acct.visit(v)
- minIdx := index
-
- for _, raw := range g.DownEdges(v).List() {
- target := raw.(Vertex)
- targetIdx := acct.VertexIndex[target]
-
- // Recurse on successor if not yet visited
- if targetIdx == 0 {
- minIdx = min(minIdx, stronglyConnected(acct, g, target))
- } else if acct.inStack(target) {
- // Check if the vertex is in the stack
- minIdx = min(minIdx, targetIdx)
- }
- }
-
- // Pop the strongly connected components off the stack if
- // this is a root vertex
- if index == minIdx {
- var scc []Vertex
- for {
- v2 := acct.pop()
- scc = append(scc, v2)
- if v2 == v {
- break
- }
- }
-
- acct.SCC = append(acct.SCC, scc)
- }
-
- return minIdx
-}
-
-func min(a, b int) int {
- if a <= b {
- return a
- }
- return b
-}
-
-// sccAcct is used to pass around accounting information for
-// the StronglyConnectedComponents algorithm
-type sccAcct struct {
- NextIndex int
- VertexIndex map[Vertex]int
- Stack []Vertex
- SCC [][]Vertex
-}
-
-// visit assigns an index and pushes a vertex onto the stack
-func (s *sccAcct) visit(v Vertex) int {
- idx := s.NextIndex
- s.VertexIndex[v] = idx
- s.NextIndex++
- s.push(v)
- return idx
-}
-
-// push adds a vertex to the stack
-func (s *sccAcct) push(n Vertex) {
- s.Stack = append(s.Stack, n)
-}
-
-// pop removes a vertex from the stack
-func (s *sccAcct) pop() Vertex {
- n := len(s.Stack)
- if n == 0 {
- return nil
- }
- vertex := s.Stack[n-1]
- s.Stack = s.Stack[:n-1]
- return vertex
-}
-
-// inStack checks if a vertex is in the stack
-func (s *sccAcct) inStack(needle Vertex) bool {
- for _, n := range s.Stack {
- if n == needle {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go
deleted file mode 100644
index 5ddf8ef3..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go
+++ /dev/null
@@ -1,454 +0,0 @@
-package dag
-
-import (
- "errors"
- "log"
- "sync"
- "time"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-// Walker is used to walk every vertex of a graph in parallel.
-//
-// A vertex will only be walked when the dependencies of that vertex have
-// been walked. If two vertices can be walked at the same time, they will be.
-//
-// Update can be called to update the graph. This can be called even during
-// a walk, changing vertices/edges mid-walk. This should be done carefully.
-// If a vertex is removed but has already been executed, the result of that
-// execution (any error) is still returned by Wait. Changing or re-adding
-// a vertex that has already executed has no effect. Changing edges of
-// a vertex that has already executed has no effect.
-//
-// Non-parallelism can be enforced by introducing a lock in your callback
-// function. However, the goroutine overhead of a walk will remain.
-// Walker will create V*2 goroutines (one for each vertex, and a dependency
-// waiter for each vertex). In general this should be of no concern unless
-// there are a huge number of vertices.
-//
-// The walk is depth first by default. This can be changed with the Reverse
-// option.
-//
-// A single walker is only valid for one graph walk. After the walk is complete
-// you must construct a new walker to walk again. State for the walk is never
-// deleted in case vertices or edges are changed.
-type Walker struct {
- // Callback is what is called for each vertex
- Callback WalkFunc
-
- // Reverse, if true, causes the source of an edge to depend on a target.
- // When false (default), the target depends on the source.
- Reverse bool
-
- // changeLock must be held to modify any of the fields below. Only Update
- // should modify these fields. Modifying them outside of Update can cause
- // serious problems.
- changeLock sync.Mutex
- vertices Set
- edges Set
- vertexMap map[Vertex]*walkerVertex
-
- // wait is done when all vertices have executed. It may become "undone"
- // if new vertices are added.
- wait sync.WaitGroup
-
- // diagsMap contains the diagnostics recorded so far for execution,
- // and upstreamFailed contains all the vertices whose problems were
- // caused by upstream failures, and thus whose diagnostics should be
- // excluded from the final set.
- //
- // Readers and writers of either map must hold diagsLock.
- diagsMap map[Vertex]tfdiags.Diagnostics
- upstreamFailed map[Vertex]struct{}
- diagsLock sync.Mutex
-}
-
-type walkerVertex struct {
- // These should only be set once on initialization and never written again.
- // They are not protected by a lock because they are write-once.
-
- // DoneCh is closed when this vertex has completed execution, regardless
- // of success.
- //
- // CancelCh is closed when the vertex should cancel execution. If execution
- // is already complete (DoneCh is closed), this has no effect. Otherwise,
- // execution is cancelled as quickly as possible.
- DoneCh chan struct{}
- CancelCh chan struct{}
-
- // Dependency information. Any changes to any of these fields require
- // holding DepsLock.
- // - // DepsCh is sent a single value that denotes whether the upstream deps - // were successful (no errors). Any value sent means that the upstream - // dependencies are complete. No other values will ever be sent again. - // - // DepsUpdateCh is closed when there is a new DepsCh set. - DepsCh chan bool - DepsUpdateCh chan struct{} - DepsLock sync.Mutex - - // Below is not safe to read/write in parallel. This behavior is - // enforced by changes only happening in Update. Nothing else should - // ever modify these. - deps map[Vertex]chan struct{} - depsCancelCh chan struct{} -} - -// Wait waits for the completion of the walk and returns diagnostics describing -// any problems that arose. Update should be called to populate the walk with -// vertices and edges prior to calling this. -// -// Wait will return as soon as all currently known vertices are complete. -// If you plan on calling Update with more vertices in the future, you -// should not call Wait until after this is done. -func (w *Walker) Wait() tfdiags.Diagnostics { - // Wait for completion - w.wait.Wait() - - var diags tfdiags.Diagnostics - w.diagsLock.Lock() - for v, vDiags := range w.diagsMap { - if _, upstream := w.upstreamFailed[v]; upstream { - // Ignore diagnostics for nodes that had failed upstreams, since - // the downstream diagnostics are likely to be redundant. - continue - } - diags = diags.Append(vDiags) - } - w.diagsLock.Unlock() - - return diags -} - -// Update updates the currently executing walk with the given graph. -// This will perform a diff of the vertices and edges and update the walker. -// Already completed vertices remain completed (including any errors during -// their execution). -// -// This returns immediately once the walker is updated; it does not wait -// for completion of the walk. -// -// Multiple Updates can be called in parallel. Update can be called at any -// time during a walk. -func (w *Walker) Update(g *AcyclicGraph) { - log.Print("[TRACE] dag/walk: updating graph") - var v, e *Set - if g != nil { - v, e = g.vertices, g.edges - } - - // Grab the change lock so no more updates happen but also so that - // no new vertices are executed during this time since we may be - // removing them. - w.changeLock.Lock() - defer w.changeLock.Unlock() - - // Initialize fields - if w.vertexMap == nil { - w.vertexMap = make(map[Vertex]*walkerVertex) - } - - // Calculate all our sets - newEdges := e.Difference(&w.edges) - oldEdges := w.edges.Difference(e) - newVerts := v.Difference(&w.vertices) - oldVerts := w.vertices.Difference(v) - - // Add the new vertices - for _, raw := range newVerts.List() { - v := raw.(Vertex) - - // Add to the waitgroup so our walk is not done until everything finishes - w.wait.Add(1) - - // Add to our own set so we know about it already - log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v)) - w.vertices.Add(raw) - - // Initialize the vertex info - info := &walkerVertex{ - DoneCh: make(chan struct{}), - CancelCh: make(chan struct{}), - deps: make(map[Vertex]chan struct{}), - } - - // Add it to the map and kick off the walk - w.vertexMap[v] = info - } - - // Remove the old vertices - for _, raw := range oldVerts.List() { - v := raw.(Vertex) - - // Get the vertex info so we can cancel it - info, ok := w.vertexMap[v] - if !ok { - // This vertex for some reason was never in our map. This - // shouldn't be possible. 
- continue
- }
-
- // Cancel the vertex
- close(info.CancelCh)
-
- // Delete it out of the map
- delete(w.vertexMap, v)
-
- log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v))
- w.vertices.Delete(raw)
- }
-
- // Add the new edges
- var changedDeps Set
- for _, raw := range newEdges.List() {
- edge := raw.(Edge)
- waiter, dep := w.edgeParts(edge)
-
- // Get the info for the waiter
- waiterInfo, ok := w.vertexMap[waiter]
- if !ok {
- // Vertex doesn't exist... shouldn't be possible but ignore.
- continue
- }
-
- // Get the info for the dep
- depInfo, ok := w.vertexMap[dep]
- if !ok {
- // Vertex doesn't exist... shouldn't be possible but ignore.
- continue
- }
-
- // Add the dependency to our waiter
- waiterInfo.deps[dep] = depInfo.DoneCh
-
- // Record that the deps changed for this waiter
- changedDeps.Add(waiter)
-
- log.Printf(
- "[TRACE] dag/walk: added edge: %q waiting on %q",
- VertexName(waiter), VertexName(dep))
- w.edges.Add(raw)
- }
-
- // Process removed edges
- for _, raw := range oldEdges.List() {
- edge := raw.(Edge)
- waiter, dep := w.edgeParts(edge)
-
- // Get the info for the waiter
- waiterInfo, ok := w.vertexMap[waiter]
- if !ok {
- // Vertex doesn't exist... shouldn't be possible but ignore.
- continue
- }
-
- // Delete the dependency from the waiter
- delete(waiterInfo.deps, dep)
-
- // Record that the deps changed for this waiter
- changedDeps.Add(waiter)
-
- log.Printf(
- "[TRACE] dag/walk: removed edge: %q waiting on %q",
- VertexName(waiter), VertexName(dep))
- w.edges.Delete(raw)
- }
-
- // For each vertex with changed dependencies, we need to kick off
- // a new waiter and notify the vertex of the changes.
- for _, raw := range changedDeps.List() {
- v := raw.(Vertex)
- info, ok := w.vertexMap[v]
- if !ok {
- // Vertex doesn't exist... shouldn't be possible but ignore.
- continue
- }
-
- // Create a new done channel
- doneCh := make(chan bool, 1)
-
- // Create the channel we close for cancellation
- cancelCh := make(chan struct{})
-
- // Build a new deps copy
- deps := make(map[Vertex]<-chan struct{})
- for k, v := range info.deps {
- deps[k] = v
- }
-
- // Update the update channel
- info.DepsLock.Lock()
- if info.DepsUpdateCh != nil {
- close(info.DepsUpdateCh)
- }
- info.DepsCh = doneCh
- info.DepsUpdateCh = make(chan struct{})
- info.DepsLock.Unlock()
-
- // Cancel the older waiter
- if info.depsCancelCh != nil {
- close(info.depsCancelCh)
- }
- info.depsCancelCh = cancelCh
-
- log.Printf(
- "[TRACE] dag/walk: dependencies changed for %q, sending new deps",
- VertexName(v))
-
- // Start the waiter
- go w.waitDeps(v, deps, doneCh, cancelCh)
- }
-
- // Start all the new vertices. We do this at the end so that all
- // the edge waiters and changes are set up above.
- for _, raw := range newVerts.List() {
- v := raw.(Vertex)
- go w.walkVertex(v, w.vertexMap[v])
- }
-}
-
-// edgeParts returns the waiter and the dependency, in that order.
-// The waiter is waiting on the dependency.
-func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) {
- if w.Reverse {
- return e.Source(), e.Target()
- }
-
- return e.Target(), e.Source()
-}
-
-// walkVertex walks a single vertex, waiting for any dependencies before
-// executing the callback.
-func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
- // When we're done executing, lower the waitgroup count
- defer w.wait.Done()
-
- // When we're done, always close our done channel
- defer close(info.DoneCh)
-
- // Wait for our dependencies.
We create a [closed] deps channel so - // that we can immediately fall through to load our actual DepsCh. - var depsSuccess bool - var depsUpdateCh chan struct{} - depsCh := make(chan bool, 1) - depsCh <- true - close(depsCh) - for { - select { - case <-info.CancelCh: - // Cancel - return - - case depsSuccess = <-depsCh: - // Deps complete! Mark as nil to trigger completion handling. - depsCh = nil - - case <-depsUpdateCh: - // New deps, reloop - } - - // Check if we have updated dependencies. This can happen if the - // dependencies were satisfied exactly prior to an Update occurring. - // In that case, we'd like to take into account new dependencies - // if possible. - info.DepsLock.Lock() - if info.DepsCh != nil { - depsCh = info.DepsCh - info.DepsCh = nil - } - if info.DepsUpdateCh != nil { - depsUpdateCh = info.DepsUpdateCh - } - info.DepsLock.Unlock() - - // If we still have no deps channel set, then we're done! - if depsCh == nil { - break - } - } - - // If we passed dependencies, we just want to check once more that - // we're not cancelled, since this can happen just as dependencies pass. - select { - case <-info.CancelCh: - // Cancelled during an update while dependencies completed. - return - default: - } - - // Run our callback or note that our upstream failed - var diags tfdiags.Diagnostics - var upstreamFailed bool - if depsSuccess { - log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v)) - diags = w.Callback(v) - } else { - log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v)) - // This won't be displayed to the user because we'll set upstreamFailed, - // but we need to ensure there's at least one error in here so that - // the failures will cascade downstream. - diags = diags.Append(errors.New("upstream dependencies failed")) - upstreamFailed = true - } - - // Record the result (we must do this after execution because we mustn't - // hold diagsLock while visiting a vertex.) - w.diagsLock.Lock() - if w.diagsMap == nil { - w.diagsMap = make(map[Vertex]tfdiags.Diagnostics) - } - w.diagsMap[v] = diags - if w.upstreamFailed == nil { - w.upstreamFailed = make(map[Vertex]struct{}) - } - if upstreamFailed { - w.upstreamFailed[v] = struct{}{} - } - w.diagsLock.Unlock() -} - -func (w *Walker) waitDeps( - v Vertex, - deps map[Vertex]<-chan struct{}, - doneCh chan<- bool, - cancelCh <-chan struct{}) { - - // For each dependency given to us, wait for it to complete - for dep, depCh := range deps { - DepSatisfied: - for { - select { - case <-depCh: - // Dependency satisfied! - break DepSatisfied - - case <-cancelCh: - // Wait cancelled. Note that we didn't satisfy dependencies - // so that anything waiting on us also doesn't run. - doneCh <- false - return - - case <-time.After(time.Second * 5): - log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q", - VertexName(v), VertexName(dep)) - } - } - } - - // Dependencies satisfied! 
We need to check if any errored - w.diagsLock.Lock() - defer w.diagsLock.Unlock() - for dep := range deps { - if w.diagsMap[dep].HasErrors() { - // One of our dependencies failed, so return false - doneCh <- false - return - } - } - - // All dependencies satisfied and successful - doneCh <- true -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go deleted file mode 100644 index b86bd792..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go +++ /dev/null @@ -1,63 +0,0 @@ -package earlyconfig - -import ( - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// A Config is a node in the tree of modules within a configuration. -// -// The module tree is constructed by following ModuleCall instances recursively -// through the root module transitively into descendent modules. -type Config struct { - // RootModule points to the Config for the root module within the same - // module tree as this module. If this module _is_ the root module then - // this is self-referential. - Root *Config - - // ParentModule points to the Config for the module that directly calls - // this module. If this is the root module then this field is nil. - Parent *Config - - // Path is a sequence of module logical names that traverse from the root - // module to this config. Path is empty for the root module. - // - // This should only be used to display paths to the end-user in rare cases - // where we are talking about the static module tree, before module calls - // have been resolved. In most cases, an addrs.ModuleInstance describing - // a node in the dynamic module tree is better, since it will then include - // any keys resulting from evaluating "count" and "for_each" arguments. - Path addrs.Module - - // ChildModules points to the Config for each of the direct child modules - // called from this module. The keys in this map match the keys in - // Module.ModuleCalls. - Children map[string]*Config - - // Module points to the object describing the configuration for the - // various elements (variables, resources, etc) defined by this module. - Module *tfconfig.Module - - // CallPos is the source position for the header of the module block that - // requested this module. - // - // This field is meaningless for the root module, where its contents are undefined. - CallPos tfconfig.SourcePos - - // SourceAddr is the source address that the referenced module was requested - // from, as specified in configuration. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddr string - - // Version is the specific version that was selected for this module, - // based on version constraints given in configuration. - // - // This field is nil if the module was loaded from a non-registry source, - // since versions are not supported for other sources. - // - // This field is meaningless for the root module, where it will always - // be nil. 
- Version *version.Version -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go deleted file mode 100644 index 3707f273..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go +++ /dev/null @@ -1,144 +0,0 @@ -package earlyconfig - -import ( - "fmt" - "sort" - "strings" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// BuildConfig constructs a Config from a root module by loading all of its -// descendent modules via the given ModuleWalker. -func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - cfg := &Config{ - Module: root, - } - cfg.Root = cfg // Root module is self-referential. - cfg.Children, diags = buildChildModules(cfg, walker) - return cfg, diags -} - -func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := map[string]*Config{} - calls := parent.Module.ModuleCalls - - // We'll sort the calls by their local names so that they'll appear in a - // predictable order in any logging that's produced during the walk. - callNames := make([]string, 0, len(calls)) - for k := range calls { - callNames = append(callNames, k) - } - sort.Strings(callNames) - - for _, callName := range callNames { - call := calls[callName] - path := make([]string, len(parent.Path)+1) - copy(path, parent.Path) - path[len(path)-1] = call.Name - - var vc version.Constraints - if strings.TrimSpace(call.Version) != "" { - var err error - vc, err = version.NewConstraint(call.Version) - if err != nil { - diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ - Severity: tfconfig.DiagError, - Summary: "Invalid version constraint", - Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err), - })) - continue - } - } - - req := ModuleRequest{ - Name: call.Name, - Path: path, - SourceAddr: call.Source, - VersionConstraints: vc, - Parent: parent, - CallPos: call.Pos, - } - - mod, ver, modDiags := walker.LoadModule(&req) - diags = append(diags, modDiags...) - if mod == nil { - // nil can be returned if the source address was invalid and so - // nothing could be loaded whatsoever. LoadModule should've - // returned at least one error diagnostic in that case. - continue - } - - child := &Config{ - Parent: parent, - Root: parent.Root, - Path: path, - Module: mod, - CallPos: call.Pos, - SourceAddr: call.Source, - Version: ver, - } - - child.Children, modDiags = buildChildModules(child, walker) - diags = diags.Append(modDiags) - - ret[call.Name] = child - } - - return ret, diags -} - -// ModuleRequest is used as part of the ModuleWalker interface used with -// function BuildConfig. -type ModuleRequest struct { - // Name is the "logical name" of the module call within configuration. - // This is provided in case the name is used as part of a storage key - // for the module, but implementations must otherwise treat it as an - // opaque string. It is guaranteed to have already been validated as an - // HCL identifier and UTF-8 encoded. 
- Name string - - // Path is a list of logical names that traverse from the root module to - // this module. This can be used, for example, to form a lookup key for - // each distinct module call in a configuration, allowing for multiple - // calls with the same name at different points in the tree. - Path addrs.Module - - // SourceAddr is the source address string provided by the user in - // configuration. - SourceAddr string - - // VersionConstraint is the version constraint applied to the module in - // configuration. - VersionConstraints version.Constraints - - // Parent is the partially-constructed module tree node that the loaded - // module will be added to. Callers may refer to any field of this - // structure except Children, which is still under construction when - // ModuleRequest objects are created and thus has undefined content. - // The main reason this is provided is so that full module paths can - // be constructed for uniqueness. - Parent *Config - - // CallRange is the source position for the header of the "module" block - // in configuration that prompted this request. - CallPos tfconfig.SourcePos -} - -// ModuleWalker is an interface used with BuildConfig. -type ModuleWalker interface { - LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) -} - -// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps -// a callback function, for more convenient use of that interface. -type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) - -func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - return f(req) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go deleted file mode 100644 index b2e1807e..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go +++ /dev/null @@ -1,78 +0,0 @@ -package earlyconfig - -import ( - "fmt" - - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics { - ret := make(tfdiags.Diagnostics, len(diags)) - for i, diag := range diags { - ret[i] = wrapDiagnostic(diag) - } - return ret -} - -func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic { - return wrappedDiagnostic{ - d: diag, - } -} - -type wrappedDiagnostic struct { - d tfconfig.Diagnostic -} - -func (d wrappedDiagnostic) Severity() tfdiags.Severity { - switch d.d.Severity { - case tfconfig.DiagError: - return tfdiags.Error - case tfconfig.DiagWarning: - return tfdiags.Warning - default: - // Should never happen since there are no other severities - return 0 - } -} - -func (d wrappedDiagnostic) Description() tfdiags.Description { - // Since the inspect library doesn't produce precise source locations, - // we include the position information as part of the error message text. - // See the comment inside method "Source" for more information. 
- switch { - case d.d.Pos == nil: - return tfdiags.Description{ - Summary: d.d.Summary, - Detail: d.d.Detail, - } - case d.d.Detail != "": - return tfdiags.Description{ - Summary: d.d.Summary, - Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail), - } - default: - return tfdiags.Description{ - Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line), - } - } -} - -func (d wrappedDiagnostic) Source() tfdiags.Source { - // Since the inspect library is constrained by the lowest common denominator - // between legacy HCL and modern HCL, it only returns ranges at whole-line - // granularity, and that isn't sufficient to populate a tfdiags.Source - // and so we'll just omit ranges altogether and include the line number in - // the Description text. - // - // Callers that want to return nicer errors should consider reacting to - // earlyconfig errors by attempting a follow-up parse with the normal - // config loader, which can produce more precise source location - // information. - return tfdiags.Source{} -} - -func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go deleted file mode 100644 index a9cf10f3..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package earlyconfig is a specialized alternative to the top-level "configs" -// package that does only shallow processing of configuration and is therefore -// able to be much more liberal than the full config loader in what it accepts. -// -// In particular, it can accept both current and legacy HCL syntax, and it -// ignores top-level blocks that it doesn't recognize. These two characteristics -// make this package ideal for dependency-checking use-cases so that we are -// more likely to be able to return an error message about an explicit -// incompatibility than to return a less-actionable message about a construct -// not being supported. -// -// However, its liberal approach also means it should be used sparingly. It -// exists primarily for "terraform init", so that it is able to detect -// incompatibilities more robustly when installing dependencies. For most -// other use-cases, use the "configs" and "configs/configload" packages. -// -// Package earlyconfig is a wrapper around the terraform-config-inspect -// codebase, adding to it just some helper functionality for Terraform's own -// use-cases. -package earlyconfig diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go deleted file mode 100644 index 11eff2eb..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go +++ /dev/null @@ -1,13 +0,0 @@ -package earlyconfig - -import ( - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// LoadModule loads some top-level metadata for the module in the given -// directory. 
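
Since earlyconfig lives under internal/, third-party code cannot import it directly, but for orientation the pieces above compose roughly like this (a sketch; resolveDir is a hypothetical helper that maps a module request to a directory on disk, and error handling is abbreviated):

    rootMod, diags := earlyconfig.LoadModule(".")
    if diags.HasErrors() {
        log.Fatal(diags.Err())
    }

    walker := earlyconfig.ModuleWalkerFunc(func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
        // resolveDir is assumed for illustration, not part of the SDK.
        mod, mDiags := earlyconfig.LoadModule(resolveDir(req))
        return mod, nil, mDiags
    })

    cfg, cfgDiags := earlyconfig.BuildConfig(rootMod, walker)
    diags = diags.Append(cfgDiags)
    _ = cfg // full early-config tree rooted at rootMod
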
-func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) { - mod, diags := tfconfig.LoadModule(dir) - return mod, wrapDiagnostics(diags) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go deleted file mode 100644 index 1bb7b9f2..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go +++ /dev/null @@ -1,152 +0,0 @@ -package flatmap - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" -) - -// Expand takes a map and a key (prefix) and expands that value into -// a more complex structure. This is the reverse of the Flatten operation. -func Expand(m map[string]string, key string) interface{} { - // If the key is exactly a key in the map, just return it - if v, ok := m[key]; ok { - if v == "true" { - return true - } else if v == "false" { - return false - } - - return v - } - - // Check if the key is an array, and if so, expand the array - if v, ok := m[key+".#"]; ok { - // If the count of the key is unknown, then just put the unknown - // value in the value itself. This will be detected by Terraform - // core later. - if v == hcl2shim.UnknownVariableValue { - return v - } - - return expandArray(m, key) - } - - // Check if this is a prefix in the map - prefix := key + "." - for k := range m { - if strings.HasPrefix(k, prefix) { - return expandMap(m, prefix) - } - } - - return nil -} - -func expandArray(m map[string]string, prefix string) []interface{} { - num, err := strconv.ParseInt(m[prefix+".#"], 0, 0) - if err != nil { - panic(err) - } - - // If the number of elements in this array is 0, then return an - // empty slice as there is nothing to expand. Trying to expand it - // anyway could lead to crashes as any child maps, arrays or sets - // that no longer exist are still shown as empty with a count of 0. - if num == 0 { - return []interface{}{} - } - - // NOTE: "num" is not necessarily accurate, e.g. if a user tampers - // with state, so the following code should not crash when given a - // number of items more or less than what's given in num. The - // num key is mainly just a hint that this is a list or set. - - // The Schema "Set" type stores its values in an array format, but - // using numeric hash values instead of ordinal keys. Take the set - // of keys regardless of value, and expand them in numeric order. - // See GH-11042 for more details. 
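
To make that key scheme concrete, here is a sketch of the flat form this function decodes (values invented for illustration):

    flat := map[string]string{
        "tags.#":     "2", // list: ordinal keys
        "tags.0":     "web",
        "tags.1":     "prod",
        "ports.#":    "2", // set: hash-valued keys, still expanded in numeric order
        "ports.1234": "80",
        "ports.9876": "443",
    }
    // Expand(flat, "tags") yields []interface{}{"web", "prod"}
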
- keySet := map[int]bool{}
- computed := map[string]bool{}
- for k := range m {
- if !strings.HasPrefix(k, prefix+".") {
- continue
- }
-
- key := k[len(prefix)+1:]
- idx := strings.Index(key, ".")
- if idx != -1 {
- key = key[:idx]
- }
-
- // skip the count value
- if key == "#" {
- continue
- }
-
- // strip the computed flag if there is one
- if strings.HasPrefix(key, "~") {
- key = key[1:]
- computed[key] = true
- }
-
- k, err := strconv.Atoi(key)
- if err != nil {
- panic(err)
- }
- keySet[int(k)] = true
- }
-
- keysList := make([]int, 0, num)
- for key := range keySet {
- keysList = append(keysList, key)
- }
- sort.Ints(keysList)
-
- result := make([]interface{}, len(keysList))
- for i, key := range keysList {
- keyString := strconv.Itoa(key)
- if computed[keyString] {
- keyString = "~" + keyString
- }
- result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
- }
-
- return result
-}
-
-func expandMap(m map[string]string, prefix string) map[string]interface{} {
- // Submaps may not have a '%' key, so we can't count on this value being
- // here. If we don't have a count, just proceed as if we have a map.
- if count, ok := m[prefix+"%"]; ok && count == "0" {
- return map[string]interface{}{}
- }
-
- result := make(map[string]interface{})
- for k := range m {
- if !strings.HasPrefix(k, prefix) {
- continue
- }
-
- key := k[len(prefix):]
- idx := strings.Index(key, ".")
- if idx != -1 {
- key = key[:idx]
- }
- if _, ok := result[key]; ok {
- continue
- }
-
- // skip the map count value
- if key == "%" {
- continue
- }
-
- result[key] = Expand(m, k[:len(prefix)+len(key)])
- }
-
- return result
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go
deleted file mode 100644
index 9ff6e426..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package flatmap
-
-import (
- "fmt"
- "reflect"
-)
-
-// Flatten takes a structure and turns it into a flat map[string]string.
-//
-// Within the "thing" parameter, only primitive values are allowed. Structs are
-// not supported. Therefore, it can only be slices, maps, primitives, and
-// any combination of those together.
-//
-// See the tests for examples of what inputs are turned into.
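
For example, a sketch in the spirit of those tests:

    flat := Flatten(map[string]interface{}{
        "name": "web",
        "tags": []interface{}{"a", "b"},
        "meta": map[string]interface{}{"env": "prod"},
    })
    // flat now holds:
    //   "name"     = "web"
    //   "tags.#"   = "2" (slice length, written by flattenSlice below)
    //   "tags.0"   = "a"
    //   "tags.1"   = "b"
    //   "meta.env" = "prod"
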
-func Flatten(thing map[string]interface{}) Map { - result := make(map[string]string) - - for k, raw := range thing { - flatten(result, k, reflect.ValueOf(raw)) - } - - return Map(result) -} - -func flatten(result map[string]string, prefix string, v reflect.Value) { - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - result[prefix] = "true" - } else { - result[prefix] = "false" - } - case reflect.Int: - result[prefix] = fmt.Sprintf("%d", v.Int()) - case reflect.Map: - flattenMap(result, prefix, v) - case reflect.Slice: - flattenSlice(result, prefix, v) - case reflect.String: - result[prefix] = v.String() - default: - panic(fmt.Sprintf("Unknown: %s", v)) - } -} - -func flattenMap(result map[string]string, prefix string, v reflect.Value) { - for _, k := range v.MapKeys() { - if k.Kind() == reflect.Interface { - k = k.Elem() - } - - if k.Kind() != reflect.String { - panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) - } - - flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) - } -} - -func flattenSlice(result map[string]string, prefix string, v reflect.Value) { - prefix = prefix + "." - - result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) - for i := 0; i < v.Len(); i++ { - flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go deleted file mode 100644 index 435e04a3..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go +++ /dev/null @@ -1,82 +0,0 @@ -package flatmap - -import ( - "strings" -) - -// Map is a wrapper around map[string]string that provides some helpers -// above it that assume the map is in the format that flatmap expects -// (the result of Flatten). -// -// All modifying functions such as Delete are done in-place unless -// otherwise noted. -type Map map[string]string - -// Contains returns true if the map contains the given key. -func (m Map) Contains(key string) bool { - for _, k := range m.Keys() { - if k == key { - return true - } - } - - return false -} - -// Delete deletes a key out of the map with the given prefix. -func (m Map) Delete(prefix string) { - for k := range m { - match := k == prefix - if !match { - if !strings.HasPrefix(k, prefix) { - continue - } - - if k[len(prefix):len(prefix)+1] != "." { - continue - } - } - - delete(m, k) - } -} - -// Keys returns all of the top-level keys in this map -func (m Map) Keys() []string { - ks := make(map[string]struct{}) - for k := range m { - idx := strings.Index(k, ".") - if idx == -1 { - idx = len(k) - } - - ks[k[:idx]] = struct{}{} - } - - result := make([]string, 0, len(ks)) - for k := range ks { - result = append(result, k) - } - - return result -} - -// Merge merges the contents of the other Map into this one. -// -// This merge is smarter than a simple map iteration because it -// will fully replace arrays and other complex structures that -// are present in this map with the other map's. For example, if -// this map has a 3 element "foo" list, and m2 has a 2 element "foo" -// list, then the result will be that m has a 2 element "foo" -// list. 
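
Concretely, a sketch of that replacement behaviour:

    m := Map{"foo.#": "3", "foo.0": "a", "foo.1": "b", "foo.2": "c"}
    m2 := Map{"foo.#": "2", "foo.0": "x", "foo.1": "y"}
    m.Merge(m2)
    // m is now {"foo.#": "2", "foo.0": "x", "foo.1": "y"}: the old
    // three-element list is deleted wholesale before m2's keys are copied in.
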
-func (m Map) Merge(m2 Map) { - for _, prefix := range m2.Keys() { - m.Delete(prefix) - - for k, v := range m2 { - if strings.HasPrefix(k, prefix) { - m[k] = v - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go deleted file mode 100644 index be5db8b9..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go +++ /dev/null @@ -1,214 +0,0 @@ -package config - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/flatmap" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// Validator is a helper that helps you validate the configuration -// of your resource, resource provider, etc. -// -// At the most basic level, set the Required and Optional lists to be -// specifiers of keys that are required or optional. If a key shows up -// that isn't in one of these two lists, then an error is generated. -// -// The "specifiers" allowed in this is a fairly rich syntax to help -// describe the format of your configuration: -// -// * Basic keys are just strings. For example: "foo" will match the -// "foo" key. -// -// * Nested structure keys can be matched by doing -// "listener.*.foo". This will verify that there is at least one -// listener element that has the "foo" key set. -// -// * The existence of a nested structure can be checked by simply -// doing "listener.*" which will verify that there is at least -// one element in the "listener" structure. This is NOT -// validating that "listener" is an array. It is validating -// that it is a nested structure in the configuration. -// -type Validator struct { - Required []string - Optional []string -} - -func (v *Validator) Validate( - c *terraform.ResourceConfig) (ws []string, es []error) { - // Flatten the configuration so it is easier to reason about - flat := flatmap.Flatten(c.Raw) - - keySet := make(map[string]validatorKey) - for i, vs := range [][]string{v.Required, v.Optional} { - req := i == 0 - for _, k := range vs { - vk, err := newValidatorKey(k, req) - if err != nil { - es = append(es, err) - continue - } - - keySet[k] = vk - } - } - - purged := make([]string, 0) - for _, kv := range keySet { - p, w, e := kv.Validate(flat) - if len(w) > 0 { - ws = append(ws, w...) - } - if len(e) > 0 { - es = append(es, e...) - } - - purged = append(purged, p...) - } - - // Delete all the keys we processed in order to find - // the unknown keys. - for _, p := range purged { - delete(flat, p) - } - - // The rest are unknown - for k := range flat { - es = append(es, fmt.Errorf("Unknown configuration: %s", k)) - } - - return -} - -type validatorKey interface { - // Validate validates the given configuration and returns viewed keys, - // warnings, and errors. 
- Validate(map[string]string) ([]string, []string, []error) -} - -func newValidatorKey(k string, req bool) (validatorKey, error) { - var result validatorKey - - parts := strings.Split(k, ".") - if len(parts) > 1 && parts[1] == "*" { - result = &nestedValidatorKey{ - Parts: parts, - Required: req, - } - } else { - result = &basicValidatorKey{ - Key: k, - Required: req, - } - } - - return result, nil -} - -// basicValidatorKey validates keys that are basic such as "foo" -type basicValidatorKey struct { - Key string - Required bool -} - -func (v *basicValidatorKey) Validate( - m map[string]string) ([]string, []string, []error) { - for k := range m { - // If we have the exact key its a match - if k == v.Key { - return []string{k}, nil, nil - } - } - - if !v.Required { - return nil, nil, nil - } - - return nil, nil, []error{fmt.Errorf( - "Key not found: %s", v.Key)} -} - -type nestedValidatorKey struct { - Parts []string - Required bool -} - -func (v *nestedValidatorKey) validate( - m map[string]string, - prefix string, - offset int) ([]string, []string, []error) { - if offset >= len(v.Parts) { - // We're at the end. Look for a specific key. - v2 := &basicValidatorKey{Key: prefix, Required: v.Required} - return v2.Validate(m) - } - - current := v.Parts[offset] - - // If we're at offset 0, special case to start at the next one. - if offset == 0 { - return v.validate(m, current, offset+1) - } - - // Determine if we're doing a "for all" or a specific key - if current != "*" { - // We're looking at a specific key, continue on. - return v.validate(m, prefix+"."+current, offset+1) - } - - // We're doing a "for all", so we loop over. - countStr, ok := m[prefix+".#"] - if !ok { - if !v.Required { - // It wasn't required, so its no problem. - return nil, nil, nil - } - - return nil, nil, []error{fmt.Errorf( - "Key not found: %s", prefix)} - } - - count, err := strconv.ParseInt(countStr, 0, 0) - if err != nil { - // This shouldn't happen if flatmap works properly - panic("invalid flatmap array") - } - - var e []error - var w []string - u := make([]string, 1, count+1) - u[0] = prefix + ".#" - for i := 0; i < int(count); i++ { - prefix := fmt.Sprintf("%s.%d", prefix, i) - - // Mark that we saw this specific key - u = append(u, prefix) - - // Mark all prefixes of this - for k := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - u = append(u, k) - } - - // If we have more parts, then validate deeper - if offset+1 < len(v.Parts) { - u2, w2, e2 := v.validate(m, prefix, offset+1) - - u = append(u, u2...) - w = append(w, w2...) - e = append(e, e2...) - } - } - - return u, w, e -} - -func (v *nestedValidatorKey) Validate( - m map[string]string) ([]string, []string, []error) { - return v.validate(m, "", 0) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go deleted file mode 100644 index 54899bc6..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go +++ /dev/null @@ -1,24 +0,0 @@ -package didyoumean - -import ( - "github.com/agext/levenshtein" -) - -// NameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. 
-// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func NameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go deleted file mode 100644 index ad8d626c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go +++ /dev/null @@ -1,53 +0,0 @@ -package httpclient - -import ( - "fmt" - "log" - "net/http" - "os" - "strings" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform-plugin-sdk/internal/version" -) - -const uaEnvVar = "TF_APPEND_USER_AGENT" -const userAgentFormat = "Terraform/%s" - -// New returns the DefaultPooledClient from the cleanhttp -// package that will also send a Terraform User-Agent string. -func New() *http.Client { - cli := cleanhttp.DefaultPooledClient() - cli.Transport = &userAgentRoundTripper{ - userAgent: UserAgentString(), - inner: cli.Transport, - } - return cli -} - -type userAgentRoundTripper struct { - inner http.RoundTripper - userAgent string -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", rt.userAgent) - } - log.Printf("[TRACE] HTTP client %s request to %s", req.Method, req.URL.String()) - return rt.inner.RoundTrip(req) -} - -func UserAgentString() string { - ua := fmt.Sprintf(userAgentFormat, version.Version) - - if add := os.Getenv(uaEnvVar); add != "" { - add = strings.TrimSpace(add) - if len(add) > 0 { - ua += " " + add - log.Printf("[DEBUG] Using modified User-Agent: %s", ua) - } - } - - return ua -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go deleted file mode 100644 index 7096ff74..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go +++ /dev/null @@ -1,125 +0,0 @@ -package initwd - -import ( - "io" - "os" - "path/filepath" - "strings" -) - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -func copyDir(dst, src string) error { - src, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if path == src { - return nil - } - - if strings.HasPrefix(filepath.Base(path), ".") { - // Skip any dot files - if info.IsDir() { - return filepath.SkipDir - } else { - return nil - } - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // we don't want to try and copy the same file over itself. - if eq, err := sameFile(path, dstPath); eq { - return nil - } else if err != nil { - return err - } - - // If we have a directory, make that subdirectory, then continue - // the walk. 
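
One aside on the sameFile helper further down in this file: the inode comparison it performs by hand has a portable standard-library equivalent in os.SameFile, roughly:

    fi1, err1 := os.Stat(a)
    fi2, err2 := os.Stat(b)
    same := err1 == nil && err2 == nil && os.SameFile(fi1, fi2)
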
- if info.IsDir() {
- if path == filepath.Join(src, dst) {
- // dst is in src; don't walk it.
- return nil
- }
-
- if err := os.MkdirAll(dstPath, 0755); err != nil {
- return err
- }
-
- return nil
- }
-
- // If the current path is a symlink, recreate the symlink relative to
- // the dst directory
- if info.Mode()&os.ModeSymlink == os.ModeSymlink {
- target, err := os.Readlink(path)
- if err != nil {
- return err
- }
-
- return os.Symlink(target, dstPath)
- }
-
- // If we have a file, copy the contents.
- srcF, err := os.Open(path)
- if err != nil {
- return err
- }
- defer srcF.Close()
-
- dstF, err := os.Create(dstPath)
- if err != nil {
- return err
- }
- defer dstF.Close()
-
- if _, err := io.Copy(dstF, srcF); err != nil {
- return err
- }
-
- // Chmod it
- return os.Chmod(dstPath, info.Mode())
- }
-
- return filepath.Walk(src, walkFn)
-}
-
-// sameFile tries to determine if two paths are the same file.
-// If the paths don't match, we look up the inode on supported systems.
-func sameFile(a, b string) (bool, error) {
- if a == b {
- return true, nil
- }
-
- aIno, err := inode(a)
- if err != nil {
- if os.IsNotExist(err) {
- return false, nil
- }
- return false, err
- }
-
- bIno, err := inode(b)
- if err != nil {
- if os.IsNotExist(err) {
- return false, nil
- }
- return false, err
- }
-
- if aIno > 0 && aIno == bIno {
- return true, nil
- }
-
- return false, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go
deleted file mode 100644
index b9d938db..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Package initwd contains various helper functions used by the "terraform init"
-// command to initialize a working directory.
-//
-// These functions may also be used from testing code to simulate the behaviors
-// of "terraform init" against test fixtures, but should not be used elsewhere
-// in the main code.
-package initwd
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go
deleted file mode 100644
index 641e71de..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go
+++ /dev/null
@@ -1,363 +0,0 @@
-package initwd
-
-import (
- "fmt"
- "github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "sort"
- "strings"
-
- version "github.com/hashicorp/go-version"
- "github.com/hashicorp/terraform-config-inspect/tfconfig"
- "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir"
- "github.com/hashicorp/terraform-plugin-sdk/internal/registry"
- "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-const initFromModuleRootCallName = "root"
-const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "."
-
-// DirFromModule populates the given directory (which must exist and be
-// empty) with the contents of the module at the given source address.
-//
-// It does this by installing the given module and all of its descendent
-// modules in a temporary root directory and then copying the installed
-// files into suitable locations. As a consequence, any diagnostics it
-// generates will reveal the location of this temporary directory to the
-// user.
-// -// This rather roundabout installation approach is taken to ensure that -// installation proceeds in a manner identical to normal module installation. -// -// If the given source address specifies a sub-directory of the given -// package then only the sub-directory and its descendents will be copied -// into the given root directory, which will cause any relative module -// references using ../ from that module to be unresolvable. Error diagnostics -// are produced in that case, to prompt the user to rewrite the source strings -// to be absolute references to the original remote module. -func DirFromModule(rootDir, modulesDir, sourceAddr string, reg *registry.Client, hooks ModuleInstallHooks) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // The way this function works is pretty ugly, but we accept it because - // -from-module is a less important case than normal module installation - // and so it's better to keep this ugly complexity out here rather than - // adding even more complexity to the normal module installer. - - // The target directory must exist but be empty. - { - entries, err := ioutil.ReadDir(rootDir) - if err != nil { - if os.IsNotExist(err) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Target directory does not exist", - fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read target directory", - fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err), - )) - } - return diags - } - haveEntries := false - for _, entry := range entries { - if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" { - continue - } - haveEntries = true - } - if haveEntries { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Can't populate non-empty directory", - fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir), - )) - return diags - } - } - - instDir := filepath.Join(rootDir, ".terraform/init-from-module") - inst := NewModuleInstaller(instDir, reg) - log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr) - os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too - err := os.MkdirAll(instDir, os.ModePerm) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create temporary directory", - fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err), - )) - return diags - } - - instManifest := make(modsdir.Manifest) - retManifest := make(modsdir.Manifest) - - fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr) - fakePos := tfconfig.SourcePos{ - Filename: fakeFilename, - Line: 1, - } - - // -from-module allows relative paths but it's different than a normal - // module address where it'd be resolved relative to the module call - // (which is synthetic, here.) To address this, we'll just patch up any - // relative paths to be absolute paths before we run, ensuring we'll - // get the right result. This also, as an important side-effect, ensures - // that the result will be "downloaded" with go-getter (copied from the - // source location), rather than just recorded as a relative path. - { - maybePath := filepath.ToSlash(sourceAddr) - if maybePath == "." 
|| strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") { - if wd, err := os.Getwd(); err == nil { - sourceAddr = filepath.Join(wd, sourceAddr) - log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr) - } - } - } - - // Now we need to create an artificial root module that will seed our - // installation process. - fakeRootModule := &tfconfig.Module{ - ModuleCalls: map[string]*tfconfig.ModuleCall{ - initFromModuleRootCallName: { - Name: initFromModuleRootCallName, - Source: sourceAddr, - Pos: fakePos, - }, - }, - } - - // wrapHooks filters hook notifications to only include Download calls - // and to trim off the initFromModuleRootCallName prefix. We'll produce - // our own Install notifications directly below. - wrapHooks := installHooksInitDir{ - Wrapped: hooks, - } - getter := reusingGetter{} - _, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter) - diags = append(diags, instDiags...) - if instDiags.HasErrors() { - return diags - } - - // If all of that succeeded then we'll now migrate what was installed - // into the final directory structure. - err = os.MkdirAll(modulesDir, os.ModePerm) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create local modules directory", - fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err), - )) - return diags - } - - recordKeys := make([]string, 0, len(instManifest)) - for k := range instManifest { - recordKeys = append(recordKeys, k) - } - sort.Strings(recordKeys) - - for _, recordKey := range recordKeys { - record := instManifest[recordKey] - - if record.Key == initFromModuleRootCallName { - // We've found the module the user requested, which we must - // now copy into rootDir so it can be used directly. - log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir) - err := copyDir(rootDir, record.Dir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to copy root module", - fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err), - )) - continue - } - - // We'll try to load the newly-copied module here just so we can - // sniff for any module calls that ../ out of the root directory - // and must thus be rewritten to be absolute addresses again. - // For now we can't do this rewriting automatically, but we'll - // generate an error to help the user do it manually. - mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway - if mod != nil { - for _, mc := range mod.ModuleCalls { - if pathTraversesUp(mc.Source) { - packageAddr, givenSubdir := splitAddrSubdir(sourceAddr) - newSubdir := filepath.Join(givenSubdir, mc.Source) - if pathTraversesUp(newSubdir) { - // This should never happen in any reasonable - // configuration since this suggests a path that - // traverses up out of the package root. We'll just - // ignore this, since we'll fail soon enough anyway - // trying to resolve this path when this module is - // loaded. - continue - } - - var newAddr = packageAddr - if newSubdir != "" { - newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir)) - } - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Root module references parent directory", - fmt.Sprintf("The requested module %q refers to a module via its parent directory. 
To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr), - )) - continue - } - } - } - - retManifest[""] = modsdir.Record{ - Key: "", - Dir: rootDir, - } - continue - } - - if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) { - // Ignore the *real* root module, whose key is empty, since - // we're only interested in the module named "root" and its - // descendents. - continue - } - - newKey := record.Key[len(initFromModuleRootKeyPrefix):] - instPath := filepath.Join(modulesDir, newKey) - tempPath := filepath.Join(instDir, record.Key) - - // tempPath won't be present for a module that was installed from - // a relative path, so in that case we just record the installation - // directory and assume it was already copied into place as part - // of its parent. - if _, err := os.Stat(tempPath); err != nil { - if !os.IsNotExist(err) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to stat temporary module install directory", - fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err), - )) - continue - } - - var parentKey string - if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 { - parentKey = newKey[:lastDot] - } else { - parentKey = "" // parent is the root module - } - - parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey] - parentNew := retManifest[parentKey] - - // We need to figure out which portion of our directory is the - // parent package path and which portion is the subdirectory - // under that. - baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir) - if err != nil { - // Should never happen, because we constructed both directories - // from the same base and so they must have a common prefix. - panic(err) - } - - newDir := filepath.Join(parentNew.Dir, baseDirRel) - log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir) - newRecord := record // shallow copy - newRecord.Dir = newDir - newRecord.Key = newKey - retManifest[newKey] = newRecord - hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) - continue - } - - err = os.MkdirAll(instPath, os.ModePerm) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create module install directory", - fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err), - )) - continue - } - - // We copy rather than "rename" here because renaming between directories - // can be tricky in edge-cases like network filesystems, etc. - log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath) - err := copyDir(instPath, tempPath) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to copy descendent module", - fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err), - )) - continue - } - - subDir, err := filepath.Rel(tempPath, record.Dir) - if err != nil { - // Should never happen, because we constructed both directories - // from the same base and so they must have a common prefix. 
- panic(err) - } - - newRecord := record // shallow copy - newRecord.Dir = filepath.Join(instPath, subDir) - newRecord.Key = newKey - retManifest[newKey] = newRecord - hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) - } - - retManifest.WriteSnapshotToDir(modulesDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to write module manifest", - fmt.Sprintf("Error writing module manifest: %s.", err), - )) - } - - if !diags.HasErrors() { - // Try to clean up our temporary directory, but don't worry if we don't - // succeed since it shouldn't hurt anything. - os.RemoveAll(instDir) - } - - return diags -} - -func pathTraversesUp(path string) bool { - return strings.HasPrefix(filepath.ToSlash(path), "../") -} - -// installHooksInitDir is an adapter wrapper for an InstallHooks that -// does some fakery to make downloads look like they are happening in their -// final locations, rather than in the temporary loader we use. -// -// It also suppresses "Install" calls entirely, since InitDirFromModule -// does its own installation steps after the initial installation pass -// has completed. -type installHooksInitDir struct { - Wrapped ModuleInstallHooks - ModuleInstallHooksImpl -} - -func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) { - if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) { - // We won't announce the root module, since hook implementations - // don't expect to see that and the caller will usually have produced - // its own user-facing notification about what it's doing anyway. - return - } - - trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):] - h.Wrapped.Download(trimAddr, packageAddr, version) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go deleted file mode 100644 index 8dc0374b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go +++ /dev/null @@ -1,204 +0,0 @@ -package initwd - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - getter "github.com/hashicorp/go-getter" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" -) - -// We configure our own go-getter detector and getter sets here, because -// the set of sources we support is part of Terraform's documentation and -// so we don't want any new sources introduced in go-getter to sneak in here -// and work even though they aren't documented. This also insulates us from -// any meddling that might be done by other go-getter callers linked into our -// executable. 
- -var goGetterNoDetectors = []getter.Detector{} - -var goGetterDecompressors = map[string]getter.Decompressor{ - "bz2": new(getter.Bzip2Decompressor), - "gz": new(getter.GzipDecompressor), - "xz": new(getter.XzDecompressor), - "zip": new(getter.ZipDecompressor), - - "tar.bz2": new(getter.TarBzip2Decompressor), - "tar.tbz2": new(getter.TarBzip2Decompressor), - - "tar.gz": new(getter.TarGzipDecompressor), - "tgz": new(getter.TarGzipDecompressor), - - "tar.xz": new(getter.TarXzDecompressor), - "txz": new(getter.TarXzDecompressor), -} - -var goGetterGetters = map[string]getter.Getter{ - "file": new(getter.FileGetter), - "gcs": new(getter.GCSGetter), - "git": new(getter.GitGetter), - "hg": new(getter.HgGetter), - "s3": new(getter.S3Getter), - "http": getterHTTPGetter, - "https": getterHTTPGetter, -} - -var getterHTTPClient = cleanhttp.DefaultClient() - -var getterHTTPGetter = &getter.HttpGetter{ - Client: getterHTTPClient, - Netrc: true, -} - -// A reusingGetter is a helper for the module installer that remembers -// the final resolved addresses of all of the sources it has already been -// asked to install, and will copy from a prior installation directory if -// it has the same resolved source address. -// -// The keys in a reusingGetter are resolved and trimmed source addresses -// (with a scheme always present, and without any "subdir" component), -// and the values are the paths where each source was previously installed. -type reusingGetter map[string]string - -// getWithGoGetter retrieves the package referenced in the given address -// into the installation path and then returns the full path to any subdir -// indicated in the address. -// -// The errors returned by this function are those surfaced by the underlying -// go-getter library, which have very inconsistent quality as -// end-user-actionable error messages. At this time we do not have any -// reasonable way to improve these error messages at this layer because -// the underlying errors are not separately recognizable. 
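
As a rough illustration of the resolution steps the function below performs, using go-getter's public helpers (the address is invented, and pwd stands in for an assumed working directory):

    packageAddr, subDir := getter.SourceDirSubdir("github.com/hashicorp/example//modules/vpc")
    // packageAddr: "github.com/hashicorp/example", subDir: "modules/vpc"

    realAddr, err := getter.Detect(packageAddr, pwd, getter.Detectors)
    // with the GitHub detector this yields something like
    // "git::https://github.com/hashicorp/example.git"
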
-func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) { - packageAddr, subDir := splitAddrSubdir(addr) - - log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath) - - realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors) - if err != nil { - return "", err - } - - if isMaybeRelativeLocalPath(realAddr) { - return "", &MaybeRelativePathErr{addr} - } - - var realSubDir string - realAddr, realSubDir = splitAddrSubdir(realAddr) - if realSubDir != "" { - subDir = filepath.Join(realSubDir, subDir) - } - - if realAddr != packageAddr { - log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr) - } - - if prevDir, exists := g[realAddr]; exists { - log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath) - err := os.Mkdir(instPath, os.ModePerm) - if err != nil { - return "", fmt.Errorf("failed to create directory %s: %s", instPath, err) - } - err = copyDir(instPath, prevDir) - if err != nil { - return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) - } - } else { - log.Printf("[TRACE] fetching %q to %q", realAddr, instPath) - client := getter.Client{ - Src: realAddr, - Dst: instPath, - Pwd: instPath, - - Mode: getter.ClientModeDir, - - Detectors: goGetterNoDetectors, // we already did detection above - Decompressors: goGetterDecompressors, - Getters: goGetterGetters, - } - err = client.Get() - if err != nil { - return "", err - } - // Remember where we installed this so we might reuse this directory - // on subsequent calls to avoid re-downloading. - g[realAddr] = instPath - } - - // Our subDir string can contain wildcards until this point, so that - // e.g. a subDir of * can expand to one top-level directory in a .tar.gz - // archive. Now that we've expanded the archive successfully we must - // resolve that into a concrete path. - var finalDir string - if subDir != "" { - finalDir, err = getter.SubdirGlob(instPath, subDir) - log.Printf("[TRACE] expanded %q to %q", subDir, finalDir) - if err != nil { - return "", err - } - } else { - finalDir = instPath - } - - // If we got this far then we have apparently succeeded in downloading - // the requested object! - return filepath.Clean(finalDir), nil -} - -// splitAddrSubdir splits the given address (which is assumed to be a -// registry address or go-getter-style address) into a package portion -// and a sub-directory portion. -// -// The package portion defines what should be downloaded and then the -// sub-directory portion, if present, specifies a sub-directory within -// the downloaded object (an archive, VCS repository, etc) that contains -// the module's configuration files. -// -// The subDir portion will be returned as empty if no subdir separator -// ("//") is present in the address. 
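
For example:

    pkg, sub := splitAddrSubdir("git::https://example.com/network.git//modules/vpc")
    // pkg: "git::https://example.com/network.git", sub: "modules/vpc"

    pkg, sub = splitAddrSubdir("hashicorp/consul/aws")
    // pkg: "hashicorp/consul/aws", sub: "" (no "//" separator present)
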
-func splitAddrSubdir(addr string) (packageAddr, subDir string) { - return getter.SourceDirSubdir(addr) -} - -var localSourcePrefixes = []string{ - "./", - "../", - ".\\", - "..\\", -} - -func isLocalSourceAddr(addr string) bool { - for _, prefix := range localSourcePrefixes { - if strings.HasPrefix(addr, prefix) { - return true - } - } - return false -} - -func isRegistrySourceAddr(addr string) bool { - _, err := regsrc.ParseModuleSource(addr) - return err == nil -} - -type MaybeRelativePathErr struct { - Addr string -} - -func (e *MaybeRelativePathErr) Error() string { - return fmt.Sprintf("Terraform cannot determine the module source for %s", e.Addr) -} - -func isMaybeRelativeLocalPath(addr string) bool { - if strings.HasPrefix(addr, "file://") { - _, err := os.Stat(addr[7:]) - if err != nil { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go deleted file mode 100644 index 1150b093..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux darwin openbsd netbsd solaris dragonfly - -package initwd - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return st.Ino, nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go deleted file mode 100644 index 30532f54..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build freebsd - -package initwd - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return uint64(st.Ino), nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go deleted file mode 100644 index 3ed58e4b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package initwd - -// no syscall.Stat_t on windows, return 0 for inodes -func inode(path string) (uint64, error) { - return 0, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go deleted file mode 100644 index 8e055756..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go +++ /dev/null @@ -1,558 +0,0 @@ -package initwd - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - 
"github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -type ModuleInstaller struct { - modsDir string - reg *registry.Client -} - -func NewModuleInstaller(modsDir string, reg *registry.Client) *ModuleInstaller { - return &ModuleInstaller{ - modsDir: modsDir, - reg: reg, - } -} - -// InstallModules analyses the root module in the given directory and installs -// all of its direct and transitive dependencies into the given modules -// directory, which must already exist. -// -// Since InstallModules makes possibly-time-consuming calls to remote services, -// a hook interface is supported to allow the caller to be notified when -// each module is installed and, for remote modules, when downloading begins. -// LoadConfig guarantees that two hook calls will not happen concurrently but -// it does not guarantee any particular ordering of hook calls. This mechanism -// is for UI feedback only and does not give the caller any control over the -// process. -// -// If modules are already installed in the target directory, they will be -// skipped unless their source address or version have changed or unless -// the upgrade flag is set. -// -// InstallModules never deletes any directory, except in the case where it -// needs to replace a directory that is already present with a newly-extracted -// package. -// -// If the returned diagnostics contains errors then the module installation -// may have wholly or partially completed. Modules must be loaded in order -// to find their dependencies, so this function does many of the same checks -// as LoadConfig as a side-effect. -// -// If successful (the returned diagnostics contains no errors) then the -// first return value is the early configuration tree that was constructed by -// the installation process. -func (i *ModuleInstaller) InstallModules(rootDir string, upgrade bool, hooks ModuleInstallHooks) (*earlyconfig.Config, tfdiags.Diagnostics) { - log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir) - - rootMod, diags := earlyconfig.LoadModule(rootDir) - if rootMod == nil { - return nil, diags - } - - manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read modules manifest file", - fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err), - )) - return nil, diags - } - - getter := reusingGetter{} - cfg, instDiags := i.installDescendentModules(rootMod, rootDir, manifest, upgrade, hooks, getter) - diags = append(diags, instDiags...) - - return cfg, diags -} - -func (i *ModuleInstaller) installDescendentModules(rootMod *tfconfig.Module, rootDir string, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, getter reusingGetter) (*earlyconfig.Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if hooks == nil { - // Use our no-op implementation as a placeholder - hooks = ModuleInstallHooksImpl{} - } - - // Create a manifest record for the root module. This will be used if - // there are any relative-pathed modules in the root. 
- manifest[""] = modsdir.Record{ - Key: "", - Dir: rootDir, - } - - cfg, cDiags := earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc( - func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - - key := manifest.ModuleKey(req.Path) - instPath := i.packageInstallPath(req.Path) - - log.Printf("[DEBUG] Module installer: begin %s", key) - - // First we'll check if we need to upgrade/replace an existing - // installed module, and delete it out of the way if so. - replace := upgrade - if !replace { - record, recorded := manifest[key] - switch { - case !recorded: - log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key) - replace = true - case record.SourceAddr != req.SourceAddr: - log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr) - replace = true - case record.Version != nil && !req.VersionConstraints.Check(record.Version): - log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraints) - replace = true - } - } - - // If we _are_ planning to replace this module, then we'll remove - // it now so our installation code below won't conflict with any - // existing remnants. - if replace { - if _, recorded := manifest[key]; recorded { - log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key) - } - delete(manifest, key) - // Deleting a module invalidates all of its descendent modules too. - keyPrefix := key + "." - for subKey := range manifest { - if strings.HasPrefix(subKey, keyPrefix) { - if _, recorded := manifest[subKey]; recorded { - log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey) - } - delete(manifest, subKey) - } - } - } - - record, recorded := manifest[key] - if !recorded { - // Clean up any stale cache directory that might be present. - // If this is a local (relative) source then the dir will - // not exist, but we'll ignore that. - log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key) - err := os.RemoveAll(instPath) - if err != nil && !os.IsNotExist(err) { - log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to remove local module cache", - fmt.Sprintf( - "Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s", - instPath, err, - ), - )) - return nil, nil, diags - } - } else { - // If this module is already recorded and its root directory - // exists then we will just load what's already there and - // keep our existing record. - info, err := os.Stat(record.Dir) - if err == nil && info.IsDir() { - mod, mDiags := earlyconfig.LoadModule(record.Dir) - diags = diags.Append(mDiags) - - log.Printf("[TRACE] ModuleInstaller: Module installer: %s %s already installed in %s", key, record.Version, record.Dir) - return mod, record.Version, diags - } - } - - // If we get down here then it's finally time to actually install - // the module. There are some variants to this process depending - // on what type of module source address we have. - switch { - - case isLocalSourceAddr(req.SourceAddr): - log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, req.SourceAddr) - mod, mDiags := i.installLocalModule(req, key, manifest, hooks) - diags = append(diags, mDiags...) 
- return mod, nil, diags - - case isRegistrySourceAddr(req.SourceAddr): - addr, err := regsrc.ParseModuleSource(req.SourceAddr) - if err != nil { - // Should never happen because isRegistrySourceAddr already validated - panic(err) - } - log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr) - - mod, v, mDiags := i.installRegistryModule(req, key, instPath, addr, manifest, hooks, getter) - diags = append(diags, mDiags...) - return mod, v, diags - - default: - log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, req.SourceAddr) - - mod, mDiags := i.installGoGetterModule(req, key, instPath, manifest, hooks, getter) - diags = append(diags, mDiags...) - return mod, nil, diags - } - - }, - )) - diags = append(diags, cDiags...) - - err := manifest.WriteSnapshotToDir(i.modsDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to update module manifest", - fmt.Sprintf("Unable to write the module manifest file: %s", err), - )) - } - - return cfg, diags -} - -func (i *ModuleInstaller) installLocalModule(req *earlyconfig.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*tfconfig.Module, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - parentKey := manifest.ModuleKey(req.Parent.Path) - parentRecord, recorded := manifest[parentKey] - if !recorded { - // This is indicative of a bug rather than a user-actionable error - panic(fmt.Errorf("missing manifest record for parent module %s", parentKey)) - } - - if len(req.VersionConstraints) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid version constraint", - fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } - - // For local sources we don't actually need to modify the - // filesystem at all because the parent already wrote - // the files we need, and so we just load up what's already here. - newDir := filepath.Join(parentRecord.Dir, req.SourceAddr) - - log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir) - // it is possible that the local directory is a symlink - newDir, err := filepath.EvalSymlinks(newDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("Unable to evaluate directory symlink: %s", err.Error()), - )) - } - - mod, mDiags := earlyconfig.LoadModule(newDir) - if mod == nil { - // nil indicates missing or unreadable directory, so we'll - // discard the returned diags and return a more specific - // error message here. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } else { - diags = diags.Append(mDiags) - } - - // Note the local location in our manifest. 
- manifest[key] = modsdir.Record{ - Key: key, - Dir: newDir, - SourceAddr: req.SourceAddr, - } - log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir) - hooks.Install(key, nil, newDir) - - return mod, diags -} - -func (i *ModuleInstaller) installRegistryModule(req *earlyconfig.ModuleRequest, key string, instPath string, addr *regsrc.Module, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - hostname, err := addr.SvcHost() - if err != nil { - // If it looks like the user was trying to use punycode then we'll generate - // a specialized error for that case. We require the unicode form of - // hostname so that hostnames are always human-readable in configuration - // and punycode can't be used to hide a malicious module hostname. - if strings.HasPrefix(addr.RawHost.Raw, "xn--") { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid module registry hostname", - fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not an acceptable hostname. Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid module registry hostname", - fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not a valid hostname.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } - return nil, nil, diags - } - - reg := i.reg - - log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname) - resp, err := reg.ModuleVersions(addr) - if err != nil { - if registry.IsModuleNotFound(err) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Module not found", - fmt.Sprintf("Module %q (from %s:%d) cannot be found in the module registry at %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error accessing remote module registry", - fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname, err), - )) - } - return nil, nil, diags - } - - // The response might contain information about dependencies to allow us - // to potentially optimize future requests, but we don't currently do that - // and so for now we'll just take the first item which is guaranteed to - // be the address we requested. - if len(resp.Modules) < 1 { - // Should never happen, but since this is a remote service that may - // be implemented by third-parties we will handle it gracefully. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid response from remote module registry", - fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - return nil, nil, diags - } - - modMeta := resp.Modules[0] - - var latestMatch *version.Version - var latestVersion *version.Version - for _, mv := range modMeta.Versions { - v, err := version.NewVersion(mv.Version) - if err != nil { - // Should never happen if the registry server is compliant with - // the protocol, but we'll warn if not to assist someone who - // might be developing a module registry server. 
- diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Invalid response from remote module registry", - fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which Terraform ignored.", hostname, mv.Version, req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - continue - } - - // If we've found a pre-release version then we'll ignore it unless - // it was exactly requested. - if v.Prerelease() != "" && req.VersionConstraints.String() != v.String() { - log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v) - continue - } - - if latestVersion == nil || v.GreaterThan(latestVersion) { - latestVersion = v - } - - if req.VersionConstraints.Check(v) { - if latestMatch == nil || v.GreaterThan(latestMatch) { - latestMatch = v - } - } - } - - if latestVersion == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Module has no versions", - fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallPos.Filename, req.CallPos.Line, hostname), - )) - return nil, nil, diags - } - - if latestMatch == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unresolvable module version constraint", - fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallPos.Filename, req.CallPos.Line, latestVersion), - )) - return nil, nil, diags - } - - // Report up to the caller that we're about to start downloading. - packageAddr, _ := splitAddrSubdir(req.SourceAddr) - hooks.Download(key, packageAddr, latestMatch) - - // If we manage to get down here then we've found a suitable version to - // install, so we need to ask the registry where we should download it from. - // The response to this is a go-getter-style address string. - dlAddr, err := reg.ModuleLocation(addr, latestMatch.String()) - if err != nil { - log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid response from remote module registry", - fmt.Sprintf("The remote registry at %s failed to return a download URL for %s %s.", hostname, addr, latestMatch), - )) - return nil, nil, diags - } - - log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, addr, latestMatch, dlAddr) - - modDir, err := getter.getWithGoGetter(instPath, dlAddr) - if err != nil { - // Errors returned by go-getter have very inconsistent quality as - // end-user error messages, but for now we're accepting that because - // we have no way to recognize any specific errors to improve them - // and masking the error entirely would hide valuable diagnostic - // information from the user. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to download module", - fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, dlAddr, err), - )) - return nil, nil, diags - } - - log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr, modDir) - - if addr.RawSubmodule != "" { - // Append the user's requested subdirectory to any subdirectory that - // was implied by any of the nested layers we expanded within go-getter. - modDir = filepath.Join(modDir, addr.RawSubmodule) - } - - log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir) - - // Finally we are ready to try actually loading the module. 
- mod, mDiags := earlyconfig.LoadModule(modDir) - if mod == nil { - // nil indicates missing or unreadable directory, so we'll - // discard the returned diags and return a more specific - // error message here. For registry modules this actually - // indicates a bug in the code above, since it's not the - // user's responsibility to create the directory in this case. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir), - )) - } else { - diags = append(diags, mDiags...) - } - - // Note the local location in our manifest. - manifest[key] = modsdir.Record{ - Key: key, - Version: latestMatch, - Dir: modDir, - SourceAddr: req.SourceAddr, - } - log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) - hooks.Install(key, latestMatch, modDir) - - return mod, latestMatch, diags -} - -func (i *ModuleInstaller) installGoGetterModule(req *earlyconfig.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Report up to the caller that we're about to start downloading. - packageAddr, _ := splitAddrSubdir(req.SourceAddr) - hooks.Download(key, packageAddr, nil) - - if len(req.VersionConstraints) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid version constraint", - fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a non Registry URL.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - return nil, diags - } - - modDir, err := getter.getWithGoGetter(instPath, req.SourceAddr) - if err != nil { - if _, ok := err.(*MaybeRelativePathErr); ok { - log.Printf( - "[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../", - req.SourceAddr, - ) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Module not found", - fmt.Sprintf( - "The module address %q could not be resolved.\n\n"+ - "If you intended this as a path relative to the current "+ - "module, use \"./%s\" instead. The \"./\" prefix "+ - "indicates that the address is a relative filesystem path.", - req.SourceAddr, req.SourceAddr, - ), - )) - } else { - // Errors returned by go-getter have very inconsistent quality as - // end-user error messages, but for now we're accepting that because - // we have no way to recognize any specific errors to improve them - // and masking the error entirely would hide valuable diagnostic - // information from the user. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to download module", - fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s", req.Name, req.CallPos.Filename, req.CallPos.Line, packageAddr, err), - )) - } - return nil, diags - - } - - log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, req.SourceAddr, modDir) - - mod, mDiags := earlyconfig.LoadModule(modDir) - if mod == nil { - // nil indicates missing or unreadable directory, so we'll - // discard the returned diags and return a more specific - // error message here. For go-getter modules this actually - // indicates a bug in the code above, since it's not the - // user's responsibility to create the directory in this case. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("The directory %s could not be read. 
This is a bug in Terraform and should be reported.", modDir), - )) - } else { - diags = append(diags, mDiags...) - } - - // Note the local location in our manifest. - manifest[key] = modsdir.Record{ - Key: key, - Dir: modDir, - SourceAddr: req.SourceAddr, - } - log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) - hooks.Install(key, nil, modDir) - - return mod, diags -} - -func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string { - return filepath.Join(i.modsDir, strings.Join(modulePath, ".")) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go deleted file mode 100644 index 817a6dc8..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go +++ /dev/null @@ -1,36 +0,0 @@ -package initwd - -import ( - version "github.com/hashicorp/go-version" -) - -// ModuleInstallHooks is an interface used to provide notifications about the -// installation process being orchestrated by InstallModules. -// -// This interface may have new methods added in future, so implementers should -// embed InstallHooksImpl to get no-op implementations of any unimplemented -// methods. -type ModuleInstallHooks interface { - // Download is called for modules that are retrieved from a remote source - // before that download begins, to allow a caller to give feedback - // on progress through a possibly-long sequence of downloads. - Download(moduleAddr, packageAddr string, version *version.Version) - - // Install is called for each module that is installed, even if it did - // not need to be downloaded from a remote source. - Install(moduleAddr string, version *version.Version, localPath string) -} - -// ModuleInstallHooksImpl is a do-nothing implementation of InstallHooks that -// can be embedded in another implementation struct to allow only partial -// implementation of the interface. -type ModuleInstallHooksImpl struct { -} - -func (h ModuleInstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) { -} - -func (h ModuleInstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) { -} - -var _ ModuleInstallHooks = ModuleInstallHooksImpl{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go deleted file mode 100644 index 8f89909c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package blocktoattr includes some helper functions that can perform -// preprocessing on a HCL body where a configschema.Block schema is available -// in order to allow list and set attributes defined in the schema to be -// optionally written by the user as block syntax. 
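// A hypothetical example of the equivalence this package restores: given a
// schema attribute "rule" of type list(object({action = string})), both
// spellings below decode to the same value after fixup:
//
//	rule = [{ action = "allow" }]   # attribute syntax
//
//	rule { action = "allow" }       # nested block syntax
//
// (The attribute name and object type are illustrative, not from this file.)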
-package blocktoattr
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go
deleted file mode 100644
index f782f6b7..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package blocktoattr
-
-import (
-  "github.com/hashicorp/hcl/v2"
-  "github.com/hashicorp/hcl/v2/hcldec"
-  "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
-  "github.com/zclconf/go-cty/cty"
-)
-
-// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization
-// functionality to allow attributes that are specified as having list or set
-// type in the schema to be written with HCL block syntax as multiple nested
-// blocks with the attribute name as the block type.
-//
-// This partially restores some of the block/attribute confusion from HCL 1
-// so that existing patterns that depended on that confusion can continue to
-// be used in the short term while we settle on a longer-term strategy.
-//
-// Most of the fixup work is actually done when the returned body is
-// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual
-// decode of the body might not, if the content of the body is so ambiguous
-// that there's no safe way to map it to the schema.
-func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body {
-  // The schema should never be nil, but in practice it seems to be sometimes
-  // in the presence of poorly-configured test mocks, so we'll be robust
-  // by synthesizing an empty one.
-  if schema == nil {
-    schema = &configschema.Block{}
-  }
-
-  return &fixupBody{
-    original: body,
-    schema:   schema,
-    names:    ambiguousNames(schema),
-  }
-}
-
-type fixupBody struct {
-  original hcl.Body
-  schema   *configschema.Block
-  names    map[string]struct{}
-}
-
-// Content decodes content from the body. The given schema must be the lower-level
-// representation of the same schema that was previously passed to FixUpBlockAttrs,
-// or else the result is undefined.
-func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
-  schema = b.effectiveSchema(schema)
-  content, diags := b.original.Content(schema)
-  return b.fixupContent(content), diags
-}
-
-func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
-  schema = b.effectiveSchema(schema)
-  content, remain, diags := b.original.PartialContent(schema)
-  remain = &fixupBody{
-    original: remain,
-    schema:   b.schema,
-    names:    b.names,
-  }
-  return b.fixupContent(content), remain, diags
-}
-
-func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
-  // FixUpBlockAttrs is not intended to be used in situations where we'd use
-  // JustAttributes, so we just pass this through verbatim to complete our
-  // implementation of hcl.Body.
-  return b.original.JustAttributes()
-}
-
-func (b *fixupBody) MissingItemRange() hcl.Range {
-  return b.original.MissingItemRange()
-}
-
-// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's
-// content to determine whether the author has used attribute or block syntax
-// for each of the ambiguous attributes where both are permitted.
-//
-// The resulting schema will always contain all of the same names that are
-// in the given schema, but some attribute schemas may instead be replaced by
-// block header schemas.
-func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema {
-  return effectiveSchema(given, b.original, b.names, true)
-}
-
-func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent {
-  var ret hcl.BodyContent
-  ret.Attributes = make(hcl.Attributes)
-  for name, attr := range content.Attributes {
-    ret.Attributes[name] = attr
-  }
-  blockAttrVals := make(map[string][]*hcl.Block)
-  for _, block := range content.Blocks {
-    if _, exists := b.names[block.Type]; exists {
-      // If we get here then we've found a block type whose instances need
-      // to be re-interpreted as a list-of-objects attribute. We'll gather
-      // those up and fix them up below.
-      blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block)
-      continue
-    }
-
-    // We need to now re-wrap our inner body so it will be subject to the
-    // same attribute-as-block fixup when recursively decoded.
-    retBlock := *block // shallow copy
-    if blockS, ok := b.schema.BlockTypes[block.Type]; ok {
-      // Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then
-      retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block)
-    }
-
-    ret.Blocks = append(ret.Blocks, &retBlock)
-  }
-  // Now we'll install synthetic attributes for each of our fixups. We can't
-  // do this exactly because HCL's information model expects an attribute
-  // to be a single decl but we have multiple separate blocks. We'll
-  // approximate things, then, by using only our first block for the source
-  // location information. (We are guaranteed at least one by the above logic.)
-  for name, blocks := range blockAttrVals {
-    ret.Attributes[name] = &hcl.Attribute{
-      Name: name,
-      Expr: &fixupBlocksExpr{
-        blocks: blocks,
-        ety:    b.schema.Attributes[name].Type.ElementType(),
-      },
-
-      Range:     blocks[0].DefRange,
-      NameRange: blocks[0].TypeRange,
-    }
-  }
-  return &ret
-}
-
-type fixupBlocksExpr struct {
-  blocks hcl.Blocks
-  ety    cty.Type
-}
-
-func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
-  // In order to produce a suitable value for our expression we need to
-  // now decode the whole descendent block structure under each of our block
-  // bodies.
-  //
-  // That requires us to do something rather strange: we must construct a
-  // synthetic block type schema derived from the element type of the
-  // attribute, thus inverting our usual direction of lowering a schema
-  // into an implied type. Because a type is less detailed than a schema,
-  // the result is imprecise and in particular will just consider all
-  // the attributes to be optional and let the provider eventually decide
-  // whether to return errors if they turn out to be null when required.
-  schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety
-  spec := schema.DecoderSpec()
-
-  vals := make([]cty.Value, len(e.blocks))
-  var diags hcl.Diagnostics
-  for i, block := range e.blocks {
-    body := FixUpBlockAttrs(block.Body, schema)
-    val, blockDiags := hcldec.Decode(body, spec, ctx)
-    diags = append(diags, blockDiags...)
-    if val == cty.NilVal {
-      val = cty.UnknownVal(e.ety)
-    }
-    vals[i] = val
-  }
-  if len(vals) == 0 {
-    return cty.ListValEmpty(e.ety), diags
-  }
-  return cty.ListVal(vals), diags
-}
-
-func (e *fixupBlocksExpr) Variables() []hcl.Traversal {
-  var ret []hcl.Traversal
-  schema := SchemaForCtyElementType(e.ety)
-  spec := schema.DecoderSpec()
-  for _, block := range e.blocks {
-    ret = append(ret, hcldec.Variables(block.Body, spec)...)
- } - return ret -} - -func (e *fixupBlocksExpr) Range() hcl.Range { - // This is not really an appropriate range for the expression but it's - // the best we can do from here. - return e.blocks[0].DefRange -} - -func (e *fixupBlocksExpr) StartRange() hcl.Range { - return e.blocks[0].DefRange -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go deleted file mode 100644 index 129ee0e8..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go +++ /dev/null @@ -1,119 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -func ambiguousNames(schema *configschema.Block) map[string]struct{} { - if schema == nil { - return nil - } - ambiguousNames := make(map[string]struct{}) - for name, attrS := range schema.Attributes { - aty := attrS.Type - if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { - ambiguousNames[name] = struct{}{} - } - } - return ambiguousNames -} - -func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { - ret := &hcl.BodySchema{} - - appearsAsBlock := make(map[string]struct{}) - { - // We'll construct some throwaway schemas here just to probe for - // whether each of our ambiguous names seems to be being used as - // an attribute or a block. We need to check both because in JSON - // syntax we rely on the schema to decide between attribute or block - // interpretation and so JSON will always answer yes to both of - // these questions and we want to prefer the attribute interpretation - // in that case. - var probeSchema hcl.BodySchema - - for name := range ambiguousNames { - probeSchema = hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: name, - }, - }, - } - content, _, _ := body.PartialContent(&probeSchema) - if _, exists := content.Attributes[name]; exists { - // Can decode as an attribute, so we'll go with that. - continue - } - probeSchema = hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: name, - }, - }, - } - content, _, _ = body.PartialContent(&probeSchema) - if len(content.Blocks) > 0 || dynamicExpanded { - // A dynamic block with an empty iterator returns nothing. - // If there's no attribute and we have either a block or a - // dynamic expansion, we need to rewrite this one as a - // block for a successful result. - appearsAsBlock[name] = struct{}{} - } - } - if !dynamicExpanded { - // If we're deciding for a context where dynamic blocks haven't - // been expanded yet then we need to probe for those too. 
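// For example (names hypothetical), a configuration containing
//
//	dynamic "rule" {
//	  for_each = var.rules
//	  content { action = rule.value }
//	}
//
// yields no literal "rule" blocks before expansion, so this extra probe is
// what records "rule" as appearing in block form.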
- probeSchema = hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "dynamic", - LabelNames: []string{"type"}, - }, - }, - } - content, _, _ := body.PartialContent(&probeSchema) - for _, block := range content.Blocks { - if _, exists := ambiguousNames[block.Labels[0]]; exists { - appearsAsBlock[block.Labels[0]] = struct{}{} - } - } - } - } - - for _, attrS := range given.Attributes { - if _, exists := appearsAsBlock[attrS.Name]; exists { - ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ - Type: attrS.Name, - }) - } else { - ret.Attributes = append(ret.Attributes, attrS) - } - } - - // Anything that is specified as a block type in the input schema remains - // that way by just passing through verbatim. - ret.Blocks = append(ret.Blocks, given.Blocks...) - - return ret -} - -// SchemaForCtyElementType converts a cty object type into an -// approximately-equivalent configschema.Block representing the element of -// a list or set. If the given type is not an object type then this -// function will panic. -func SchemaForCtyElementType(ty cty.Type) *configschema.Block { - atys := ty.AttributeTypes() - ret := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute, len(atys)), - } - for name, aty := range atys { - ret.Attributes[name] = &configschema.Attribute{ - Type: aty, - Optional: true, - } - } - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go deleted file mode 100644 index f5ed1c53..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go +++ /dev/null @@ -1,45 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/dynblock" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" -) - -// ExpandedVariables finds all of the global variables referenced in the -// given body with the given schema while taking into account the possibilities -// both of "dynamic" blocks being expanded and the possibility of certain -// attributes being written instead as nested blocks as allowed by the -// FixUpBlockAttrs function. -// -// This function exists to allow variables to be analyzed prior to dynamic -// block expansion while also dealing with the fact that dynamic block expansion -// might in turn produce nested blocks that are subject to FixUpBlockAttrs. -// -// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, -// which is itself a drop-in replacement for hcldec.Variables. -func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { - rootNode := dynblock.WalkVariables(body) - return walkVariables(rootNode, body, schema) -} - -func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { - givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) - ambiguousNames := ambiguousNames(schema) - effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) - vars, children := node.Visit(effectiveRawSchema) - - for _, child := range children { - if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { - vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) 
- } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { - // ☝️Check for collection type before element type, because if this is a mis-placed reference, - // a panic here will prevent other useful diags from being elevated to show the user what to fix - synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) - vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) - } - } - - return vars -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go deleted file mode 100644 index 13f7ed93..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go +++ /dev/null @@ -1,34 +0,0 @@ -package lang - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// Data is an interface whose implementations can provide cty.Value -// representations of objects identified by referenceable addresses from -// the addrs package. -// -// This interface will grow each time a new type of reference is added, and so -// implementations outside of the Terraform codebases are not advised. -// -// Each method returns a suitable value and optionally some diagnostics. If the -// returned diagnostics contains errors then the type of the returned value is -// used to construct an unknown value of the same type which is then used in -// place of the requested object so that type checking can still proceed. In -// cases where it's not possible to even determine a suitable result type, -// cty.DynamicVal is returned along with errors describing the problem. -type Data interface { - StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics - - GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetResource(addrs.Resource, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go deleted file mode 100644 index af5c5cac..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package lang deals with the runtime aspects of Terraform's configuration -// language, with concerns such as expression evaluation. It is closely related -// to sibling package "configs", which is responsible for configuration -// parsing and static validation. 
-package lang diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go deleted file mode 100644 index ec48a873..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go +++ /dev/null @@ -1,473 +0,0 @@ -package lang - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/dynblock" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// ExpandBlock expands any "dynamic" blocks present in the given body. The -// result is a body with those blocks expanded, ready to be evaluated with -// EvalBlock. -// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. -func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { - spec := schema.DecoderSpec() - - traversals := dynblock.ExpandVariablesHCLDec(body, spec) - refs, diags := References(traversals) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - - return dynblock.Expand(body, ctx), diags -} - -// EvalBlock evaluates the given body using the given block schema and returns -// a cty object value representing its contents. The type of the result conforms -// to the implied type of the given schema. -// -// This function does not automatically expand "dynamic" blocks within the -// body. If that is desired, first call the ExpandBlock method to obtain -// an expanded body to pass to this method. -// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. -func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { - spec := schema.DecoderSpec() - - refs, diags := ReferencesInBlock(body, schema) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - if diags.HasErrors() { - // We'll stop early if we found problems in the references, because - // it's likely evaluation will produce redundant copies of the same errors. - return cty.UnknownVal(schema.ImpliedType()), diags - } - - // HACK: In order to remain compatible with some assumptions made in - // Terraform v0.11 and earlier about the approximate equivalence of - // attribute vs. block syntax, we do a just-in-time fixup here to allow - // any attribute in the schema that has a list-of-objects or set-of-objects - // kind to potentially be populated instead by one or more nested blocks - // whose type is the attribute name. - body = blocktoattr.FixUpBlockAttrs(body, schema) - - val, evalDiags := hcldec.Decode(body, spec, ctx) - diags = diags.Append(evalDiags) - - return val, diags -} - -// EvalExpr evaluates a single expression in the receiving context and returns -// the resulting value. The value will be converted to the given type before -// it is returned if possible, or else an error diagnostic will be produced -// describing the conversion error. -// -// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion -// and just obtain the returned value directly. 
-// -// If the returned diagnostics contains errors then the result may be -// incomplete, but will always be of the requested type. -func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { - refs, diags := ReferencesInExpr(expr) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - if diags.HasErrors() { - // We'll stop early if we found problems in the references, because - // it's likely evaluation will produce redundant copies of the same errors. - return cty.UnknownVal(wantType), diags - } - - val, evalDiags := expr.Value(ctx) - diags = diags.Append(evalDiags) - - if wantType != cty.DynamicPseudoType { - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - val = cty.UnknownVal(wantType) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect value type", - Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), - Subject: expr.Range().Ptr(), - }) - } - } - - return val, diags -} - -// EvalReference evaluates the given reference in the receiving scope and -// returns the resulting value. The value will be converted to the given type before -// it is returned if possible, or else an error diagnostic will be produced -// describing the conversion error. -// -// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion -// and just obtain the returned value directly. -// -// If the returned diagnostics contains errors then the result may be -// incomplete, but will always be of the requested type. -func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // We cheat a bit here and just build an EvalContext for our requested - // reference with the "self" address overridden, and then pull the "self" - // result out of it to return. - ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject) - diags = diags.Append(ctxDiags) - val := ctx.Variables["self"] - if val == cty.NilVal { - val = cty.DynamicVal - } - - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - val = cty.UnknownVal(wantType) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect value type", - Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - - return val, diags -} - -// EvalContext constructs a HCL expression evaluation context whose variable -// scope contains sufficient values to satisfy the given set of references. -// -// Most callers should prefer to use the evaluation helper methods that -// this type offers, but this is here for less common situations where the -// caller will handle the evaluation calls itself. -func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { - return s.evalContext(refs, s.SelfAddr) -} - -func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) { - if s == nil { - panic("attempt to construct EvalContext for nil Scope") - } - - var diags tfdiags.Diagnostics - vals := make(map[string]cty.Value) - funcs := s.Functions() - ctx := &hcl.EvalContext{ - Variables: vals, - Functions: funcs, - } - - if len(refs) == 0 { - // Easy path for common case where there are no references at all. - return ctx, diags - } - - // First we'll do static validation of the references. 
This catches things - // early that might otherwise not get caught due to unknown values being - // present in the scope during planning. - if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() { - diags = diags.Append(staticDiags) - return ctx, diags - } - - // The reference set we are given has not been de-duped, and so there can - // be redundant requests in it for two reasons: - // - The same item is referenced multiple times - // - Both an item and that item's container are separately referenced. - // We will still visit every reference here and ask our data source for - // it, since that allows us to gather a full set of any errors and - // warnings, but once we've gathered all the data we'll then skip anything - // that's redundant in the process of populating our values map. - dataResources := map[string]map[string]cty.Value{} - managedResources := map[string]map[string]cty.Value{} - wholeModules := map[string]map[addrs.InstanceKey]cty.Value{} - moduleOutputs := map[string]map[addrs.InstanceKey]map[string]cty.Value{} - inputVariables := map[string]cty.Value{} - localValues := map[string]cty.Value{} - pathAttrs := map[string]cty.Value{} - terraformAttrs := map[string]cty.Value{} - countAttrs := map[string]cty.Value{} - forEachAttrs := map[string]cty.Value{} - var self cty.Value - - for _, ref := range refs { - rng := ref.SourceRange - - rawSubj := ref.Subject - if rawSubj == addrs.Self { - if selfAddr == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "self" reference`, - // This detail message mentions some current practice that - // this codepath doesn't really "know about". If the "self" - // object starts being supported in more contexts later then - // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - continue - } - - if selfAddr == addrs.Self { - // Programming error: the self address cannot alias itself. - panic("scope SelfAddr attempting to alias itself") - } - - // self can only be used within a resource instance - subj := selfAddr.(addrs.ResourceInstance) - - val, valDiags := normalizeRefValue(s.Data.GetResource(subj.ContainingResource(), rng)) - - diags = diags.Append(valDiags) - - // Self is an exception in that it must always resolve to a - // particular instance. We will still insert the full resource into - // the context below. - switch k := subj.Key.(type) { - case addrs.IntKey: - self = val.Index(cty.NumberIntVal(int64(k))) - case addrs.StringKey: - self = val.Index(cty.StringVal(string(k))) - default: - self = val - } - - continue - } - - // This type switch must cover all of the "Referenceable" implementations - // in package addrs, however we are removing the possibility of - // ResourceInstance beforehand. 
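// Concretely (resource name hypothetical): a reference to aws_instance.web[0]
// arrives as an addrs.ResourceInstance and is collapsed below to its
// containing resource, aws_instance.web, so the switch that follows only
// needs a case for addrs.Resource.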
- if addr, ok := rawSubj.(addrs.ResourceInstance); ok { - rawSubj = addr.ContainingResource() - } - - switch subj := rawSubj.(type) { - case addrs.Resource: - var into map[string]map[string]cty.Value - switch subj.Mode { - case addrs.ManagedResourceMode: - into = managedResources - case addrs.DataResourceMode: - into = dataResources - default: - panic(fmt.Errorf("unsupported ResourceMode %s", subj.Mode)) - } - - val, valDiags := normalizeRefValue(s.Data.GetResource(subj, rng)) - diags = diags.Append(valDiags) - - r := subj - if into[r.Type] == nil { - into[r.Type] = make(map[string]cty.Value) - } - into[r.Type][r.Name] = val - - case addrs.ModuleCallInstance: - val, valDiags := normalizeRefValue(s.Data.GetModuleInstance(subj, rng)) - diags = diags.Append(valDiags) - - if wholeModules[subj.Call.Name] == nil { - wholeModules[subj.Call.Name] = make(map[addrs.InstanceKey]cty.Value) - } - wholeModules[subj.Call.Name][subj.Key] = val - - case addrs.ModuleCallOutput: - val, valDiags := normalizeRefValue(s.Data.GetModuleInstanceOutput(subj, rng)) - diags = diags.Append(valDiags) - - callName := subj.Call.Call.Name - callKey := subj.Call.Key - if moduleOutputs[callName] == nil { - moduleOutputs[callName] = make(map[addrs.InstanceKey]map[string]cty.Value) - } - if moduleOutputs[callName][callKey] == nil { - moduleOutputs[callName][callKey] = make(map[string]cty.Value) - } - moduleOutputs[callName][callKey][subj.Name] = val - - case addrs.InputVariable: - val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng)) - diags = diags.Append(valDiags) - inputVariables[subj.Name] = val - - case addrs.LocalValue: - val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng)) - diags = diags.Append(valDiags) - localValues[subj.Name] = val - - case addrs.PathAttr: - val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng)) - diags = diags.Append(valDiags) - pathAttrs[subj.Name] = val - - case addrs.TerraformAttr: - val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng)) - diags = diags.Append(valDiags) - terraformAttrs[subj.Name] = val - - case addrs.CountAttr: - val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng)) - diags = diags.Append(valDiags) - countAttrs[subj.Name] = val - - case addrs.ForEachAttr: - val, valDiags := normalizeRefValue(s.Data.GetForEachAttr(subj, rng)) - diags = diags.Append(valDiags) - forEachAttrs[subj.Name] = val - - default: - // Should never happen - panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj)) - } - } - - for k, v := range buildResourceObjects(managedResources) { - vals[k] = v - } - vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources)) - vals["module"] = cty.ObjectVal(buildModuleObjects(wholeModules, moduleOutputs)) - vals["var"] = cty.ObjectVal(inputVariables) - vals["local"] = cty.ObjectVal(localValues) - vals["path"] = cty.ObjectVal(pathAttrs) - vals["terraform"] = cty.ObjectVal(terraformAttrs) - vals["count"] = cty.ObjectVal(countAttrs) - vals["each"] = cty.ObjectVal(forEachAttrs) - if self != cty.NilVal { - vals["self"] = self - } - - return ctx, diags -} - -func buildResourceObjects(resources map[string]map[string]cty.Value) map[string]cty.Value { - vals := make(map[string]cty.Value) - for typeName, nameVals := range resources { - vals[typeName] = cty.ObjectVal(nameVals) - } - return vals -} - -func buildModuleObjects(wholeModules map[string]map[addrs.InstanceKey]cty.Value, moduleOutputs map[string]map[addrs.InstanceKey]map[string]cty.Value) map[string]cty.Value { - vals 
:= make(map[string]cty.Value)
-
-  for name, keys := range wholeModules {
-    vals[name] = buildInstanceObjects(keys)
-  }
-
-  for name, keys := range moduleOutputs {
-    if _, exists := wholeModules[name]; exists {
-      // If we also have a whole module value for this name then we'll
-      // skip this since the individual outputs are embedded in that result.
-      continue
-    }
-
-    // The shape of this collection isn't compatible with buildInstanceObjects,
-    // but rather than replicating most of the buildInstanceObjects logic
-    // here we'll instead first transform the structure to be what that
-    // function expects and then use it. This is a little wasteful, but
-    // we do not expect these maps to be large and so the extra work
-    // here should not hurt too much.
-    flattened := make(map[addrs.InstanceKey]cty.Value, len(keys))
-    for k, vals := range keys {
-      flattened[k] = cty.ObjectVal(vals)
-    }
-    vals[name] = buildInstanceObjects(flattened)
-  }
-
-  return vals
-}
-
-func buildInstanceObjects(keys map[addrs.InstanceKey]cty.Value) cty.Value {
-  if val, exists := keys[addrs.NoKey]; exists {
-    // If present, a "no key" value supersedes all other values,
-    // since they should be embedded inside it.
-    return val
-  }
-
-  // If we only have individual values then we need to construct
-  // either a list or a map, depending on what sort of keys we
-  // have.
-  haveInt := false
-  haveString := false
-  maxInt := 0
-
-  for k := range keys {
-    switch tk := k.(type) {
-    case addrs.IntKey:
-      haveInt = true
-      if int(tk) > maxInt {
-        maxInt = int(tk)
-      }
-    case addrs.StringKey:
-      haveString = true
-    }
-  }
-
-  // We should either have ints or strings and not both, but
-  // if we have both then we'll prefer strings and let the
-  // language interpreter try to convert the int keys into
-  // strings in a map.
-  switch {
-  case haveString:
-    vals := make(map[string]cty.Value)
-    for k, v := range keys {
-      switch tk := k.(type) {
-      case addrs.StringKey:
-        vals[string(tk)] = v
-      case addrs.IntKey:
-        sk := strconv.Itoa(int(tk))
-        vals[sk] = v
-      }
-    }
-    return cty.ObjectVal(vals)
-  case haveInt:
-    // We'll make a tuple that is long enough for our maximum
-    // index value. It doesn't matter if we end up shorter than
-    // the number of instances because if length(...) were
-    // being evaluated we would've got a NoKey reference and
-    // thus not ended up in this codepath at all.
-    vals := make([]cty.Value, maxInt+1)
-    for i := range vals {
-      if v, exists := keys[addrs.IntKey(i)]; exists {
-        vals[i] = v
-      } else {
-        // Just a placeholder, since nothing will access this anyway
-        vals[i] = cty.DynamicVal
-      }
-    }
-    return cty.TupleVal(vals)
-  default:
-    // Should never happen because there are no other key types.
-    log.Printf("[ERROR] strange buildInstanceObjects call with no supported key types")
-    return cty.EmptyObjectVal
-  }
-}
-
-func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) {
-  if diags.HasErrors() {
-    // If there are errors then we will force an unknown result so that
-    // we can still evaluate and catch type errors but we'll avoid
-    // producing redundant re-statements of the same errors we've already
-    // dealt with here.
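// For instance, if val is cty.StringVal("ok") but diags carries errors, the
// caller receives cty.UnknownVal(cty.String): still the correct type for
// downstream type checking, without restating the failure.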
-    return cty.UnknownVal(val.Type()), diags
-  }
-  return val, diags
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go
deleted file mode 100644
index 8c075148..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package funcs
-
-import (
-  "fmt"
-  "net"
-
-  "github.com/apparentlymart/go-cidr/cidr"
-  "github.com/zclconf/go-cty/cty"
-  "github.com/zclconf/go-cty/cty/function"
-  "github.com/zclconf/go-cty/cty/gocty"
-)
-
-// CidrHostFunc constructs a function that calculates a full host IP address
-// within a given IP network address prefix.
-var CidrHostFunc = function.New(&function.Spec{
-  Params: []function.Parameter{
-    {
-      Name: "prefix",
-      Type: cty.String,
-    },
-    {
-      Name: "hostnum",
-      Type: cty.Number,
-    },
-  },
-  Type: function.StaticReturnType(cty.String),
-  Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-    var hostNum int
-    if err := gocty.FromCtyValue(args[1], &hostNum); err != nil {
-      return cty.UnknownVal(cty.String), err
-    }
-    _, network, err := net.ParseCIDR(args[0].AsString())
-    if err != nil {
-      return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
-    }
-
-    ip, err := cidr.Host(network, hostNum)
-    if err != nil {
-      return cty.UnknownVal(cty.String), err
-    }
-
-    return cty.StringVal(ip.String()), nil
-  },
-})
-
-// CidrNetmaskFunc constructs a function that converts an IPv4 address prefix given
-// in CIDR notation into a subnet mask address.
-var CidrNetmaskFunc = function.New(&function.Spec{
-  Params: []function.Parameter{
-    {
-      Name: "prefix",
-      Type: cty.String,
-    },
-  },
-  Type: function.StaticReturnType(cty.String),
-  Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-    _, network, err := net.ParseCIDR(args[0].AsString())
-    if err != nil {
-      return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
-    }
-
-    return cty.StringVal(net.IP(network.Mask).String()), nil
-  },
-})
-
-// CidrSubnetFunc constructs a function that calculates a subnet address within
-// a given IP network address prefix.
-var CidrSubnetFunc = function.New(&function.Spec{
-  Params: []function.Parameter{
-    {
-      Name: "prefix",
-      Type: cty.String,
-    },
-    {
-      Name: "newbits",
-      Type: cty.Number,
-    },
-    {
-      Name: "netnum",
-      Type: cty.Number,
-    },
-  },
-  Type: function.StaticReturnType(cty.String),
-  Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-    var newbits int
-    if err := gocty.FromCtyValue(args[1], &newbits); err != nil {
-      return cty.UnknownVal(cty.String), err
-    }
-    var netnum int
-    if err := gocty.FromCtyValue(args[2], &netnum); err != nil {
-      return cty.UnknownVal(cty.String), err
-    }
-
-    _, network, err := net.ParseCIDR(args[0].AsString())
-    if err != nil {
-      return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
-    }
-
-    // For portability with 32-bit systems where the subnet number
-    // will be a 32-bit int, we only allow extension of 32 bits in
-    // one call even if we're running on a 64-bit machine.
-    // (Of course, this is significant only for IPv6.)
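// A worked example of the calculation this implements: with prefix
// "10.0.0.0/16", newbits 8, and netnum 5, the result is "10.0.5.0/24"
// (16 existing prefix bits plus 8 new bits, with the new bits set to 5).
// The guard below rejects, say, newbits of 40 even on 64-bit builds.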
- if newbits > 32 { - return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits") - } - - newNetwork, err := cidr.Subnet(network, newbits, netnum) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(newNetwork.String()), nil - }, -}) - -// CidrSubnetsFunc is similar to CidrSubnetFunc but calculates many consecutive -// subnet addresses at once, rather than just a single subnet extension. -var CidrSubnetsFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "newbits", - Type: cty.Number, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err) - } - startPrefixLen, _ := network.Mask.Size() - - prefixLengthArgs := args[1:] - if len(prefixLengthArgs) == 0 { - return cty.ListValEmpty(cty.String), nil - } - - var firstLength int - if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil { - return cty.UnknownVal(cty.String), function.NewArgError(1, err) - } - firstLength += startPrefixLen - - retVals := make([]cty.Value, len(prefixLengthArgs)) - - current, _ := cidr.PreviousSubnet(network, firstLength) - for i, lengthArg := range prefixLengthArgs { - var length int - if err := gocty.FromCtyValue(lengthArg, &length); err != nil { - return cty.UnknownVal(cty.String), function.NewArgError(i+1, err) - } - - if length < 1 { - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit") - } - // For portability with 32-bit systems where the subnet number - // will be a 32-bit int, we only allow extension of 32 bits in - // one call even if we're running on a 64-bit machine. - // (Of course, this is significant only for IPv6.) - if length > 32 { - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits") - } - length += startPrefixLen - if length > (len(network.IP) * 8) { - protocol := "IP" - switch len(network.IP) * 8 { - case 32: - protocol = "IPv4" - case 128: - protocol = "IPv6" - } - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol) - } - - next, rollover := cidr.NextSubnet(current, length) - if rollover || !network.Contains(next.IP) { - // If we run out of suffix bits in the base CIDR prefix then - // NextSubnet will start incrementing the prefix bits, which - // we don't allow because it would then allocate addresses - // outside of the caller's given prefix. - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String()) - } - - current = next - retVals[i] = cty.StringVal(current.String()) - } - - return cty.ListVal(retVals), nil - }, -}) - -// CidrHost calculates a full host IP address within a given IP network address prefix. -func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { - return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) -} - -// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. 
-func CidrNetmask(prefix cty.Value) (cty.Value, error) { - return CidrNetmaskFunc.Call([]cty.Value{prefix}) -} - -// CidrSubnet calculates a subnet address within a given IP network address prefix. -func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { - return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) -} - -// CidrSubnets calculates a sequence of consecutive subnet prefixes that may -// be of different prefix lengths under a common base prefix. -func CidrSubnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) { - args := make([]cty.Value, len(newbits)+1) - args[0] = prefix - copy(args[1:], newbits) - return CidrSubnetsFunc.Call(args) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go deleted file mode 100644 index e6898457..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go +++ /dev/null @@ -1,1519 +0,0 @@ -package funcs - -import ( - "errors" - "fmt" - "sort" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/function/stdlib" - "github.com/zclconf/go-cty/cty/gocty" -) - -var ElementFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "index", - Type: cty.Number, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - list := args[0] - listTy := list.Type() - switch { - case listTy.IsListType(): - return listTy.ElementType(), nil - case listTy.IsTupleType(): - if !args[1].IsKnown() { - // If the index isn't known yet then we can't predict the - // result type since each tuple element can have its own type. - return cty.DynamicPseudoType, nil - } - - etys := listTy.TupleElementTypes() - var index int - err := gocty.FromCtyValue(args[1], &index) - if err != nil { - // e.g. fractional number where whole number is required - return cty.DynamicPseudoType, fmt.Errorf("invalid index: %s", err) - } - if len(etys) == 0 { - return cty.DynamicPseudoType, errors.New("cannot use element function with an empty list") - } - index = index % len(etys) - return etys[index], nil - default: - return cty.DynamicPseudoType, fmt.Errorf("cannot read elements from %s", listTy.FriendlyName()) - } - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - var index int - err := gocty.FromCtyValue(args[1], &index) - if err != nil { - // can't happen because we checked this in the Type function above - return cty.DynamicVal, fmt.Errorf("invalid index: %s", err) - } - - if !args[0].IsKnown() { - return cty.UnknownVal(retType), nil - } - - l := args[0].LengthInt() - if l == 0 { - return cty.DynamicVal, errors.New("cannot use element function with an empty list") - } - index = index % l - - // We did all the necessary type checks in the type function above, - // so this is guaranteed not to fail. 
- return args[0].Index(cty.NumberIntVal(int64(index))), nil - }, -}) - -var LengthFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowDynamicType: true, - AllowUnknown: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - collTy := args[0].Type() - switch { - case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: - return cty.Number, nil - default: - return cty.Number, errors.New("argument must be a string, a collection type, or a structural type") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - coll := args[0] - collTy := args[0].Type() - switch { - case collTy == cty.DynamicPseudoType: - return cty.UnknownVal(cty.Number), nil - case collTy.IsTupleType(): - l := len(collTy.TupleElementTypes()) - return cty.NumberIntVal(int64(l)), nil - case collTy.IsObjectType(): - l := len(collTy.AttributeTypes()) - return cty.NumberIntVal(int64(l)), nil - case collTy == cty.String: - // We'll delegate to the cty stdlib strlen function here, because - // it deals with all of the complexities of tokenizing unicode - // grapheme clusters. - return stdlib.Strlen(coll) - case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType(): - return coll.Length(), nil - default: - // Should never happen, because of the checks in our Type func above - return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)") - } - }, -}) - -// CoalesceFunc constructs a function that takes any number of arguments and -// returns the first one that isn't empty. This function was copied from go-cty -// stdlib and modified so that it returns the first *non-empty* non-null element -// from a sequence, instead of merely the first non-null. -var CoalesceFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - argTypes := make([]cty.Type, len(args)) - for i, val := range args { - argTypes[i] = val.Type() - } - retType, _ := convert.UnifyUnsafe(argTypes) - if retType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - return retType, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, argVal := range args { - // We already know this will succeed because of the checks in our Type func above - argVal, _ = convert.Convert(argVal, retType) - if !argVal.IsKnown() { - return cty.UnknownVal(retType), nil - } - if argVal.IsNull() { - continue - } - if retType == cty.String && argVal.RawEquals(cty.StringVal("")) { - continue - } - - return argVal, nil - } - return cty.NilVal, errors.New("no non-null, non-empty-string arguments") - }, -}) - -// CoalesceListFunc constructs a function that takes any number of list arguments -// and returns the first one that isn't empty. 
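Two behaviors of the functions above are easy to miss: element wraps its index modulo the list length rather than failing on out-of-range indices, and this coalesce variant skips empty strings as well as nulls. A minimal in-package sketch (same caveats as before: illustrative only, hypothetical demo name):

```go
package funcs

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func demoElementCoalesce() {
	list := cty.ListVal([]cty.Value{
		cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c"),
	})

	// element(["a","b","c"], 4) -> "b": the index wraps modulo the length (4 % 3 == 1).
	v, _ := Element(list, cty.NumberIntVal(4))
	fmt.Println(v.AsString())

	// length(["a","b","c"]) -> 3
	n, _ := Length(list)
	fmt.Println(n.AsBigFloat())

	// coalesce("", "fallback") -> "fallback": empty strings are skipped, not just nulls.
	c, _ := Coalesce(cty.StringVal(""), cty.StringVal("fallback"))
	fmt.Println(c.AsString())
}
```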
-var CoalesceListFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) == 0 { - return cty.NilType, errors.New("at least one argument is required") - } - - argTypes := make([]cty.Type, len(args)) - - for i, arg := range args { - // if any argument is unknown, we can't be certain know which type we will return - if !arg.IsKnown() { - return cty.DynamicPseudoType, nil - } - ty := arg.Type() - - if !ty.IsListType() && !ty.IsTupleType() { - return cty.NilType, errors.New("coalescelist arguments must be lists or tuples") - } - - argTypes[i] = arg.Type() - } - - last := argTypes[0] - // If there are mixed types, we have to return a dynamic type. - for _, next := range argTypes[1:] { - if !next.Equals(last) { - return cty.DynamicPseudoType, nil - } - } - - return last, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, arg := range args { - if !arg.IsKnown() { - // If we run into an unknown list at some point, we can't - // predict the final result yet. (If there's a known, non-empty - // arg before this then we won't get here.) - return cty.UnknownVal(retType), nil - } - - if arg.LengthInt() > 0 { - return arg, nil - } - } - - return cty.NilVal, errors.New("no non-null arguments") - }, -}) - -// CompactFunc constructs a function that takes a list of strings and returns a new list -// with any empty string elements removed. -var CompactFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.String), - }, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - listVal := args[0] - if !listVal.IsWhollyKnown() { - // If some of the element values aren't known yet then we - // can't yet return a compacted list - return cty.UnknownVal(retType), nil - } - - var outputList []cty.Value - - for it := listVal.ElementIterator(); it.Next(); { - _, v := it.Element() - if v.IsNull() || v.AsString() == "" { - continue - } - outputList = append(outputList, v) - } - - if len(outputList) == 0 { - return cty.ListValEmpty(cty.String), nil - } - - return cty.ListVal(outputList), nil - }, -}) - -// ContainsFunc constructs a function that determines whether a given list or -// set contains a given single value as one of its elements. -var ContainsFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "value", - Type: cty.DynamicPseudoType, - }, - }, - Type: function.StaticReturnType(cty.Bool), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - arg := args[0] - ty := arg.Type() - - if !ty.IsListType() && !ty.IsTupleType() && !ty.IsSetType() { - return cty.NilVal, errors.New("argument must be list, tuple, or set") - } - - _, err = Index(cty.TupleVal(arg.AsValueSlice()), args[1]) - if err != nil { - return cty.False, nil - } - - return cty.True, nil - }, -}) - -// IndexFunc constructs a function that finds the element index for a given value in a list. 
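A short sketch of compact and contains from the block above, under the same in-package, illustrative-only assumptions:

```go
package funcs

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func demoCompactContains() {
	list := cty.ListVal([]cty.Value{
		cty.StringVal("a"), cty.StringVal(""), cty.StringVal("b"),
	})

	// compact(["a","","b"]) -> ["a","b"]: empty strings are dropped.
	compacted, _ := Compact(list)
	fmt.Println(compacted.LengthInt()) // 2

	// contains(["a","","b"], "b") -> true, implemented via Index over a tuple copy.
	ok, _ := Contains(list, cty.StringVal("b"))
	fmt.Println(ok.True())
}
```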
-var IndexFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "value", - Type: cty.DynamicPseudoType, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { - return cty.NilVal, errors.New("argument must be a list or tuple") - } - - if !args[0].IsKnown() { - return cty.UnknownVal(cty.Number), nil - } - - if args[0].LengthInt() == 0 { // Easy path - return cty.NilVal, errors.New("cannot search an empty list") - } - - for it := args[0].ElementIterator(); it.Next(); { - i, v := it.Element() - eq, err := stdlib.Equal(v, args[1]) - if err != nil { - return cty.NilVal, err - } - if !eq.IsKnown() { - return cty.UnknownVal(cty.Number), nil - } - if eq.True() { - return i, nil - } - } - return cty.NilVal, errors.New("item not found") - - }, -}) - -// DistinctFunc constructs a function that takes a list and returns a new list -// with any duplicate elements removed. -var DistinctFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.DynamicPseudoType), - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - return args[0].Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - listVal := args[0] - - if !listVal.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - var list []cty.Value - - for it := listVal.ElementIterator(); it.Next(); { - _, v := it.Element() - list, err = appendIfMissing(list, v) - if err != nil { - return cty.NilVal, err - } - } - - if len(list) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(list), nil - }, -}) - -// ChunklistFunc constructs a function that splits a single list into fixed-size chunks, -// returning a list of lists. -var ChunklistFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "size", - Type: cty.Number, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - return cty.List(args[0].Type()), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - listVal := args[0] - if !listVal.IsKnown() { - return cty.UnknownVal(retType), nil - } - - if listVal.LengthInt() == 0 { - return cty.ListValEmpty(listVal.Type()), nil - } - - var size int - err = gocty.FromCtyValue(args[1], &size) - if err != nil { - return cty.NilVal, fmt.Errorf("invalid index: %s", err) - } - - if size < 0 { - return cty.NilVal, errors.New("the size argument must be positive") - } - - output := make([]cty.Value, 0) - - // if size is 0, returns a list made of the initial list - if size == 0 { - output = append(output, listVal) - return cty.ListVal(output), nil - } - - chunk := make([]cty.Value, 0) - - l := args[0].LengthInt() - i := 0 - - for it := listVal.ElementIterator(); it.Next(); { - _, v := it.Element() - chunk = append(chunk, v) - - // Chunk when index isn't 0, or when reaching the values's length - if (i+1)%size == 0 || (i+1) == l { - output = append(output, cty.ListVal(chunk)) - chunk = make([]cty.Value, 0) - } - i++ - } - - return cty.ListVal(output), nil - }, -}) - -// FlattenFunc constructs a function that takes a list and replaces any elements -// that are lists with a flattened sequence of the list contents. 
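Distinct preserves first occurrences, and chunklist leaves a shorter final chunk when the length is not a multiple of the size. A hedged in-package sketch:

```go
package funcs

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func demoDistinctChunklist() {
	list := cty.ListVal([]cty.Value{
		cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("a"),
	})

	// distinct(["a","b","a"]) -> ["a","b"]
	d, _ := Distinct(list)
	fmt.Println(d.LengthInt()) // 2

	// chunklist(["a","b","a"], 2) -> [["a","b"],["a"]]: the last chunk may be shorter.
	chunks, _ := Chunklist(list, cty.NumberIntVal(2))
	fmt.Println(chunks.LengthInt()) // 2
}
```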
-var FlattenFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - if !args[0].IsWhollyKnown() { - return cty.DynamicPseudoType, nil - } - - argTy := args[0].Type() - if !argTy.IsListType() && !argTy.IsSetType() && !argTy.IsTupleType() { - return cty.NilType, errors.New("can only flatten lists, sets and tuples") - } - - retVal, known := flattener(args[0]) - if !known { - return cty.DynamicPseudoType, nil - } - - tys := make([]cty.Type, len(retVal)) - for i, ty := range retVal { - tys[i] = ty.Type() - } - return cty.Tuple(tys), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputList := args[0] - if inputList.LengthInt() == 0 { - return cty.EmptyTupleVal, nil - } - - out, known := flattener(inputList) - if !known { - return cty.UnknownVal(retType), nil - } - - return cty.TupleVal(out), nil - }, -}) - -// Flatten until it's not a cty.List, and return whether the value is known. -// We can flatten lists with unknown values, as long as they are not -// lists themselves. -func flattener(flattenList cty.Value) ([]cty.Value, bool) { - out := make([]cty.Value, 0) - for it := flattenList.ElementIterator(); it.Next(); { - _, val := it.Element() - if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() { - if !val.IsKnown() { - return out, false - } - - res, known := flattener(val) - if !known { - return res, known - } - out = append(out, res...) - } else { - out = append(out, val) - } - } - return out, true -} - -// KeysFunc constructs a function that takes a map and returns a sorted list of the map keys. -var KeysFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "inputMap", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - ty := args[0].Type() - switch { - case ty.IsMapType(): - return cty.List(cty.String), nil - case ty.IsObjectType(): - atys := ty.AttributeTypes() - if len(atys) == 0 { - return cty.EmptyTuple, nil - } - // All of our result elements will be strings, and atys just - // decides how many there are. - etys := make([]cty.Type, len(atys)) - for i := range etys { - etys[i] = cty.String - } - return cty.Tuple(etys), nil - default: - return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - m := args[0] - var keys []cty.Value - - switch { - case m.Type().IsObjectType(): - // In this case we allow unknown values so we must work only with - // the attribute _types_, not with the value itself. - var names []string - for name := range m.Type().AttributeTypes() { - names = append(names, name) - } - sort.Strings(names) // same ordering guaranteed by cty's ElementIterator - if len(names) == 0 { - return cty.EmptyTupleVal, nil - } - keys = make([]cty.Value, len(names)) - for i, name := range names { - keys[i] = cty.StringVal(name) - } - return cty.TupleVal(keys), nil - default: - if !m.IsKnown() { - return cty.UnknownVal(retType), nil - } - - // cty guarantees that ElementIterator will iterate in lexicographical - // order by key. 
- for it := args[0].ElementIterator(); it.Next(); { - k, _ := it.Element() - keys = append(keys, k) - } - if len(keys) == 0 { - return cty.ListValEmpty(cty.String), nil - } - return cty.ListVal(keys), nil - } - }, -}) - -// ListFunc constructs a function that takes an arbitrary number of arguments -// and returns a list containing those values in the same order. -// -// This function is deprecated in Terraform v0.12 -var ListFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) == 0 { - return cty.NilType, errors.New("at least one argument is required") - } - - argTypes := make([]cty.Type, len(args)) - - for i, arg := range args { - argTypes[i] = arg.Type() - } - - retType, _ := convert.UnifyUnsafe(argTypes) - if retType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - - return cty.List(retType), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - newList := make([]cty.Value, 0, len(args)) - - for _, arg := range args { - // We already know this will succeed because of the checks in our Type func above - arg, _ = convert.Convert(arg, retType.ElementType()) - newList = append(newList, arg) - } - - return cty.ListVal(newList), nil - }, -}) - -// LookupFunc constructs a function that performs dynamic lookups of map types. -var LookupFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "inputMap", - Type: cty.DynamicPseudoType, - }, - { - Name: "key", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "default", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) < 1 || len(args) > 3 { - return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args)) - } - - ty := args[0].Type() - - switch { - case ty.IsObjectType(): - if !args[1].IsKnown() { - return cty.DynamicPseudoType, nil - } - - key := args[1].AsString() - if ty.HasAttribute(key) { - return args[0].GetAttr(key).Type(), nil - } else if len(args) == 3 { - // if the key isn't found but a default is provided, - // return the default type - return args[2].Type(), nil - } - return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key) - case ty.IsMapType(): - if len(args) == 3 { - _, err = convert.Convert(args[2], ty.ElementType()) - if err != nil { - return cty.NilType, function.NewArgErrorf(2, "the default value must have the same type as the map elements") - } - } - return ty.ElementType(), nil - default: - return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var defaultVal cty.Value - defaultValueSet := false - - if len(args) == 3 { - defaultVal = args[2] - defaultValueSet = true - } - - mapVar := args[0] - lookupKey := args[1].AsString() - - if !mapVar.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - if mapVar.Type().IsObjectType() { - if mapVar.Type().HasAttribute(lookupKey) { - return mapVar.GetAttr(lookupKey), nil - } - } else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True { - return mapVar.Index(cty.StringVal(lookupKey)), nil - 
} - - if defaultValueSet { - defaultVal, err = convert.Convert(defaultVal, retType) - if err != nil { - return cty.NilVal, err - } - return defaultVal, nil - } - - return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf( - "lookup failed to find '%s'", lookupKey) - }, -}) - -// MapFunc constructs a function that takes an even number of arguments and -// returns a map whose elements are constructed from consecutive pairs of arguments. -// -// This function is deprecated in Terraform v0.12 -var MapFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) < 2 || len(args)%2 != 0 { - return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args)) - } - - argTypes := make([]cty.Type, len(args)/2) - index := 0 - - for i := 0; i < len(args); i += 2 { - argTypes[index] = args[i+1].Type() - index++ - } - - valType, _ := convert.UnifyUnsafe(argTypes) - if valType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - - return cty.Map(valType), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, arg := range args { - if !arg.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - } - - outputMap := make(map[string]cty.Value) - - for i := 0; i < len(args); i += 2 { - - key := args[i].AsString() - - err := gocty.FromCtyValue(args[i], &key) - if err != nil { - return cty.NilVal, err - } - - val := args[i+1] - - var variable cty.Value - err = gocty.FromCtyValue(val, &variable) - if err != nil { - return cty.NilVal, err - } - - // We already know this will succeed because of the checks in our Type func above - variable, _ = convert.Convert(variable, retType.ElementType()) - - // Check for duplicate keys - if _, ok := outputMap[key]; ok { - return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) - } - outputMap[key] = variable - } - - return cty.MapVal(outputMap), nil - }, -}) - -// MatchkeysFunc constructs a function that constructs a new list by taking a -// subset of elements from one list whose indexes match the corresponding -// indexes of values in another list. -var MatchkeysFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "keys", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "searchset", - Type: cty.List(cty.DynamicPseudoType), - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) - if ty == cty.NilType { - return cty.NilType, errors.New("keys and searchset must be of the same type") - } - - // the return type is based on args[0] (values) - return args[0].Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - if !args[0].IsKnown() { - return cty.UnknownVal(cty.List(retType.ElementType())), nil - } - - if args[0].LengthInt() != args[1].LengthInt() { - return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal") - } - - output := make([]cty.Value, 0) - values := args[0] - - // Keys and searchset must be the same type. 
- // We can skip error checking here because we've already verified that - // they can be unified in the Type function - ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) - keys, _ := convert.Convert(args[1], ty) - searchset, _ := convert.Convert(args[2], ty) - - // if searchset is empty, return an empty list. - if searchset.LengthInt() == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - - if !values.IsWhollyKnown() || !keys.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - i := 0 - for it := keys.ElementIterator(); it.Next(); { - _, key := it.Element() - for iter := searchset.ElementIterator(); iter.Next(); { - _, search := iter.Element() - eq, err := stdlib.Equal(key, search) - if err != nil { - return cty.NilVal, err - } - if !eq.IsKnown() { - return cty.ListValEmpty(retType.ElementType()), nil - } - if eq.True() { - v := values.Index(cty.NumberIntVal(int64(i))) - output = append(output, v) - break - } - } - i++ - } - - // if we haven't matched any key, then output is an empty list. - if len(output) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(output), nil - }, -}) - -// MergeFunc constructs a function that takes an arbitrary number of maps and -// returns a single map that contains a merged set of elements from all of the maps. -// -// If more than one given map defines the same key then the one that is later in -// the argument sequence takes precedence. -var MergeFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "maps", - Type: cty.DynamicPseudoType, - AllowDynamicType: true, - }, - Type: function.StaticReturnType(cty.DynamicPseudoType), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - outputMap := make(map[string]cty.Value) - - for _, arg := range args { - if !arg.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - if !arg.Type().IsObjectType() && !arg.Type().IsMapType() { - return cty.NilVal, fmt.Errorf("arguments must be maps or objects, got %#v", arg.Type().FriendlyName()) - } - for it := arg.ElementIterator(); it.Next(); { - k, v := it.Element() - outputMap[k.AsString()] = v - } - } - return cty.ObjectVal(outputMap), nil - }, -}) - -// ReverseFunc takes a sequence and produces a new sequence of the same length -// with all of the same elements as the given sequence but in reverse order. 
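The lookup default and merge precedence rules defined above are worth a concrete illustration. A minimal in-package sketch (hypothetical demo name; note that merge returns an object value, per MergeFunc's Impl):

```go
package funcs

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func demoLookupMerge() {
	amis := cty.MapVal(map[string]cty.Value{
		"us-east-1": cty.StringVal("ami-123"),
	})

	// lookup(amis, "eu-west-1", "ami-default") -> "ami-default":
	// the default is returned when the key is absent.
	v, _ := Lookup(amis, cty.StringVal("eu-west-1"), cty.StringVal("ami-default"))
	fmt.Println(v.AsString())

	// merge({a="1"}, {a="2", b="3"}) -> {a="2", b="3"}: later maps win.
	m, _ := Merge(
		cty.MapVal(map[string]cty.Value{"a": cty.StringVal("1")}),
		cty.MapVal(map[string]cty.Value{"a": cty.StringVal("2"), "b": cty.StringVal("3")}),
	)
	fmt.Println(m.GetAttr("a").AsString()) // "2" (result is an object value)
}
```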
-var ReverseFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - argTy := args[0].Type() - switch { - case argTy.IsTupleType(): - argTys := argTy.TupleElementTypes() - retTys := make([]cty.Type, len(argTys)) - for i, ty := range argTys { - retTys[len(retTys)-i-1] = ty - } - return cty.Tuple(retTys), nil - case argTy.IsListType(), argTy.IsSetType(): // We accept sets here to mimic the usual behavior of auto-converting to list - return cty.List(argTy.ElementType()), nil - default: - return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName()) - } - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - in := args[0].AsValueSlice() - outVals := make([]cty.Value, len(in)) - for i, v := range in { - outVals[len(outVals)-i-1] = v - } - switch { - case retType.IsTupleType(): - return cty.TupleVal(outVals), nil - default: - if len(outVals) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(outVals), nil - } - }, -}) - -// SetProductFunc calculates the cartesian product of two or more sets or -// sequences. If the arguments are all lists then the result is a list of tuples, -// preserving the ordering of all of the input lists. Otherwise the result is a -// set of tuples. -var SetProductFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "sets", - Type: cty.DynamicPseudoType, - }, - Type: func(args []cty.Value) (retType cty.Type, err error) { - if len(args) < 2 { - return cty.NilType, errors.New("at least two arguments are required") - } - - listCount := 0 - elemTys := make([]cty.Type, len(args)) - for i, arg := range args { - aty := arg.Type() - switch { - case aty.IsSetType(): - elemTys[i] = aty.ElementType() - case aty.IsListType(): - elemTys[i] = aty.ElementType() - listCount++ - case aty.IsTupleType(): - // We can accept a tuple type only if there's some common type - // that all of its elements can be converted to. - allEtys := aty.TupleElementTypes() - if len(allEtys) == 0 { - elemTys[i] = cty.DynamicPseudoType - listCount++ - break - } - ety, _ := convert.UnifyUnsafe(allEtys) - if ety == cty.NilType { - return cty.NilType, function.NewArgErrorf(i, "all elements must be of the same type") - } - elemTys[i] = ety - listCount++ - default: - return cty.NilType, function.NewArgErrorf(i, "a set or a list is required") - } - } - - if listCount == len(args) { - return cty.List(cty.Tuple(elemTys)), nil - } - return cty.Set(cty.Tuple(elemTys)), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - ety := retType.ElementType() - - total := 1 - for _, arg := range args { - // Because of our type checking function, we are guaranteed that - // all of the arguments are known, non-null values of types that - // support LengthInt. - total *= arg.LengthInt() - } - - if total == 0 { - // If any of the arguments was an empty collection then our result - // is also an empty collection, which we'll short-circuit here. 
- if retType.IsListType() { - return cty.ListValEmpty(ety), nil - } - return cty.SetValEmpty(ety), nil - } - - subEtys := ety.TupleElementTypes() - product := make([][]cty.Value, total) - - b := make([]cty.Value, total*len(args)) - n := make([]int, len(args)) - s := 0 - argVals := make([][]cty.Value, len(args)) - for i, arg := range args { - argVals[i] = arg.AsValueSlice() - } - - for i := range product { - e := s + len(args) - pi := b[s:e] - product[i] = pi - s = e - - for j, n := range n { - val := argVals[j][n] - ty := subEtys[j] - if !val.Type().Equals(ty) { - var err error - val, err = convert.Convert(val, ty) - if err != nil { - // Should never happen since we checked this in our - // type-checking function. - return cty.NilVal, fmt.Errorf("failed to convert argVals[%d][%d] to %s; this is a bug in Terraform", j, n, ty.FriendlyName()) - } - } - pi[j] = val - } - - for j := len(n) - 1; j >= 0; j-- { - n[j]++ - if n[j] < len(argVals[j]) { - break - } - n[j] = 0 - } - } - - productVals := make([]cty.Value, total) - for i, vals := range product { - productVals[i] = cty.TupleVal(vals) - } - - if retType.IsListType() { - return cty.ListVal(productVals), nil - } - return cty.SetVal(productVals), nil - }, -}) - -// SliceFunc constructs a function that extracts some consecutive elements -// from within a list. -var SliceFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "start_index", - Type: cty.Number, - }, - { - Name: "end_index", - Type: cty.Number, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - arg := args[0] - argTy := arg.Type() - - if argTy.IsSetType() { - return cty.NilType, function.NewArgErrorf(0, "cannot slice a set, because its elements do not have indices; use the tolist function to force conversion to list if the ordering of the result is not important") - } - if !argTy.IsListType() && !argTy.IsTupleType() { - return cty.NilType, function.NewArgErrorf(0, "must be a list or tuple value") - } - - startIndex, endIndex, idxsKnown, err := sliceIndexes(args) - if err != nil { - return cty.NilType, err - } - - if argTy.IsListType() { - return argTy, nil - } - - if !idxsKnown { - // If we don't know our start/end indices then we can't predict - // the result type if we're planning to return a tuple. - return cty.DynamicPseudoType, nil - } - return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputList := args[0] - - if retType == cty.DynamicPseudoType { - return cty.DynamicVal, nil - } - - // we ignore idxsKnown return value here because the indices are always - // known here, or else the call would've short-circuited. 
- startIndex, endIndex, _, err := sliceIndexes(args) - if err != nil { - return cty.NilVal, err - } - - if endIndex-startIndex == 0 { - if retType.IsTupleType() { - return cty.EmptyTupleVal, nil - } - return cty.ListValEmpty(retType.ElementType()), nil - } - - outputList := inputList.AsValueSlice()[startIndex:endIndex] - - if retType.IsTupleType() { - return cty.TupleVal(outputList), nil - } - - return cty.ListVal(outputList), nil - }, -}) - -func sliceIndexes(args []cty.Value) (int, int, bool, error) { - var startIndex, endIndex, length int - var startKnown, endKnown, lengthKnown bool - - if args[0].Type().IsTupleType() || args[0].IsKnown() { // if it's a tuple then we always know the length by the type, but lists must be known - length = args[0].LengthInt() - lengthKnown = true - } - - if args[1].IsKnown() { - if err := gocty.FromCtyValue(args[1], &startIndex); err != nil { - return 0, 0, false, function.NewArgErrorf(1, "invalid start index: %s", err) - } - if startIndex < 0 { - return 0, 0, false, function.NewArgErrorf(1, "start index must not be less than zero") - } - if lengthKnown && startIndex > length { - return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than the length of the list") - } - startKnown = true - } - if args[2].IsKnown() { - if err := gocty.FromCtyValue(args[2], &endIndex); err != nil { - return 0, 0, false, function.NewArgErrorf(2, "invalid end index: %s", err) - } - if endIndex < 0 { - return 0, 0, false, function.NewArgErrorf(2, "end index must not be less than zero") - } - if lengthKnown && endIndex > length { - return 0, 0, false, function.NewArgErrorf(2, "end index must not be greater than the length of the list") - } - endKnown = true - } - if startKnown && endKnown { - if startIndex > endIndex { - return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than end index") - } - } - return startIndex, endIndex, startKnown && endKnown, nil -} - -// TransposeFunc constructs a function that takes a map of lists of strings and -// swaps the keys and values to produce a new map of lists of strings.
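Slice treats the start index as inclusive and the end index as exclusive, and setproduct returns a list of tuples when every argument is a list. A minimal in-package sketch under the same caveats as the earlier examples:

```go
package funcs

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func demoSliceSetProduct() {
	list := cty.ListVal([]cty.Value{
		cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c"), cty.StringVal("d"),
	})

	// slice(["a","b","c","d"], 1, 3) -> ["b","c"]: start inclusive, end exclusive.
	s, _ := Slice(list, cty.NumberIntVal(1), cty.NumberIntVal(3))
	fmt.Println(s.LengthInt()) // 2

	// setproduct(["a","b"], ["x"]) -> [["a","x"],["b","x"]]: all lists in, list of tuples out.
	p, _ := SetProduct(
		cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
		cty.ListVal([]cty.Value{cty.StringVal("x")}),
	)
	fmt.Println(p.LengthInt()) // 2
}
```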
-var TransposeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.Map(cty.List(cty.String)), - }, - }, - Type: function.StaticReturnType(cty.Map(cty.List(cty.String))), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputMap := args[0] - if !inputMap.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - outputMap := make(map[string]cty.Value) - tmpMap := make(map[string][]string) - - for it := inputMap.ElementIterator(); it.Next(); { - inKey, inVal := it.Element() - for iter := inVal.ElementIterator(); iter.Next(); { - _, val := iter.Element() - if !val.Type().Equals(cty.String) { - return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings") - } - - outKey := val.AsString() - if _, ok := tmpMap[outKey]; !ok { - tmpMap[outKey] = make([]string, 0) - } - outVal := tmpMap[outKey] - outVal = append(outVal, inKey.AsString()) - sort.Strings(outVal) - tmpMap[outKey] = outVal - } - } - - for outKey, outVal := range tmpMap { - values := make([]cty.Value, 0) - for _, v := range outVal { - values = append(values, cty.StringVal(v)) - } - outputMap[outKey] = cty.ListVal(values) - } - - return cty.MapVal(outputMap), nil - }, -}) - -// ValuesFunc constructs a function that returns a list of the map values, -// in the order of the sorted keys. -var ValuesFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - ty := args[0].Type() - if ty.IsMapType() { - return cty.List(ty.ElementType()), nil - } else if ty.IsObjectType() { - // The result is a tuple type with all of the same types as our - // object type's attributes, sorted in lexicographical order by the - // keys. (This matches the sort order guaranteed by ElementIterator - // on a cty object value.) - atys := ty.AttributeTypes() - if len(atys) == 0 { - return cty.EmptyTuple, nil - } - attrNames := make([]string, 0, len(atys)) - for name := range atys { - attrNames = append(attrNames, name) - } - sort.Strings(attrNames) - - tys := make([]cty.Type, len(attrNames)) - for i, name := range attrNames { - tys[i] = atys[name] - } - return cty.Tuple(tys), nil - } - return cty.NilType, errors.New("values() requires a map as the first argument") - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - mapVar := args[0] - - // We can just iterate the map/object value here because cty guarantees - // that these types always iterate in key lexicographical order. - var values []cty.Value - for it := mapVar.ElementIterator(); it.Next(); { - _, val := it.Element() - values = append(values, val) - } - - if retType.IsTupleType() { - return cty.TupleVal(values), nil - } - if len(values) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(values), nil - }, -}) - -// ZipmapFunc constructs a function that constructs a map from a list of keys -// and a corresponding list of values. 
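Transpose inverts the key/value relationship of a map of string lists; values that appear under several keys collect all of those keys. A hedged in-package sketch:

```go
package funcs

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func demoTranspose() {
	in := cty.MapVal(map[string]cty.Value{
		"a": cty.ListVal([]cty.Value{cty.StringVal("x"), cty.StringVal("y")}),
		"b": cty.ListVal([]cty.Value{cty.StringVal("y")}),
	})

	// transpose({a=["x","y"], b=["y"]}) -> {x=["a"], y=["a","b"]}
	out, _ := Transpose(in)
	fmt.Println(out.Index(cty.StringVal("y")).LengthInt()) // 2
}
```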
-var ZipmapFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "keys", - Type: cty.List(cty.String), - }, - { - Name: "values", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - keys := args[0] - values := args[1] - valuesTy := values.Type() - - switch { - case valuesTy.IsListType(): - return cty.Map(values.Type().ElementType()), nil - case valuesTy.IsTupleType(): - if !keys.IsWhollyKnown() { - // Since zipmap with a tuple produces an object, we need to know - // all of the key names before we can predict our result type. - return cty.DynamicPseudoType, nil - } - - keysRaw := keys.AsValueSlice() - valueTypesRaw := valuesTy.TupleElementTypes() - if len(keysRaw) != len(valueTypesRaw) { - return cty.NilType, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keysRaw), len(valueTypesRaw)) - } - atys := make(map[string]cty.Type, len(valueTypesRaw)) - for i, keyVal := range keysRaw { - if keyVal.IsNull() { - return cty.NilType, fmt.Errorf("keys list has null value at index %d", i) - } - key := keyVal.AsString() - atys[key] = valueTypesRaw[i] - } - return cty.Object(atys), nil - - default: - return cty.NilType, errors.New("values argument must be a list or tuple value") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - keys := args[0] - values := args[1] - - if !keys.IsWhollyKnown() { - // Unknown map keys and object attributes are not supported, so - // our entire result must be unknown in this case. - return cty.UnknownVal(retType), nil - } - - // both keys and values are guaranteed to be shallowly-known here, - // because our declared params above don't allow unknown or null values. - if keys.LengthInt() != values.LengthInt() { - return cty.NilVal, fmt.Errorf("number of keys (%d) does not match number of values (%d)", keys.LengthInt(), values.LengthInt()) - } - - output := make(map[string]cty.Value) - - i := 0 - for it := keys.ElementIterator(); it.Next(); { - _, v := it.Element() - val := values.Index(cty.NumberIntVal(int64(i))) - output[v.AsString()] = val - i++ - } - - switch { - case retType.IsMapType(): - if len(output) == 0 { - return cty.MapValEmpty(retType.ElementType()), nil - } - return cty.MapVal(output), nil - case retType.IsObjectType(): - return cty.ObjectVal(output), nil - default: - // Should never happen because the type-check function should've - // caught any other case. - return cty.NilVal, fmt.Errorf("internally selected incorrect result type %s (this is a bug)", retType.FriendlyName()) - } - }, -}) - -// helper function to add an element to a list, if it does not already exist -func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) { - for _, ele := range slice { - eq, err := stdlib.Equal(ele, element) - if err != nil { - return slice, err - } - if eq.True() { - return slice, nil - } - } - return append(slice, element), nil -} - -// Element returns a single element from a given list at the given index. If -// index is greater than the length of the list then it is wrapped modulo -// the list length. -func Element(list, index cty.Value) (cty.Value, error) { - return ElementFunc.Call([]cty.Value{list, index}) -} - -// Length returns the number of elements in the given collection or number of -// Unicode characters in the given string. 
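Zipmap pairs a list of keys with a same-length list of values; with a list of values it yields a map, with a tuple it yields an object. A minimal in-package sketch:

```go
package funcs

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func demoZipmap() {
	keys := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	vals := cty.ListVal([]cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)})

	// zipmap(["a","b"], [1,2]) -> {a=1, b=2}
	m, _ := Zipmap(keys, vals)
	fmt.Println(m.Index(cty.StringVal("b")).AsBigFloat()) // 2
}
```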
-func Length(collection cty.Value) (cty.Value, error) { - return LengthFunc.Call([]cty.Value{collection}) -} - -// Coalesce takes any number of arguments and returns the first one that isn't empty. -func Coalesce(args ...cty.Value) (cty.Value, error) { - return CoalesceFunc.Call(args) -} - -// CoalesceList takes any number of list arguments and returns the first one that isn't empty. -func CoalesceList(args ...cty.Value) (cty.Value, error) { - return CoalesceListFunc.Call(args) -} - -// Compact takes a list of strings and returns a new list -// with any empty string elements removed. -func Compact(list cty.Value) (cty.Value, error) { - return CompactFunc.Call([]cty.Value{list}) -} - -// Contains determines whether a given list contains a given single value -// as one of its elements. -func Contains(list, value cty.Value) (cty.Value, error) { - return ContainsFunc.Call([]cty.Value{list, value}) -} - -// Index finds the element index for a given value in a list. -func Index(list, value cty.Value) (cty.Value, error) { - return IndexFunc.Call([]cty.Value{list, value}) -} - -// Distinct takes a list and returns a new list with any duplicate elements removed. -func Distinct(list cty.Value) (cty.Value, error) { - return DistinctFunc.Call([]cty.Value{list}) -} - -// Chunklist splits a single list into fixed-size chunks, returning a list of lists. -func Chunklist(list, size cty.Value) (cty.Value, error) { - return ChunklistFunc.Call([]cty.Value{list, size}) -} - -// Flatten takes a list and replaces any elements that are lists with a flattened -// sequence of the list contents. -func Flatten(list cty.Value) (cty.Value, error) { - return FlattenFunc.Call([]cty.Value{list}) -} - -// Keys takes a map and returns a sorted list of the map keys. -func Keys(inputMap cty.Value) (cty.Value, error) { - return KeysFunc.Call([]cty.Value{inputMap}) -} - -// List takes any number of list arguments and returns a list containing those -// values in the same order. -func List(args ...cty.Value) (cty.Value, error) { - return ListFunc.Call(args) -} - -// Lookup performs a dynamic lookup into a map. -// There are two required arguments, map and key, plus an optional default, -// which is a value to return if no key is found in map. -func Lookup(args ...cty.Value) (cty.Value, error) { - return LookupFunc.Call(args) -} - -// Map takes an even number of arguments and returns a map whose elements are constructed -// from consecutive pairs of arguments. -func Map(args ...cty.Value) (cty.Value, error) { - return MapFunc.Call(args) -} - -// Matchkeys constructs a new list by taking a subset of elements from one list -// whose indexes match the corresponding indexes of values in another list. -func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) { - return MatchkeysFunc.Call([]cty.Value{values, keys, searchset}) -} - -// Merge takes an arbitrary number of maps and returns a single map that contains -// a merged set of elements from all of the maps. -// -// If more than one given map defines the same key then the one that is later in -// the argument sequence takes precedence. -func Merge(maps ...cty.Value) (cty.Value, error) { - return MergeFunc.Call(maps) -} - -// Reverse takes a sequence and produces a new sequence of the same length -// with all of the same elements as the given sequence but in reverse order. -func Reverse(list cty.Value) (cty.Value, error) { - return ReverseFunc.Call([]cty.Value{list}) -} - -// SetProduct computes the cartesian product of sets or sequences. 
-func SetProduct(sets ...cty.Value) (cty.Value, error) { - return SetProductFunc.Call(sets) -} - -// Slice extracts some consecutive elements from within a list. -func Slice(list, start, end cty.Value) (cty.Value, error) { - return SliceFunc.Call([]cty.Value{list, start, end}) -} - -// Transpose takes a map of lists of strings and swaps the keys and values to -// produce a new map of lists of strings. -func Transpose(values cty.Value) (cty.Value, error) { - return TransposeFunc.Call([]cty.Value{values}) -} - -// Values returns a list of the map values, in the order of the sorted keys. -// This function only works on flat maps. -func Values(values cty.Value) (cty.Value, error) { - return ValuesFunc.Call([]cty.Value{values}) -} - -// Zipmap constructs a map from a list of keys and a corresponding list of values. -func Zipmap(keys, values cty.Value) (cty.Value, error) { - return ZipmapFunc.Call([]cty.Value{keys, values}) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go deleted file mode 100644 index 83f85979..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go +++ /dev/null @@ -1,87 +0,0 @@ -package funcs - -import ( - "strconv" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// MakeToFunc constructs a "to..." function, like "tostring", which converts -// its argument to a specific type or type kind. -// -// The given type wantTy can be any type constraint that cty's "convert" package -// would accept. In particular, this means that you can pass -// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which -// will then cause cty to attempt to unify all of the element types when given -// a tuple. -func MakeToFunc(wantTy cty.Type) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "v", - // We use DynamicPseudoType rather than wantTy here so that - // all values will pass through the function API verbatim and - // we can handle the conversion logic within the Type and - // Impl functions. This allows us to customize the error - // messages to be more appropriate for an explicit type - // conversion, whereas the cty function system produces - // messages aimed at _implicit_ type conversions. - Type: cty.DynamicPseudoType, - AllowNull: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - gotTy := args[0].Type() - if gotTy.Equals(wantTy) { - return wantTy, nil - } - conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) - if conv == nil { - // We'll use some specialized errors for some trickier cases, - // but most we can handle in a simple way. - switch { - case gotTy.IsTupleType() && wantTy.IsTupleType(): - return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) - case gotTy.IsObjectType() && wantTy.IsObjectType(): - return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) - default: - return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) - } - } - // If a conversion is available then everything is fine. 
- return wantTy, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - // We didn't set "AllowUnknown" on our argument, so it is guaranteed - // to be known here but may still be null. - ret, err := convert.Convert(args[0], retType) - if err != nil { - // Because we used GetConversionUnsafe above, conversion can - // still potentially fail in here. For example, if the user - // asks to convert the string "a" to bool then we'll - // optimistically permit it during type checking but fail here - // once we note that the value isn't either "true" or "false". - gotTy := args[0].Type() - switch { - case gotTy == cty.String && wantTy == cty.Bool: - what := "string" - if !args[0].IsNull() { - what = strconv.Quote(args[0].AsString()) - } - return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what) - case gotTy == cty.String && wantTy == cty.Number: - what := "string" - if !args[0].IsNull() { - what = strconv.Quote(args[0].AsString()) - } - return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what) - default: - return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) - } - } - return ret, nil - }, - }) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go deleted file mode 100644 index 28074fb1..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go +++ /dev/null @@ -1,325 +0,0 @@ -package funcs - -import ( - "crypto/md5" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "crypto/x509" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "fmt" - "hash" - - uuidv5 "github.com/google/uuid" - uuid "github.com/hashicorp/go-uuid" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/gocty" - "golang.org/x/crypto/bcrypt" -) - -var UUIDFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - result, err := uuid.GenerateUUID() - if err != nil { - return cty.UnknownVal(cty.String), err - } - return cty.StringVal(result), nil - }, -}) - -var UUIDV5Func = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "namespace", - Type: cty.String, - }, - { - Name: "name", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var namespace uuidv5.UUID - switch { - case args[0].AsString() == "dns": - namespace = uuidv5.NameSpaceDNS - case args[0].AsString() == "url": - namespace = uuidv5.NameSpaceURL - case args[0].AsString() == "oid": - namespace = uuidv5.NameSpaceOID - case args[0].AsString() == "x500": - namespace = uuidv5.NameSpaceX500 - default: - if namespace, err = uuidv5.Parse(args[0].AsString()); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err) - } - } - val := args[1].AsString() - return cty.StringVal(uuidv5.NewSHA1(namespace, []byte(val)).String()), nil - }, -}) - -// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string 
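MakeToFunc builds the whole family of explicit conversion functions (tostring, tonumber, tolist, and so on) from a single type constraint, delegating the real work to cty's convert package. A hedged in-package sketch of the "tostring" case:

```go
package funcs

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func demoToString() {
	// tostring(true) -> "true": resolved through cty's convert package.
	tostring := MakeToFunc(cty.String)
	v, _ := tostring.Call([]cty.Value{cty.True})
	fmt.Println(v.AsString())

	// tostring({}) fails type-checking with a conversion-specific error message.
	_, err := tostring.Call([]cty.Value{cty.EmptyObjectVal})
	fmt.Println(err != nil) // true
}
```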
-// and encodes it with Base64. -var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString) - -// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileBase64Sha256Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString) -} - -// Base64Sha512Func constructs a function that computes the SHA512 hash of a given string -// and encodes it with Base64. -var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString) - -// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileBase64Sha512Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString) -} - -// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher. -var BcryptFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "cost", - Type: cty.Number, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - defaultCost := 10 - - if len(args) > 1 { - var val int - if err := gocty.FromCtyValue(args[1], &val); err != nil { - return cty.UnknownVal(cty.String), err - } - defaultCost = val - } - - if len(args) > 2 { - return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments") - } - - input := args[0].AsString() - out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("error occurred generating password: %s", err.Error()) - } - - return cty.StringVal(string(out)), nil - }, -}) - -// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits. -var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString) - -// MakeFileMd5Func constructs a function that is like Md5Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileMd5Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString) -} - -// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext. -var RsaDecryptFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "ciphertext", - Type: cty.String, - }, - { - Name: "privatekey", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - s := args[0].AsString() - key := args[1].AsString() - - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode input %q: cipher text must be base64-encoded", s) - } - - block, _ := pem.Decode([]byte(key)) - if block == nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to parse key: no key found") - } - if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - return cty.UnknownVal(cty.String), fmt.Errorf( - "failed to parse key: password protected keys are not supported. 
Please decrypt the key prior to use", - ) - } - - x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(string(out)), nil - }, -}) - -// Sha1Func constructs a function that computes the SHA1 hash of a given string -// and encodes it with hexadecimal digits. -var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString) - -// MakeFileSha1Func constructs a function that is like Sha1Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileSha1Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString) -} - -// Sha256Func constructs a function that computes the SHA256 hash of a given string -// and encodes it with hexadecimal digits. -var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString) - -// MakeFileSha256Func constructs a function that is like Sha256Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileSha256Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString) -} - -// Sha512Func constructs a function that computes the SHA512 hash of a given string -// and encodes it with hexadecimal digits. -var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString) - -// MakeFileSha512Func constructs a function that is like Sha512Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileSha512Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString) -} - -func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - s := args[0].AsString() - h := hf() - h.Write([]byte(s)) - rv := enc(h.Sum(nil)) - return cty.StringVal(rv), nil - }, - }) -} - -func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - path := args[0].AsString() - src, err := readFileBytes(baseDir, path) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - h := hf() - h.Write(src) - rv := enc(h.Sum(nil)) - return cty.StringVal(rv), nil - }, - }) -} - -// UUID generates and returns a Type-4 UUID in the standard hexadecimal string -// format. -// -// This is not a pure function: it will generate a different result for each -// call. It must therefore be registered as an impure function in the function -// table in the "lang" package. -func UUID() (cty.Value, error) { - return UUIDFunc.Call(nil) -} - -// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string -// format. -func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) { - return UUIDV5Func.Call([]cty.Value{namespace, name}) -} - -// Base64Sha256 computes the SHA256 hash of a given string and encodes it with -// Base64.
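All of the hash wrappers above share the same shape via makeStringHashFunction. One subtlety worth illustrating: base64sha256 encodes the raw digest bytes, which is not the same as base64-encoding the hex string produced by sha256. A hedged in-package sketch (the expected digest of "hello" shown in the comment is the standard SHA-256 test value):

```go
package funcs

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func demoHashes() {
	// sha256("hello") -> the usual hex digest:
	// "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
	h, _ := Sha256(cty.StringVal("hello"))
	fmt.Println(h.AsString())

	// base64sha256 hashes first, then base64-encodes the raw digest; it is
	// not equivalent to base64-encoding the hex string above.
	b, _ := Base64Sha256(cty.StringVal("hello"))
	fmt.Println(b.AsString())
}
```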
-// Base64Sha256 computes the SHA256 hash of a given string and encodes it with
-// Base64.
-//
-// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied
-// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
-// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
-func Base64Sha256(str cty.Value) (cty.Value, error) {
-    return Base64Sha256Func.Call([]cty.Value{str})
-}
-
-// Base64Sha512 computes the SHA512 hash of a given string and encodes it with
-// Base64.
-//
-// The given string is first encoded as UTF-8 and then the SHA512 algorithm is applied
-// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
-// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
-func Base64Sha512(str cty.Value) (cty.Value, error) {
-    return Base64Sha512Func.Call([]cty.Value{str})
-}
-
-// Bcrypt computes a hash of the given string using the Blowfish cipher,
-// returning a string in the Modular Crypt Format
-// usually expected in the shadow password file on many Unix systems.
-func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) {
-    args := make([]cty.Value, len(cost)+1)
-    args[0] = str
-    copy(args[1:], cost)
-    return BcryptFunc.Call(args)
-}
-
-// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits.
-func Md5(str cty.Value) (cty.Value, error) {
-    return Md5Func.Call([]cty.Value{str})
-}
-
-// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding
-// cleartext.
-func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) {
-    return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey})
-}
-
-// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits.
-func Sha1(str cty.Value) (cty.Value, error) {
-    return Sha1Func.Call([]cty.Value{str})
-}
-
-// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits.
-func Sha256(str cty.Value) (cty.Value, error) {
-    return Sha256Func.Call([]cty.Value{str})
-}
-
-// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits.
-func Sha512(str cty.Value) (cty.Value, error) {
-    return Sha512Func.Call([]cty.Value{str})
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go
deleted file mode 100644
index 5dae1987..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package funcs
-
-import (
-    "time"
-
-    "github.com/zclconf/go-cty/cty"
-    "github.com/zclconf/go-cty/cty/function"
-)
-
-// TimestampFunc constructs a function that returns a string representation of the current date and time.
-var TimestampFunc = function.New(&function.Spec{
-    Params: []function.Parameter{},
-    Type:   function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
-    },
-})
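// A minimal usage sketch for the two functions in this file (TimeAddFunc is
// defined just below), assuming direct access to this internal package;
// timestamps follow RFC 3339:
//
//  now, _ := TimestampFunc.Call(nil) // e.g. cty.StringVal("2020-06-06T16:34:39Z")
//  v, _ := TimeAddFunc.Call([]cty.Value{cty.StringVal("2020-06-06T00:00:00Z"), cty.StringVal("1h30m")})
//  // v is cty.StringVal("2020-06-06T01:30:00Z")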
-// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp.
-var TimeAddFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "timestamp",
-            Type: cty.String,
-        },
-        {
-            Name: "duration",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        ts, err := time.Parse(time.RFC3339, args[0].AsString())
-        if err != nil {
-            return cty.UnknownVal(cty.String), err
-        }
-        duration, err := time.ParseDuration(args[1].AsString())
-        if err != nil {
-            return cty.UnknownVal(cty.String), err
-        }
-
-        return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil
-    },
-})
-
-// Timestamp returns a string representation of the current date and time.
-//
-// In the Terraform language, timestamps are conventionally represented as
-// strings using RFC 3339 "Date and Time format" syntax, and so timestamp
-// returns a string in this format.
-func Timestamp() (cty.Value, error) {
-    return TimestampFunc.Call([]cty.Value{})
-}
-
-// TimeAdd adds a duration to a timestamp, returning a new timestamp.
-//
-// In the Terraform language, timestamps are conventionally represented as
-// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires
-// the timestamp argument to be a string conforming to this syntax.
-//
-// `duration` is a string representation of a time difference, consisting of
-// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The accepted
-// units are `"ns"`, `"us"` (or `"µs"`), `"ms"`, `"s"`, `"m"`, and `"h"`. The first
-// number may be negative to indicate a negative duration, like `"-2h5m"`.
-//
-// The result is a string, also in RFC 3339 format, representing the result
-// of adding the given duration to the given timestamp.
-func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) {
-    return TimeAddFunc.Call([]cty.Value{timestamp, duration})
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go
deleted file mode 100644
index af93f08d..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package funcs
-
-import (
-    "bytes"
-    "compress/gzip"
-    "encoding/base64"
-    "fmt"
-    "log"
-    "net/url"
-    "unicode/utf8"
-
-    "github.com/zclconf/go-cty/cty"
-    "github.com/zclconf/go-cty/cty/function"
-)
-
-// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence.
-var Base64DecodeFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "str",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        s := args[0].AsString()
-        sDec, err := base64.StdEncoding.DecodeString(s)
-        if err != nil {
-            return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s)
-        }
-        if !utf8.Valid([]byte(sDec)) {
-            log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", sDec)
-            return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8")
-        }
-        return cty.StringVal(string(sDec)), nil
-    },
-})
-// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence.
-var Base64EncodeFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "str",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil
-    },
-})
-
-// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in
-// Base64 encoding.
-var Base64GzipFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "str",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        s := args[0].AsString()
-
-        var b bytes.Buffer
-        gz := gzip.NewWriter(&b)
-        if _, err := gz.Write([]byte(s)); err != nil {
-            return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s)
-        }
-        if err := gz.Flush(); err != nil {
-            return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s)
-        }
-        if err := gz.Close(); err != nil {
-            return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s)
-        }
-        return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil
-    },
-})
-
-// URLEncodeFunc constructs a function that applies URL encoding to a given string.
-var URLEncodeFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "str",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        return cty.StringVal(url.QueryEscape(args[0].AsString())), nil
-    },
-})
-
-// Base64Decode decodes a string containing a base64 sequence.
-//
-// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
-//
-// Strings in the Terraform language are sequences of unicode characters rather
-// than bytes, so this function will also interpret the resulting bytes as
-// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function
-// produces an error.
-func Base64Decode(str cty.Value) (cty.Value, error) {
-    return Base64DecodeFunc.Call([]cty.Value{str})
-}
-
-// Base64Encode applies Base64 encoding to a string.
-//
-// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
-//
-// Strings in the Terraform language are sequences of unicode characters rather
-// than bytes, so this function will first encode the characters from the string
-// as UTF-8, and then apply Base64 encoding to the result.
-func Base64Encode(str cty.Value) (cty.Value, error) {
-    return Base64EncodeFunc.Call([]cty.Value{str})
-}
-
-// Base64Gzip compresses a string with gzip and then encodes the result in
-// Base64 encoding.
-//
-// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
-//
-// Strings in the Terraform language are sequences of unicode characters rather
-// than bytes, so this function will first encode the characters from the string
-// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding.
-func Base64Gzip(str cty.Value) (cty.Value, error) {
-    return Base64GzipFunc.Call([]cty.Value{str})
-}
-
-// URLEncode applies URL encoding to a given string.
-//
-// This function identifies characters in the given string that would have a
-// special meaning when included as a query string argument in a URL and
-// escapes them using RFC 3986 "percent encoding".
-//
-// If the given string contains non-ASCII characters, these are first encoded as
-// UTF-8 and then percent encoding is applied separately to each UTF-8 byte.
-func URLEncode(str cty.Value) (cty.Value, error) {
-    return URLEncodeFunc.Call([]cty.Value{str})
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go
deleted file mode 100644
index 786d3e74..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go
+++ /dev/null
@@ -1,360 +0,0 @@
-package funcs
-
-import (
-    "encoding/base64"
-    "fmt"
-    "io/ioutil"
-    "os"
-    "path/filepath"
-    "unicode/utf8"
-
-    "github.com/hashicorp/hcl/v2"
-    "github.com/hashicorp/hcl/v2/hclsyntax"
-    homedir "github.com/mitchellh/go-homedir"
-    "github.com/zclconf/go-cty/cty"
-    "github.com/zclconf/go-cty/cty/function"
-)
-
-// MakeFileFunc constructs a function that takes a file path and returns the
-// contents of that file, either directly as a string (where valid UTF-8 is
-// required) or as a string containing base64 bytes.
-func MakeFileFunc(baseDir string, encBase64 bool) function.Function {
-    return function.New(&function.Spec{
-        Params: []function.Parameter{
-            {
-                Name: "path",
-                Type: cty.String,
-            },
-        },
-        Type: function.StaticReturnType(cty.String),
-        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-            path := args[0].AsString()
-            src, err := readFileBytes(baseDir, path)
-            if err != nil {
-                return cty.UnknownVal(cty.String), err
-            }
-
-            switch {
-            case encBase64:
-                enc := base64.StdEncoding.EncodeToString(src)
-                return cty.StringVal(enc), nil
-            default:
-                if !utf8.Valid(src) {
-                    return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path)
-                }
-                return cty.StringVal(string(src)), nil
-            }
-        },
-    })
-}
-
-// MakeTemplateFileFunc constructs a function that takes a file path and
-// an arbitrary object of named values and attempts to render the referenced
-// file as a template using HCL template syntax.
-//
-// The template itself may recursively call other functions so a callback
-// must be provided to get access to those functions. The template cannot,
-// however, access any variables defined in the scope: it is restricted only to
-// those variables provided in the second function argument, to ensure that all
-// dependencies on other graph nodes can be seen before executing this function.
-//
-// As a special exception, a referenced template file may not recursively call
-// the templatefile function, since that would risk the same file being
-// included into itself indefinitely.
-func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function {
-
-    params := []function.Parameter{
-        {
-            Name: "path",
-            Type: cty.String,
-        },
-        {
-            Name: "vars",
-            Type: cty.DynamicPseudoType,
-        },
-    }
-
-    loadTmpl := func(fn string) (hcl.Expression, error) {
-        // We re-use File here to ensure the same filename interpretation
-        // as it does, along with its other safety checks.
-        tmplVal, err := File(baseDir, cty.StringVal(fn))
-        if err != nil {
-            return nil, err
-        }
-
-        expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1})
-        if diags.HasErrors() {
-            return nil, diags
-        }
-
-        return expr, nil
-    }
-
-    renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) {
-        if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) {
-            return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time
-        }
-
-        ctx := &hcl.EvalContext{
-            Variables: varsVal.AsValueMap(),
-        }
-
-        // We'll pre-check references in the template here so we can give a
-        // more specialized error message than HCL would by default, so it's
-        // clearer that this problem is coming from a templatefile call.
-        for _, traversal := range expr.Variables() {
-            root := traversal.RootName()
-            if _, ok := ctx.Variables[root]; !ok {
-                return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange())
-            }
-        }
-
-        givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems
-        funcs := make(map[string]function.Function, len(givenFuncs))
-        for name, fn := range givenFuncs {
-            if name == "templatefile" {
-                // We stub this one out to prevent recursive calls.
-                funcs[name] = function.New(&function.Spec{
-                    Params: params,
-                    Type: func(args []cty.Value) (cty.Type, error) {
-                        return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call")
-                    },
-                })
-                continue
-            }
-            funcs[name] = fn
-        }
-        ctx.Functions = funcs
-
-        val, diags := expr.Value(ctx)
-        if diags.HasErrors() {
-            return cty.DynamicVal, diags
-        }
-        return val, nil
-    }
-
-    return function.New(&function.Spec{
-        Params: params,
-        Type: func(args []cty.Value) (cty.Type, error) {
-            if !(args[0].IsKnown() && args[1].IsKnown()) {
-                return cty.DynamicPseudoType, nil
-            }
-
-            // We'll render our template now to see what result type it produces.
-            // A template consisting only of a single interpolation can potentially
-            // return any type.
-            expr, err := loadTmpl(args[0].AsString())
-            if err != nil {
-                return cty.DynamicPseudoType, err
-            }
-
-            // This is safe even if args[1] contains unknowns because the HCL
-            // template renderer itself knows how to short-circuit those.
-            val, err := renderTmpl(expr, args[1])
-            return val.Type(), err
-        },
-        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-            expr, err := loadTmpl(args[0].AsString())
-            if err != nil {
-                return cty.DynamicVal, err
-            }
-            return renderTmpl(expr, args[1])
-        },
-    })
-
-}
-
-// MakeFileExistsFunc constructs a function that takes a path
-// and determines whether a file exists at that path.
-func MakeFileExistsFunc(baseDir string) function.Function {
-    return function.New(&function.Spec{
-        Params: []function.Parameter{
-            {
-                Name: "path",
-                Type: cty.String,
-            },
-        },
-        Type: function.StaticReturnType(cty.Bool),
-        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-            path := args[0].AsString()
-            path, err := homedir.Expand(path)
-            if err != nil {
-                return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err)
-            }
-
-            if !filepath.IsAbs(path) {
-                path = filepath.Join(baseDir, path)
-            }
-
-            // Ensure that the path is canonical for the host OS
-            path = filepath.Clean(path)
-
-            fi, err := os.Stat(path)
-            if err != nil {
-                if os.IsNotExist(err) {
-                    return cty.False, nil
-                }
-                return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path)
-            }
-
-            if fi.Mode().IsRegular() {
-                return cty.True, nil
-            }
-
-            return cty.False, fmt.Errorf("%s is not a regular file, but %q",
-                path, fi.Mode().String())
-        },
-    })
-}
-
-// BasenameFunc constructs a function that takes a string containing a filesystem path
-// and removes all except the last portion from it.
-var BasenameFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "path",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        return cty.StringVal(filepath.Base(args[0].AsString())), nil
-    },
-})
-
-// DirnameFunc constructs a function that takes a string containing a filesystem path
-// and removes the last portion from it.
-var DirnameFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "path",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        return cty.StringVal(filepath.Dir(args[0].AsString())), nil
-    },
-})
-
-// AbsPathFunc constructs a function that converts a filesystem path to an absolute path.
-var AbsPathFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "path",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        absPath, err := filepath.Abs(args[0].AsString())
-        return cty.StringVal(filepath.ToSlash(absPath)), err
-    },
-})
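// A minimal usage sketch for the path helpers in this file (the File/FileExists
// wrappers are defined further down), assuming direct access to this internal
// package:
//
//  exists, _ := FileExists(".", cty.StringVal("main.tf")) // cty.True only for a regular file
//  base, _ := Basename(cty.StringVal("foo/bar/baz.txt"))  // cty.StringVal("baz.txt")
//  dir, _ := Dirname(cty.StringVal("foo/bar/baz.txt"))    // cty.StringVal("foo/bar")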
-// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory.
-var PathExpandFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "path",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-
-        homePath, err := homedir.Expand(args[0].AsString())
-        return cty.StringVal(homePath), err
-    },
-})
-
-func readFileBytes(baseDir, path string) ([]byte, error) {
-    path, err := homedir.Expand(path)
-    if err != nil {
-        return nil, fmt.Errorf("failed to expand ~: %s", err)
-    }
-
-    if !filepath.IsAbs(path) {
-        path = filepath.Join(baseDir, path)
-    }
-
-    // Ensure that the path is canonical for the host OS
-    path = filepath.Clean(path)
-
-    src, err := ioutil.ReadFile(path)
-    if err != nil {
-        // ReadFile does not return Terraform-user-friendly error
-        // messages, so we'll provide our own.
-        if os.IsNotExist(err) {
-            return nil, fmt.Errorf("no file exists at %s", path)
-        }
-        return nil, fmt.Errorf("failed to read %s", path)
-    }
-
-    return src, nil
-}
-
-// File reads the contents of the file at the given path.
-//
-// The file must contain valid UTF-8 bytes, or this function will return an error.
-//
-// The underlying function implementation works relative to a particular base
-// directory, so this wrapper takes a base directory string and uses it to
-// construct the underlying function before calling it.
-func File(baseDir string, path cty.Value) (cty.Value, error) {
-    fn := MakeFileFunc(baseDir, false)
-    return fn.Call([]cty.Value{path})
-}
-
-// FileExists determines whether a file exists at the given path.
-//
-// The underlying function implementation works relative to a particular base
-// directory, so this wrapper takes a base directory string and uses it to
-// construct the underlying function before calling it.
-func FileExists(baseDir string, path cty.Value) (cty.Value, error) {
-    fn := MakeFileExistsFunc(baseDir)
-    return fn.Call([]cty.Value{path})
-}
-
-// FileBase64 reads the contents of the file at the given path.
-//
-// The bytes from the file are encoded as base64 before returning.
-//
-// The underlying function implementation works relative to a particular base
-// directory, so this wrapper takes a base directory string and uses it to
-// construct the underlying function before calling it.
-func FileBase64(baseDir string, path cty.Value) (cty.Value, error) {
-    fn := MakeFileFunc(baseDir, true)
-    return fn.Call([]cty.Value{path})
-}
-
-// Basename takes a string containing a filesystem path and removes all except the last portion from it.
-//
-// The underlying function implementation works only with the path string and does not access the filesystem itself.
-// It is therefore unable to take into account filesystem features such as symlinks.
-//
-// If the path is empty then the result is ".", representing the current working directory.
-func Basename(path cty.Value) (cty.Value, error) {
-    return BasenameFunc.Call([]cty.Value{path})
-}
-
-// Dirname takes a string containing a filesystem path and removes the last portion from it.
-//
-// The underlying function implementation works only with the path string and does not access the filesystem itself.
-// It is therefore unable to take into account filesystem features such as symlinks.
-//
-// If the path is empty then the result is ".", representing the current working directory.
-func Dirname(path cty.Value) (cty.Value, error) {
-    return DirnameFunc.Call([]cty.Value{path})
-}
-
-// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with
-// the current user's home directory path.
-//
-// The underlying function implementation works only with the path string and does not access the filesystem itself.
-// It is therefore unable to take into account filesystem features such as symlinks.
-//
-// If the leading segment in the path is not `~` then the given path is returned unmodified.
-func Pathexpand(path cty.Value) (cty.Value, error) {
-    return PathExpandFunc.Call([]cty.Value{path})
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go
deleted file mode 100644
index c813f47b..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package funcs
-
-import (
-    "math"
-    "math/big"
-
-    "github.com/zclconf/go-cty/cty"
-    "github.com/zclconf/go-cty/cty/function"
-    "github.com/zclconf/go-cty/cty/gocty"
-)
-
-// CeilFunc constructs a function that returns the closest whole number greater
-// than or equal to the given value.
-var CeilFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "num",
-            Type: cty.Number,
-        },
-    },
-    Type: function.StaticReturnType(cty.Number),
-    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-        var val float64
-        if err := gocty.FromCtyValue(args[0], &val); err != nil {
-            return cty.UnknownVal(cty.String), err
-        }
-        return cty.NumberIntVal(int64(math.Ceil(val))), nil
-    },
-})
-
-// FloorFunc constructs a function that returns the closest whole number less
-// than or equal to the given value.
-var FloorFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "num",
-            Type: cty.Number,
-        },
-    },
-    Type: function.StaticReturnType(cty.Number),
-    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-        var val float64
-        if err := gocty.FromCtyValue(args[0], &val); err != nil {
-            return cty.UnknownVal(cty.String), err
-        }
-        return cty.NumberIntVal(int64(math.Floor(val))), nil
-    },
-})
-
-// LogFunc constructs a function that returns the logarithm of a given number in a given base.
-var LogFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "num",
-            Type: cty.Number,
-        },
-        {
-            Name: "base",
-            Type: cty.Number,
-        },
-    },
-    Type: function.StaticReturnType(cty.Number),
-    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-        var num float64
-        if err := gocty.FromCtyValue(args[0], &num); err != nil {
-            return cty.UnknownVal(cty.String), err
-        }
-
-        var base float64
-        if err := gocty.FromCtyValue(args[1], &base); err != nil {
-            return cty.UnknownVal(cty.String), err
-        }
-
-        return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil
-    },
-})
-
-// PowFunc constructs a function that returns the given number raised to the given power.
-var PowFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "num",
-            Type: cty.Number,
-        },
-        {
-            Name: "power",
-            Type: cty.Number,
-        },
-    },
-    Type: function.StaticReturnType(cty.Number),
-    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-        var num float64
-        if err := gocty.FromCtyValue(args[0], &num); err != nil {
-            return cty.UnknownVal(cty.String), err
-        }
-
-        var power float64
-        if err := gocty.FromCtyValue(args[1], &power); err != nil {
-            return cty.UnknownVal(cty.String), err
-        }
-
-        return cty.NumberFloatVal(math.Pow(num, power)), nil
-    },
-})
-
-// SignumFunc constructs a function that returns the sign of the given number
-// as -1, 0, or +1.
-var SignumFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "num",
-            Type: cty.Number,
-        },
-    },
-    Type: function.StaticReturnType(cty.Number),
-    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-        var num int
-        if err := gocty.FromCtyValue(args[0], &num); err != nil {
-            return cty.UnknownVal(cty.String), err
-        }
-        switch {
-        case num < 0:
-            return cty.NumberIntVal(-1), nil
-        case num > 0:
-            return cty.NumberIntVal(+1), nil
-        default:
-            return cty.NumberIntVal(0), nil
-        }
-    },
-})
-
-// ParseIntFunc constructs a function that parses a string argument and returns an integer of the specified base.
-var ParseIntFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "number",
-            Type: cty.DynamicPseudoType,
-        },
-        {
-            Name: "base",
-            Type: cty.Number,
-        },
-    },
-
-    Type: func(args []cty.Value) (cty.Type, error) {
-        if !args[0].Type().Equals(cty.String) {
-            return cty.Number, function.NewArgErrorf(0, "first argument must be a string, not %s", args[0].Type().FriendlyName())
-        }
-        return cty.Number, nil
-    },
-
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        var numstr string
-        var base int
-        var err error
-
-        if err = gocty.FromCtyValue(args[0], &numstr); err != nil {
-            return cty.UnknownVal(cty.String), function.NewArgError(0, err)
-        }
-
-        if err = gocty.FromCtyValue(args[1], &base); err != nil {
-            return cty.UnknownVal(cty.Number), function.NewArgError(1, err)
-        }
-
-        if base < 2 || base > 62 {
-            return cty.UnknownVal(cty.Number), function.NewArgErrorf(
-                1,
-                "base must be a whole number between 2 and 62 inclusive",
-            )
-        }
-
-        num, ok := (&big.Int{}).SetString(numstr, base)
-        if !ok {
-            return cty.UnknownVal(cty.Number), function.NewArgErrorf(
-                0,
-                "cannot parse %q as a base %d integer",
-                numstr,
-                base,
-            )
-        }
-
-        parsedNum := cty.NumberVal((&big.Float{}).SetInt(num))
-
-        return parsedNum, nil
-    },
-})
-
-// Ceil returns the closest whole number greater than or equal to the given value.
-func Ceil(num cty.Value) (cty.Value, error) {
-    return CeilFunc.Call([]cty.Value{num})
-}
-
-// Floor returns the closest whole number less than or equal to the given value.
-func Floor(num cty.Value) (cty.Value, error) {
-    return FloorFunc.Call([]cty.Value{num})
-}
-
-// Log returns the logarithm of a given number in a given base.
-func Log(num, base cty.Value) (cty.Value, error) {
-    return LogFunc.Call([]cty.Value{num, base})
-}
-
-// Pow returns the given number raised to the given power.
-func Pow(num, power cty.Value) (cty.Value, error) {
-    return PowFunc.Call([]cty.Value{num, power})
-}
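// A minimal usage sketch for ParseIntFunc above, assuming direct access to
// this internal package; the input must be a string and the base a whole
// number between 2 and 62:
//
//  v, err := ParseIntFunc.Call([]cty.Value{cty.StringVal("FF"), cty.NumberIntVal(16)})
//  // err == nil; v represents the number 255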
-// Signum determines the sign of a number, returning a number between -1 and
-// 1 to represent the sign.
-func Signum(num cty.Value) (cty.Value, error) {
-    return SignumFunc.Call([]cty.Value{num})
-}
-
-// ParseInt parses a string argument and returns an integer of the specified base.
-func ParseInt(num cty.Value, base cty.Value) (cty.Value, error) {
-    return ParseIntFunc.Call([]cty.Value{num, base})
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go
deleted file mode 100644
index c9ddf19e..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go
+++ /dev/null
@@ -1,280 +0,0 @@
-package funcs
-
-import (
-    "fmt"
-    "regexp"
-    "sort"
-    "strings"
-
-    "github.com/zclconf/go-cty/cty"
-    "github.com/zclconf/go-cty/cty/function"
-    "github.com/zclconf/go-cty/cty/gocty"
-)
-
-var JoinFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "separator",
-            Type: cty.String,
-        },
-    },
-    VarParam: &function.Parameter{
-        Name: "lists",
-        Type: cty.List(cty.String),
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        sep := args[0].AsString()
-        listVals := args[1:]
-        if len(listVals) < 1 {
-            return cty.UnknownVal(cty.String), fmt.Errorf("at least one list is required")
-        }
-
-        l := 0
-        for _, list := range listVals {
-            if !list.IsWhollyKnown() {
-                return cty.UnknownVal(cty.String), nil
-            }
-            l += list.LengthInt()
-        }
-
-        items := make([]string, 0, l)
-        for ai, list := range listVals {
-            ei := 0
-            for it := list.ElementIterator(); it.Next(); {
-                _, val := it.Element()
-                if val.IsNull() {
-                    if len(listVals) > 1 {
-                        return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d of list %d is null; cannot concatenate null values", ei, ai+1)
-                    }
-                    return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d is null; cannot concatenate null values", ei)
-                }
-                items = append(items, val.AsString())
-                ei++
-            }
-        }
-
-        return cty.StringVal(strings.Join(items, sep)), nil
-    },
-})
-
-var SortFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "list",
-            Type: cty.List(cty.String),
-        },
-    },
-    Type: function.StaticReturnType(cty.List(cty.String)),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        listVal := args[0]
-
-        if !listVal.IsWhollyKnown() {
-            // If some of the element values aren't known yet then we
-            // can't yet predict the order of the result.
-            return cty.UnknownVal(retType), nil
-        }
-        if listVal.LengthInt() == 0 { // Easy path
-            return listVal, nil
-        }
-
-        list := make([]string, 0, listVal.LengthInt())
-        for it := listVal.ElementIterator(); it.Next(); {
-            iv, v := it.Element()
-            if v.IsNull() {
-                return cty.UnknownVal(retType), fmt.Errorf("given list element %s is null; a null string cannot be sorted", iv.AsBigFloat().String())
-            }
-            list = append(list, v.AsString())
-        }
-
-        sort.Strings(list)
-        retVals := make([]cty.Value, len(list))
-        for i, s := range list {
-            retVals[i] = cty.StringVal(s)
-        }
-        return cty.ListVal(retVals), nil
-    },
-})
-
-var SplitFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "separator",
-            Type: cty.String,
-        },
-        {
-            Name: "str",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.List(cty.String)),
-    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
-        sep := args[0].AsString()
-        str := args[1].AsString()
-        elems := strings.Split(str, sep)
-        elemVals := make([]cty.Value, len(elems))
-        for i, s := range elems {
-            elemVals[i] = cty.StringVal(s)
-        }
-        if len(elemVals) == 0 {
-            return cty.ListValEmpty(cty.String), nil
-        }
-        return cty.ListVal(elemVals), nil
-    },
-})
-
-// ChompFunc constructs a function that removes newline characters at the end of a string.
-var ChompFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "str",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-        newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
-        return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil
-    },
-})
-
-// IndentFunc constructs a function that adds a given number of spaces to the
-// beginnings of all but the first line in a given multi-line string.
-var IndentFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "spaces",
-            Type: cty.Number,
-        },
-        {
-            Name: "str",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-        var spaces int
-        if err := gocty.FromCtyValue(args[0], &spaces); err != nil {
-            return cty.UnknownVal(cty.String), err
-        }
-        data := args[1].AsString()
-        pad := strings.Repeat(" ", spaces)
-        return cty.StringVal(strings.Replace(data, "\n", "\n"+pad, -1)), nil
-    },
-})
-
-// ReplaceFunc constructs a function that searches a given string for another
-// given substring, and replaces each occurrence with a given replacement string.
-var ReplaceFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "str",
-            Type: cty.String,
-        },
-        {
-            Name: "substr",
-            Type: cty.String,
-        },
-        {
-            Name: "replace",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-        str := args[0].AsString()
-        substr := args[1].AsString()
-        replace := args[2].AsString()
-
-        // We search/replace using a regexp if the string is surrounded
-        // in forward slashes.
-        if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' {
-            re, err := regexp.Compile(substr[1 : len(substr)-1])
-            if err != nil {
-                return cty.UnknownVal(cty.String), err
-            }
-
-            return cty.StringVal(re.ReplaceAllString(str, replace)), nil
-        }
-
-        return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil
-    },
-})
-
-// TitleFunc constructs a function that converts the first letter of each word
-// in the given string to uppercase.
-var TitleFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "str",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-        return cty.StringVal(strings.Title(args[0].AsString())), nil
-    },
-})
-
-// TrimSpaceFunc constructs a function that removes any space characters from
-// the start and end of the given string.
-var TrimSpaceFunc = function.New(&function.Spec{
-    Params: []function.Parameter{
-        {
-            Name: "str",
-            Type: cty.String,
-        },
-    },
-    Type: function.StaticReturnType(cty.String),
-    Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
-        return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil
-    },
-})
-
-// Join concatenates together the string elements of one or more lists with a
-// given separator.
-func Join(sep cty.Value, lists ...cty.Value) (cty.Value, error) {
-    args := make([]cty.Value, len(lists)+1)
-    args[0] = sep
-    copy(args[1:], lists)
-    return JoinFunc.Call(args)
-}
-
-// Sort re-orders the elements of a given list of strings so that they are
-// in ascending lexicographical order.
-func Sort(list cty.Value) (cty.Value, error) {
-    return SortFunc.Call([]cty.Value{list})
-}
-
-// Split divides a given string by a given separator, returning a list of
-// strings containing the characters between the separator sequences.
-func Split(sep, str cty.Value) (cty.Value, error) {
-    return SplitFunc.Call([]cty.Value{sep, str})
-}
-
-// Chomp removes newline characters at the end of a string.
-func Chomp(str cty.Value) (cty.Value, error) {
-    return ChompFunc.Call([]cty.Value{str})
-}
-
-// Indent adds a given number of spaces to the beginnings of all but the first
-// line in a given multi-line string.
-func Indent(spaces, str cty.Value) (cty.Value, error) {
-    return IndentFunc.Call([]cty.Value{spaces, str})
-}
-
-// Replace searches a given string for another given substring,
-// and replaces all occurrences with a given replacement string.
-func Replace(str, substr, replace cty.Value) (cty.Value, error) {
-    return ReplaceFunc.Call([]cty.Value{str, substr, replace})
-}
-
-// Title converts the first letter of each word in the given string to uppercase.
-func Title(str cty.Value) (cty.Value, error) {
-    return TitleFunc.Call([]cty.Value{str})
-}
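// A minimal sketch of ReplaceFunc's regex mode, assuming direct access to
// this internal package: a search string wrapped in forward slashes is
// compiled as a regular expression rather than matched literally:
//
//  v, _ := Replace(cty.StringVal("hello world"), cty.StringVal("/w.+d/"), cty.StringVal("terraform"))
//  // v is cty.StringVal("hello terraform")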
-// TrimSpace removes any space characters from the start and end of the given string.
-func TrimSpace(str cty.Value) (cty.Value, error) {
-    return TrimSpaceFunc.Call([]cty.Value{str})
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go
deleted file mode 100644
index a3c49066..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package lang
-
-import (
-    ctyyaml "github.com/zclconf/go-cty-yaml"
-    "github.com/zclconf/go-cty/cty"
-    "github.com/zclconf/go-cty/cty/function"
-    "github.com/zclconf/go-cty/cty/function/stdlib"
-
-    "github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
-)
-
-var impureFunctions = []string{
-    "bcrypt",
-    "timestamp",
-    "uuid",
-}
-
-// Functions returns the set of functions that should be used when evaluating
-// expressions in the receiving scope.
-func (s *Scope) Functions() map[string]function.Function {
-    s.funcsLock.Lock()
-    if s.funcs == nil {
-        // Some of our functions are just directly the cty stdlib functions.
-        // Others are implemented in the subdirectory "funcs" here in this
-        // repository. New functions should generally start out their lives
-        // in the "funcs" directory and potentially graduate to cty stdlib
-        // later if the functionality seems to be something domain-agnostic
-        // that would be useful to all applications using cty functions.
-
-        s.funcs = map[string]function.Function{
-            "abs":              stdlib.AbsoluteFunc,
-            "abspath":          funcs.AbsPathFunc,
-            "basename":         funcs.BasenameFunc,
-            "base64decode":     funcs.Base64DecodeFunc,
-            "base64encode":     funcs.Base64EncodeFunc,
-            "base64gzip":       funcs.Base64GzipFunc,
-            "base64sha256":     funcs.Base64Sha256Func,
-            "base64sha512":     funcs.Base64Sha512Func,
-            "bcrypt":           funcs.BcryptFunc,
-            "ceil":             funcs.CeilFunc,
-            "chomp":            funcs.ChompFunc,
-            "cidrhost":         funcs.CidrHostFunc,
-            "cidrnetmask":      funcs.CidrNetmaskFunc,
-            "cidrsubnet":       funcs.CidrSubnetFunc,
-            "cidrsubnets":      funcs.CidrSubnetsFunc,
-            "coalesce":         funcs.CoalesceFunc,
-            "coalescelist":     funcs.CoalesceListFunc,
-            "compact":          funcs.CompactFunc,
-            "concat":           stdlib.ConcatFunc,
-            "contains":         funcs.ContainsFunc,
-            "csvdecode":        stdlib.CSVDecodeFunc,
-            "dirname":          funcs.DirnameFunc,
-            "distinct":         funcs.DistinctFunc,
-            "element":          funcs.ElementFunc,
-            "chunklist":        funcs.ChunklistFunc,
-            "file":             funcs.MakeFileFunc(s.BaseDir, false),
-            "fileexists":       funcs.MakeFileExistsFunc(s.BaseDir),
-            "filebase64":       funcs.MakeFileFunc(s.BaseDir, true),
-            "filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir),
-            "filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir),
-            "filemd5":          funcs.MakeFileMd5Func(s.BaseDir),
-            "filesha1":         funcs.MakeFileSha1Func(s.BaseDir),
-            "filesha256":       funcs.MakeFileSha256Func(s.BaseDir),
-            "filesha512":       funcs.MakeFileSha512Func(s.BaseDir),
-            "flatten":          funcs.FlattenFunc,
-            "floor":            funcs.FloorFunc,
-            "format":           stdlib.FormatFunc,
-            "formatdate":       stdlib.FormatDateFunc,
-            "formatlist":       stdlib.FormatListFunc,
-            "indent":           funcs.IndentFunc,
-            "index":            funcs.IndexFunc,
-            "join":             funcs.JoinFunc,
-            "jsondecode":       stdlib.JSONDecodeFunc,
-            "jsonencode":       stdlib.JSONEncodeFunc,
-            "keys":             funcs.KeysFunc,
-            "length":           funcs.LengthFunc,
-            "list":             funcs.ListFunc,
-            "log":              funcs.LogFunc,
-            "lookup":           funcs.LookupFunc,
-            "lower":            stdlib.LowerFunc,
-            "map":              funcs.MapFunc,
-            "matchkeys":        funcs.MatchkeysFunc,
-            "max":              stdlib.MaxFunc,
-            "md5":              funcs.Md5Func,
-            "merge":            funcs.MergeFunc,
-            "min":              stdlib.MinFunc,
-            "parseint":         funcs.ParseIntFunc,
-            "pathexpand":      funcs.PathExpandFunc,
-            "pow":             funcs.PowFunc,
-            "range":           stdlib.RangeFunc,
-            "regex":           stdlib.RegexFunc,
-            "regexall":        stdlib.RegexAllFunc,
-            "replace":         funcs.ReplaceFunc,
-            "reverse":         funcs.ReverseFunc,
-            "rsadecrypt":      funcs.RsaDecryptFunc,
-            "setintersection": stdlib.SetIntersectionFunc,
-            "setproduct":      funcs.SetProductFunc,
-            "setunion":        stdlib.SetUnionFunc,
-            "sha1":            funcs.Sha1Func,
-            "sha256":          funcs.Sha256Func,
-            "sha512":          funcs.Sha512Func,
-            "signum":          funcs.SignumFunc,
-            "slice":           funcs.SliceFunc,
-            "sort":            funcs.SortFunc,
-            "split":           funcs.SplitFunc,
-            "strrev":          stdlib.ReverseFunc,
-            "substr":          stdlib.SubstrFunc,
-            "timestamp":       funcs.TimestampFunc,
-            "timeadd":         funcs.TimeAddFunc,
-            "title":           funcs.TitleFunc,
-            "tostring":        funcs.MakeToFunc(cty.String),
-            "tonumber":        funcs.MakeToFunc(cty.Number),
-            "tobool":          funcs.MakeToFunc(cty.Bool),
-            "toset":           funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)),
-            "tolist":          funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)),
-            "tomap":           funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)),
-            "transpose":       funcs.TransposeFunc,
-            "trimspace":       funcs.TrimSpaceFunc,
-            "upper":           stdlib.UpperFunc,
-            "urlencode":       funcs.URLEncodeFunc,
-            "uuid":            funcs.UUIDFunc,
-            "uuidv5":          funcs.UUIDV5Func,
-            "values":          funcs.ValuesFunc,
-            "yamldecode":      ctyyaml.YAMLDecodeFunc,
-            "yamlencode":      ctyyaml.YAMLEncodeFunc,
-            "zipmap":          funcs.ZipmapFunc,
-        }
-
-        s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function {
-            // The templatefile function prevents recursive calls to itself
-            // by copying this map and overwriting the "templatefile" entry.
-            return s.funcs
-        })
-
-        if s.PureOnly {
-            // Force our few impure functions to return unknown so that we
-            // can defer evaluating them until a later pass.
-            for _, name := range impureFunctions {
-                s.funcs[name] = function.Unpredictable(s.funcs[name])
-            }
-        }
-    }
-    s.funcsLock.Unlock()
-
-    return s.funcs
-}
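// A minimal sketch of how PureOnly interacts with the table above, assuming
// direct access to this internal package: wrapped impure functions still
// type-check, but they return unknown values, deferring evaluation until
// apply time.
//
//  s := &Scope{PureOnly: true}
//  v, _ := s.Functions()["timestamp"].Call(nil)
//  // v is an unknown cty.String rather than a concrete timestamp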
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go
deleted file mode 100644
index 7923d511..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package lang
-
-import (
-    "github.com/hashicorp/hcl/v2"
-    "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-    "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
-    "github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr"
-    "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-// References finds all of the references in the given set of traversals,
-// returning diagnostics if any of the traversals cannot be interpreted as a
-// reference.
-//
-// This function does not do any de-duplication of references, since references
-// have source location information embedded in them and so any invalid
-// references that are duplicated should have errors reported for each
-// occurrence.
-//
-// If the returned diagnostics contains errors then the result may be
-// incomplete or invalid. Otherwise, the returned slice has one reference per
-// given traversal, though it is not guaranteed that the references will
-// appear in the same order as the given traversals.
-func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) {
-    if len(traversals) == 0 {
-        return nil, nil
-    }
-
-    var diags tfdiags.Diagnostics
-    refs := make([]*addrs.Reference, 0, len(traversals))
-
-    for _, traversal := range traversals {
-        ref, refDiags := addrs.ParseRef(traversal)
-        diags = diags.Append(refDiags)
-        if ref == nil {
-            continue
-        }
-        refs = append(refs, ref)
-    }
-
-    return refs, diags
-}
-
-// ReferencesInBlock is a helper wrapper around References that first searches
-// the given body for traversals, before converting those traversals to
-// references.
-//
-// A block schema must be provided so that this function can determine where in
-// the body variables are expected.
-func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) {
-    if body == nil {
-        return nil, nil
-    }
-
-    // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or
-    // dynblock.VariablesHCLDec here because when we evaluate a block we'll
-    // first apply the dynamic block extension and _then_ the blocktoattr
-    // transform, and so blocktoattr.ExpandedVariables takes into account
-    // both of those transforms when it analyzes the body to ensure we find
-    // all of the references as if they'd already moved into their final
-    // locations, even though we can't expand dynamic blocks yet until we
-    // already know which variables are required.
-    //
-    // The set of cases we want to detect here is covered by the tests for
-    // the plan graph builder in the main 'terraform' package, since it's
-    // in a better position to test this due to having mock providers etc
-    // available.
-    traversals := blocktoattr.ExpandedVariables(body, schema)
-    return References(traversals)
-}
-
-// ReferencesInExpr is a helper wrapper around References that first searches
-// the given expression for traversals, before converting those traversals
-// to references.
-func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) {
-    if expr == nil {
-        return nil, nil
-    }
-    traversals := expr.Variables()
-    return References(traversals)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go
deleted file mode 100644
index a720cca6..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package lang
-
-import (
-    "sync"
-
-    "github.com/zclconf/go-cty/cty/function"
-
-    "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-)
-
-// Scope is the main type in this package, allowing dynamic evaluation of
-// blocks and expressions based on some contextual information that informs
-// which variables and functions will be available.
-type Scope struct {
-    // Data is used to resolve references in expressions.
-    Data Data
-
-    // SelfAddr is the address that the "self" object should be an alias of,
-    // or nil if the "self" object should not be available at all.
-    SelfAddr addrs.Referenceable
-
-    // BaseDir is the base directory used by any interpolation functions that
-    // accept filesystem paths as arguments.
-    BaseDir string
-
-    // PureOnly can be set to true to request that any non-pure functions
-    // produce unknown value results rather than actually executing. This is
-    // important during a plan phase to avoid generating results that could
-    // then differ during apply.
-    PureOnly bool
-
-    funcs     map[string]function.Function
-    funcsLock sync.Mutex
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go
deleted file mode 100644
index 0d7d664f..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package modsdir is an internal package containing the model types used to
-// represent the manifest of modules in a local modules cache directory.
-package modsdir
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go
deleted file mode 100644
index 2d45c852..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package modsdir
-
-import (
-    "encoding/json"
-    "fmt"
-    "io"
-    "io/ioutil"
-    "log"
-    "os"
-    "path/filepath"
-
-    version "github.com/hashicorp/go-version"
-
-    "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-)
-
-// Record represents some metadata about an installed module, as part
-// of a ModuleManifest.
-type Record struct {
-    // Key is a unique identifier for this particular module, based on its
-    // position within the static module tree.
-    Key string `json:"Key"`
-
-    // SourceAddr is the source address given for this module in configuration.
-    // This is used only to detect if the source was changed in configuration
-    // since the module was last installed, which means that the installer
-    // must re-install it.
-    SourceAddr string `json:"Source"`
-
-    // Version is the exact version of the module, which results from parsing
-    // VersionStr. nil for un-versioned modules.
-    Version *version.Version `json:"-"`
-
-    // VersionStr is the version specifier string. This is used only for
-    // serialization in snapshots and should not be accessed or updated
-    // by any other codepaths; use "Version" instead.
-    VersionStr string `json:"Version,omitempty"`
-
-    // Dir is the path to the local directory where the module is installed.
-    Dir string `json:"Dir"`
-}
-
-// Manifest is a map used to keep track of the filesystem locations
-// and other metadata about installed modules.
-//
-// The configuration loader refers to this, while the module installer updates
-// it to reflect any changes to the installed modules.
-type Manifest map[string]Record
-
-func (m Manifest) ModuleKey(path addrs.Module) string {
-    return path.String()
-}
-
-// manifestSnapshotFile is an internal struct used only to assist in our JSON
-// serialization of manifest snapshots. It should not be used for any other
-// purpose.
-type manifestSnapshotFile struct {
-    Records []Record `json:"Modules"`
-}
-
-func ReadManifestSnapshot(r io.Reader) (Manifest, error) {
-    src, err := ioutil.ReadAll(r)
-    if err != nil {
-        return nil, err
-    }
-
-    if len(src) == 0 {
-        // This should never happen, but we'll tolerate it as if it were
-        // a valid empty JSON object.
-        return make(Manifest), nil
-    }
-
-    var read manifestSnapshotFile
-    err = json.Unmarshal(src, &read)
-    if err != nil {
-        // Surface a corrupt snapshot file rather than reading it as empty.
-        return nil, err
-    }
-
-    new := make(Manifest)
-    for _, record := range read.Records {
-        if record.VersionStr != "" {
-            record.Version, err = version.NewVersion(record.VersionStr)
-            if err != nil {
-                return nil, fmt.Errorf("invalid version %q for %s: %s", record.VersionStr, record.Key, err)
-            }
-        }
-        if _, exists := new[record.Key]; exists {
-            // This should never happen in any valid file, so we'll catch it
-            // and report it to avoid confusing/undefined behavior if the
-            // snapshot file was edited incorrectly outside of Terraform.
-            return nil, fmt.Errorf("snapshot file contains two records for path %s", record.Key)
-        }
-        new[record.Key] = record
-    }
-    return new, nil
-}
-
-func ReadManifestSnapshotForDir(dir string) (Manifest, error) {
-    fn := filepath.Join(dir, ManifestSnapshotFilename)
-    r, err := os.Open(fn)
-    if err != nil {
-        if os.IsNotExist(err) {
-            return make(Manifest), nil // missing file is okay and treated as empty
-        }
-        return nil, err
-    }
-    return ReadManifestSnapshot(r)
-}
-
-func (m Manifest) WriteSnapshot(w io.Writer) error {
-    var write manifestSnapshotFile
-
-    for _, record := range m {
-        // Make sure VersionStr is in sync with Version, since we encourage
-        // callers to manipulate Version and ignore VersionStr.
-        if record.Version != nil {
-            record.VersionStr = record.Version.String()
-        } else {
-            record.VersionStr = ""
-        }
-        write.Records = append(write.Records, record)
-    }
-
-    src, err := json.Marshal(write)
-    if err != nil {
-        return err
-    }
-
-    _, err = w.Write(src)
-    return err
-}
-
-func (m Manifest) WriteSnapshotToDir(dir string) error {
-    fn := filepath.Join(dir, ManifestSnapshotFilename)
-    log.Printf("[TRACE] modsdir: writing modules manifest to %s", fn)
-    w, err := os.Create(fn)
-    if err != nil {
-        return err
-    }
-    return m.WriteSnapshot(w)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go
deleted file mode 100644
index 9ebb5243..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package modsdir
-
-const ManifestSnapshotFilename = "modules.json"
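// A minimal round-trip sketch for the manifest helpers above, assuming a
// hypothetical modules directory ".terraform/modules" containing the
// modules.json snapshot named by ManifestSnapshotFilename:
//
//  m, err := ReadManifestSnapshotForDir(".terraform/modules")
//  if err == nil {
//      _ = m.WriteSnapshotToDir(".terraform/modules") // rewrites modules.json
//  }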
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go
deleted file mode 100644
index c8058871..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package moduledeps
-
-import (
-    "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery"
-)
-
-// Providers describes a set of provider dependencies for a given module.
-//
-// Each named provider instance can have one version constraint.
-type Providers map[ProviderInstance]ProviderDependency
-
-// ProviderDependency describes the dependency for a particular provider
-// instance, including both the set of allowed versions and the reason for
-// the dependency.
-type ProviderDependency struct {
-    Constraints discovery.Constraints
-    Reason      ProviderDependencyReason
-}
-
-// ProviderDependencyReason is an enumeration of reasons why a dependency might be
-// present.
-type ProviderDependencyReason int
-
-const (
-    // ProviderDependencyExplicit means that there is an explicit "provider"
-    // block in the configuration for this module.
-    ProviderDependencyExplicit ProviderDependencyReason = iota
-
-    // ProviderDependencyImplicit means that there is no explicit "provider"
-    // block but there is at least one resource that uses this provider.
-    ProviderDependencyImplicit
-
-    // ProviderDependencyInherited is a special case of
-    // ProviderDependencyImplicit where a parent module has defined a
-    // configuration for the provider that has been inherited by at least one
-    // resource in this module.
-    ProviderDependencyInherited
-
-    // ProviderDependencyFromState means that this provider is not currently
-    // referenced by configuration at all, but some existing instances in
-    // the state still depend on it.
-    ProviderDependencyFromState
-)
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go
deleted file mode 100644
index 7eff0831..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Package moduledeps contains types that can be used to describe the
-// providers required for all of the modules in a module tree.
-//
-// It does not itself contain the functionality for populating such
-// data structures; that's in Terraform core, since this package intentionally
-// does not depend on terraform core to avoid package dependency cycles.
-package moduledeps
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go
deleted file mode 100644
index 5189acfc..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package moduledeps
-
-import (
-    "sort"
-    "strings"
-
-    "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery"
-)
-
-// Module represents the dependencies of a single module, as well as being
-// a node in a tree of such structures representing the dependencies of
-// an entire configuration.
-type Module struct {
-    Name      string
-    Providers Providers
-    Children  []*Module
-}
-
-// WalkFunc is a callback type for use with Module.WalkTree
-type WalkFunc func(path []string, parent *Module, current *Module) error
-
-// WalkTree calls the given callback once for the receiver and then
-// once for each descendant, in an order such that parents are called
-// before their children and siblings are called in the order they
-// appear in the Children slice.
-//
-// When calling the callback, parent will be nil for the first call
-// for the receiving module, and then set to the direct parent of
-// each module for the subsequent calls.
-//
-// The path given to the callback is valid only until the callback
-// returns, after which it will be mutated and reused. Callbacks must
-// therefore copy the path slice if they wish to retain it.
-//
-// If the given callback returns an error, the walk will be aborted at
-// that point and that error returned to the caller.
-//
-// This function is not thread-safe for concurrent modifications of the
-// data structure, so it's the caller's responsibility to arrange for that
-// should it be needed.
-//
-// It is safe for a callback to modify the descendants of the "current"
-// module, including the ordering of the Children slice itself, but the
-// callback MUST NOT modify the parent module.
-func (m *Module) WalkTree(cb WalkFunc) error { - return walkModuleTree(make([]string, 0, 1), nil, m, cb) -} - -func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error { - path = append(path, current.Name) - err := cb(path, parent, current) - if err != nil { - return err - } - - for _, child := range current.Children { - err := walkModuleTree(path, current, child, cb) - if err != nil { - return err - } - } - return nil -} - -// SortChildren sorts the Children slice into lexicographic order by -// name, in-place. -// -// This is primarily useful prior to calling WalkTree so that the walk -// will proceed in a consistent order. -func (m *Module) SortChildren() { - sort.Sort(sortModules{m.Children}) -} - -type sortModules struct { - modules []*Module -} - -func (s sortModules) Len() int { - return len(s.modules) -} - -func (s sortModules) Less(i, j int) bool { - cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name) - return cmp < 0 -} - -func (s sortModules) Swap(i, j int) { - s.modules[i], s.modules[j] = s.modules[j], s.modules[i] -} - -// PluginRequirements produces a PluginRequirements structure that can -// be used with discovery.PluginMetaSet.ConstrainVersions to identify -// suitable plugins to satisfy the module's provider dependencies. -// -// This method only considers the direct requirements of the receiver. -// Use AllPluginRequirements to flatten the dependencies for the -// entire tree of modules. -// -// Requirements returned by this method include only version constraints, -// and apply no particular SHA256 hash constraint. -func (m *Module) PluginRequirements() discovery.PluginRequirements { - ret := make(discovery.PluginRequirements) - for inst, dep := range m.Providers { - // m.Providers is keyed on provider names, such as "aws.foo". - // a PluginRequirements wants keys to be provider *types*, such - // as "aws". If there are multiple aliases for the same - // provider then we will flatten them into a single requirement - // by combining their constraint sets. - pty := inst.Type() - if existing, exists := ret[pty]; exists { - ret[pty].Versions = existing.Versions.Append(dep.Constraints) - } else { - ret[pty] = &discovery.PluginConstraints{ - Versions: dep.Constraints, - } - } - } - return ret -} - -// AllPluginRequirements calls PluginRequirements for the receiver and all -// of its descendents, and merges the result into a single PluginRequirements -// structure that would satisfy all of the modules together. -// -// Requirements returned by this method include only version constraints, -// and apply no particular SHA256 hash constraint. -func (m *Module) AllPluginRequirements() discovery.PluginRequirements { - var ret discovery.PluginRequirements - m.WalkTree(func(path []string, parent *Module, current *Module) error { - ret = ret.Merge(current.PluginRequirements()) - return nil - }) - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go deleted file mode 100644 index 89ceefb2..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go +++ /dev/null @@ -1,30 +0,0 @@ -package moduledeps - -import ( - "strings" -) - -// ProviderInstance describes a particular provider instance by its full name, -// like "null" or "aws.foo". -type ProviderInstance string - -// Type returns the provider type of this instance. 
For example, for an instance -// named "aws.foo" the type is "aws". -func (p ProviderInstance) Type() string { - t := string(p) - if dotPos := strings.Index(t, "."); dotPos != -1 { - t = t[:dotPos] - } - return t -} - -// Alias returns the alias of this provider, if any. An instance named "aws.foo" -// has the alias "foo", while an instance named just "docker" has no alias, -// so the empty string would be returned. -func (p ProviderInstance) Alias() string { - t := string(p) - if dotPos := strings.Index(t, "."); dotPos != -1 { - return t[dotPos+1:] - } - return "" -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go deleted file mode 100644 index c653b106..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go +++ /dev/null @@ -1,22 +0,0 @@ -package plans - -type Action rune - -const ( - NoOp Action = 0 - Create Action = '+' - Read Action = '←' - Update Action = '~' - DeleteThenCreate Action = '∓' - CreateThenDelete Action = '±' - Delete Action = '-' -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type Action - -// IsReplace returns true if the action is one of the two actions that -// represents replacing an existing object with a new object: -// DeleteThenCreate or CreateThenDelete. -func (a Action) IsReplace() bool { - return a == DeleteThenCreate || a == CreateThenDelete -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go deleted file mode 100644 index be43ab17..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go +++ /dev/null @@ -1,49 +0,0 @@ -// Code generated by "stringer -type Action"; DO NOT EDIT. - -package plans - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[NoOp-0] - _ = x[Create-43] - _ = x[Read-8592] - _ = x[Update-126] - _ = x[DeleteThenCreate-8723] - _ = x[CreateThenDelete-177] - _ = x[Delete-45] -} - -const ( - _Action_name_0 = "NoOp" - _Action_name_1 = "Create" - _Action_name_2 = "Delete" - _Action_name_3 = "Update" - _Action_name_4 = "CreateThenDelete" - _Action_name_5 = "Read" - _Action_name_6 = "DeleteThenCreate" -) - -func (i Action) String() string { - switch { - case i == 0: - return _Action_name_0 - case i == 43: - return _Action_name_1 - case i == 45: - return _Action_name_2 - case i == 126: - return _Action_name_3 - case i == 177: - return _Action_name_4 - case i == 8592: - return _Action_name_5 - case i == 8723: - return _Action_name_6 - default: - return "Action(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go deleted file mode 100644 index 5c2028c8..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go +++ /dev/null @@ -1,308 +0,0 @@ -package plans - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/zclconf/go-cty/cty" -) - -// Changes describes various actions that Terraform will attempt to take if -// the corresponding plan is applied. -// -// A Changes object can be rendered into a visual diff (by the caller, using -// code in another package) for display to the user. -type Changes struct { - // Resources tracks planned changes to resource instance objects. - Resources []*ResourceInstanceChangeSrc - - // Outputs tracks planned changes output values. - // - // Note that although an in-memory plan contains planned changes for - // outputs throughout the configuration, a plan serialized - // to disk retains only the root outputs because they are - // externally-visible, while other outputs are implementation details and - // can be easily re-calculated during the apply phase. Therefore only root - // module outputs will survive a round-trip through a plan file. - Outputs []*OutputChangeSrc -} - -// NewChanges returns a valid Changes object that describes no changes. -func NewChanges() *Changes { - return &Changes{} -} - -func (c *Changes) Empty() bool { - for _, res := range c.Resources { - if res.Action != NoOp { - return false - } - } - return true -} - -// ResourceInstance returns the planned change for the current object of the -// resource instance of the given address, if any. Returns nil if no change is -// planned. -func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc { - addrStr := addr.String() - for _, rc := range c.Resources { - if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed { - return rc - } - } - - return nil -} - -// ResourceInstanceDeposed returns the plan change of a deposed object of -// the resource instance of the given address, if any. Returns nil if no change -// is planned. -func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc { - addrStr := addr.String() - for _, rc := range c.Resources { - if rc.Addr.String() == addrStr && rc.DeposedKey == key { - return rc - } - } - - return nil -} - -// OutputValue returns the planned change for the output value with the -// given address, if any. Returns nil if no change is planned. 
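For orientation, a hedged lookup sketch against an empty change set; the addrs constructors here are assumed to behave like their Terraform-core counterparts, and the resource and output names are invented:

changes := plans.NewChanges()

rAddr := addrs.Resource{
	Mode: addrs.ManagedResourceMode,
	Type: "aws_instance",
	Name: "web",
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)
// ResourceInstance matches only the current object (DeposedKey == NotDeposed);
// deposed objects are looked up with ResourceInstanceDeposed instead.
fmt.Println(changes.ResourceInstance(rAddr)) // <nil>: nothing planned yet

oAddr := addrs.OutputValue{Name: "vpc_id"}.Absolute(addrs.RootModuleInstance)
fmt.Println(changes.OutputValue(oAddr)) // <nil> likewise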
-func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc { - addrStr := addr.String() - for _, oc := range c.Outputs { - if oc.Addr.String() == addrStr { - return oc - } - } - - return nil -} - -// SyncWrapper returns a wrapper object around the receiver that can be used -// to make certain changes to the receiver in a concurrency-safe way, as long -// as all callers share the same wrapper object. -func (c *Changes) SyncWrapper() *ChangesSync { - return &ChangesSync{ - changes: c, - } -} - -// ResourceInstanceChange describes a change to a particular resource instance -// object. -type ResourceInstanceChange struct { - // Addr is the absolute address of the resource instance that the change - // will apply to. - Addr addrs.AbsResourceInstance - - // DeposedKey is the identifier for a deposed object associated with the - // given instance, or states.NotDeposed if this change applies to the - // current object. - // - // A Replace change for a resource with create_before_destroy set will - // create a new DeposedKey temporarily during replacement. In that case, - // DeposedKey in the plan is always states.NotDeposed, representing that - // the current object is being replaced with the deposed. - DeposedKey states.DeposedKey - - // Provider is the address of the provider configuration that was used - // to plan this change, and thus the configuration that must also be - // used to apply it. - ProviderAddr addrs.AbsProviderConfig - - // Change is an embedded description of the change. - Change - - // RequiredReplace is a set of paths that caused the change action to be - // Replace rather than Update. Always nil if the change action is not - // Replace. - // - // This is retained only for UI-plan-rendering purposes and so it does not - // currently survive a round-trip through a saved plan file. - RequiredReplace cty.PathSet - - // Private allows a provider to stash any extra data that is opaque to - // Terraform that relates to this change. Terraform will save this - // byte-for-byte and return it to the provider in the apply call. - Private []byte -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. Pass the implied type of the -// corresponding resource type schema for correct operation. -func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) { - cs, err := rc.Change.Encode(ty) - if err != nil { - return nil, err - } - return &ResourceInstanceChangeSrc{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - ProviderAddr: rc.ProviderAddr, - ChangeSrc: *cs, - RequiredReplace: rc.RequiredReplace, - Private: rc.Private, - }, err -} - -// Simplify will, where possible, produce a change with a simpler action than -// the receiever given a flag indicating whether the caller is dealing with -// a normal apply or a destroy. This flag deals with the fact that Terraform -// Core uses a specialized graph node type for destroying; only that -// specialized node should set "destroying" to true. -// -// The following table shows the simplification behavior: -// -// Action Destroying? New Action -// --------+-------------+----------- -// Create true NoOp -// Delete false NoOp -// Replace true Delete -// Replace false Create -// -// For any combination not in the above table, the Simplify just returns the -// receiver as-is. 
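The table above can be exercised directly; a small sketch, assuming this plans package, go-cty, and fmt are imported, with invented before/after values:

rc := &plans.ResourceInstanceChange{
	Change: plans.Change{
		Action: plans.DeleteThenCreate,
		Before: cty.StringVal("old"),
		After:  cty.StringVal("new"),
	},
}
// A destroy node keeps only the destroy half of the replacement:
fmt.Println(rc.Simplify(true).Action) // Delete
// A normal apply node keeps only the create half:
fmt.Println(rc.Simplify(false).Action) // Create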
-func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange { - if destroying { - switch rc.Action { - case Delete: - // We'll fall out and just return rc verbatim, then. - case CreateThenDelete, DeleteThenCreate: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: Delete, - Before: rc.Before, - After: cty.NullVal(rc.Before.Type()), - }, - } - default: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: NoOp, - Before: rc.Before, - After: rc.Before, - }, - } - } - } else { - switch rc.Action { - case Delete: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: NoOp, - Before: rc.Before, - After: rc.Before, - }, - } - case CreateThenDelete, DeleteThenCreate: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: Create, - Before: cty.NullVal(rc.After.Type()), - After: rc.After, - }, - } - } - } - - // If we fall out here then our change is already simple enough. - return rc -} - -// OutputChange describes a change to an output value. -type OutputChange struct { - // Addr is the absolute address of the output value that the change - // will apply to. - Addr addrs.AbsOutputValue - - // Change is an embedded description of the change. - // - // For output value changes, the type constraint for the DynamicValue - // instances is always cty.DynamicPseudoType. - Change - - // Sensitive, if true, indicates that either the old or new value in the - // change is sensitive and so a rendered version of the plan in the UI - // should elide the actual values while still indicating the action of the - // change. - Sensitive bool -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. -func (oc *OutputChange) Encode() (*OutputChangeSrc, error) { - cs, err := oc.Change.Encode(cty.DynamicPseudoType) - if err != nil { - return nil, err - } - return &OutputChangeSrc{ - Addr: oc.Addr, - ChangeSrc: *cs, - Sensitive: oc.Sensitive, - }, err -} - -// Change describes a single change with a given action. -type Change struct { - // Action defines what kind of change is being made. - Action Action - - // Interpretation of Before and After depend on Action: - // - // NoOp Before and After are the same, unchanged value - // Create Before is nil, and After is the expected value after create. - // Read Before is any prior value (nil if no prior), and After is the - // value that was or will be read. - // Update Before is the value prior to update, and After is the expected - // value after update. - // Replace As with Update. - // Delete Before is the value prior to delete, and After is always nil. - // - // Unknown values may appear anywhere within the Before and After values, - // either as the values themselves or as nested elements within known - // collections/structures. - Before, After cty.Value -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. 
Pass the type constraint -// that the values are expected to conform to; to properly decode the values -// later an identical type constraint must be provided at that time. -// -// Where a Change is embedded in some other struct, it's generally better -// to call the corresponding Encode method of that struct rather than working -// directly with its embedded Change. -func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) { - beforeDV, err := NewDynamicValue(c.Before, ty) - if err != nil { - return nil, err - } - afterDV, err := NewDynamicValue(c.After, ty) - if err != nil { - return nil, err - } - - return &ChangeSrc{ - Action: c.Action, - Before: beforeDV, - After: afterDV, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go deleted file mode 100644 index 97bc8da7..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go +++ /dev/null @@ -1,190 +0,0 @@ -package plans - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/zclconf/go-cty/cty" -) - -// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange. -// Pass the associated resource type's schema type to method Decode to -// obtain a ResourceInstancChange. -type ResourceInstanceChangeSrc struct { - // Addr is the absolute address of the resource instance that the change - // will apply to. - Addr addrs.AbsResourceInstance - - // DeposedKey is the identifier for a deposed object associated with the - // given instance, or states.NotDeposed if this change applies to the - // current object. - // - // A Replace change for a resource with create_before_destroy set will - // create a new DeposedKey temporarily during replacement. In that case, - // DeposedKey in the plan is always states.NotDeposed, representing that - // the current object is being replaced with the deposed. - DeposedKey states.DeposedKey - - // Provider is the address of the provider configuration that was used - // to plan this change, and thus the configuration that must also be - // used to apply it. - ProviderAddr addrs.AbsProviderConfig - - // ChangeSrc is an embedded description of the not-yet-decoded change. - ChangeSrc - - // RequiredReplace is a set of paths that caused the change action to be - // Replace rather than Update. Always nil if the change action is not - // Replace. - // - // This is retained only for UI-plan-rendering purposes and so it does not - // currently survive a round-trip through a saved plan file. - RequiredReplace cty.PathSet - - // Private allows a provider to stash any extra data that is opaque to - // Terraform that relates to this change. Terraform will save this - // byte-for-byte and return it to the provider in the apply call. - Private []byte -} - -// Decode unmarshals the raw representation of the instance object being -// changed. Pass the implied type of the corresponding resource type schema -// for correct operation. 
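A round-trip sketch for this Encode/Decode pair (hypothetical values; assumes this plans package, go-cty, and the standard fmt and log packages):

ty := cty.Object(map[string]cty.Type{"id": cty.String})
rc := &plans.ResourceInstanceChange{
	Change: plans.Change{
		Action: plans.Create,
		Before: cty.NullVal(ty),
		After:  cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-123")}),
	},
}
src, err := rc.Encode(ty) // Before/After serialized against ty
if err != nil {
	log.Fatal(err)
}
back, err := src.Decode(ty) // the same implied type must be supplied
if err != nil {
	log.Fatal(err)
}
fmt.Println(back.After.GetAttr("id")) // cty.StringVal("i-123")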
-func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) { - change, err := rcs.ChangeSrc.Decode(ty) - if err != nil { - return nil, err - } - return &ResourceInstanceChange{ - Addr: rcs.Addr, - DeposedKey: rcs.DeposedKey, - ProviderAddr: rcs.ProviderAddr, - Change: *change, - RequiredReplace: rcs.RequiredReplace, - Private: rcs.Private, - }, nil -} - -// DeepCopy creates a copy of the receiver where any pointers to nested mutable -// values are also copied, thus ensuring that future mutations of the receiver -// will not affect the copy. -// -// Some types used within a resource change are immutable by convention even -// though the Go language allows them to be mutated, such as the types from -// the addrs package. These are _not_ copied by this method, under the -// assumption that callers will behave themselves. -func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc { - if rcs == nil { - return nil - } - ret := *rcs - - ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...) - - if len(ret.Private) != 0 { - private := make([]byte, len(ret.Private)) - copy(private, ret.Private) - ret.Private = private - } - - ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() - ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() - - return &ret -} - -// OutputChangeSrc describes a change to an output value. -type OutputChangeSrc struct { - // Addr is the absolute address of the output value that the change - // will apply to. - Addr addrs.AbsOutputValue - - // ChangeSrc is an embedded description of the not-yet-decoded change. - // - // For output value changes, the type constraint for the DynamicValue - // instances is always cty.DynamicPseudoType. - ChangeSrc - - // Sensitive, if true, indicates that either the old or new value in the - // change is sensitive and so a rendered version of the plan in the UI - // should elide the actual values while still indicating the action of the - // change. - Sensitive bool -} - -// Decode unmarshals the raw representation of the output value being -// changed. -func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) { - change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType) - if err != nil { - return nil, err - } - return &OutputChange{ - Addr: ocs.Addr, - Change: *change, - Sensitive: ocs.Sensitive, - }, nil -} - -// DeepCopy creates a copy of the receiver where any pointers to nested mutable -// values are also copied, thus ensuring that future mutations of the receiver -// will not affect the copy. -// -// Some types used within a resource change are immutable by convention even -// though the Go language allows them to be mutated, such as the types from -// the addrs package. These are _not_ copied by this method, under the -// assumption that callers will behave themselves. -func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc { - if ocs == nil { - return nil - } - ret := *ocs - - ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() - ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() - - return &ret -} - -// ChangeSrc is a not-yet-decoded Change. -type ChangeSrc struct { - // Action defines what kind of change is being made. - Action Action - - // Before and After correspond to the fields of the same name in Change, - // but have not yet been decoded from the serialized value used for - // storage. - Before, After DynamicValue -} - -// Decode unmarshals the raw representations of the before and after values -// to produce a Change object. 
Pass the type constraint that the result must -// conform to. -// -// Where a ChangeSrc is embedded in some other struct, it's generally better -// to call the corresponding Decode method of that struct rather than working -// directly with its embedded Change. -func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) { - var err error - before := cty.NullVal(ty) - after := cty.NullVal(ty) - - if len(cs.Before) > 0 { - before, err = cs.Before.Decode(ty) - if err != nil { - return nil, fmt.Errorf("error decoding 'before' value: %s", err) - } - } - if len(cs.After) > 0 { - after, err = cs.After.Decode(ty) - if err != nil { - return nil, fmt.Errorf("error decoding 'after' value: %s", err) - } - } - return &Change{ - Action: cs.Action, - Before: before, - After: after, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go deleted file mode 100644 index 89cc1ab2..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go +++ /dev/null @@ -1,144 +0,0 @@ -package plans - -import ( - "fmt" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// ChangesSync is a wrapper around a Changes that provides a concurrency-safe -// interface to insert new changes and retrieve copies of existing changes. -// -// Each ChangesSync is independent of all others, so all concurrent writers -// to a particular Changes must share a single ChangesSync. Behavior is -// undefined if any other caller makes changes to the underlying Changes -// object or its nested objects concurrently with any of the methods of a -// particular ChangesSync. -type ChangesSync struct { - lock sync.Mutex - changes *Changes -} - -// AppendResourceInstanceChange records the given resource instance change in -// the set of planned resource changes. -// -// The caller must ensure that there are no concurrent writes to the given -// change while this method is running, but it is safe to resume mutating -// it after this method returns without affecting the saved change. -func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) { - if cs == nil { - panic("AppendResourceInstanceChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - s := changeSrc.DeepCopy() - cs.changes.Resources = append(cs.changes.Resources, s) -} - -// GetResourceInstanceChange searches the set of resource instance changes for -// one matching the given address and generation, returning it if it exists. -// -// If no such change exists, nil is returned. -// -// The returned object is a deep copy of the change recorded in the plan, so -// callers may mutate it although it's generally better (less confusing) to -// treat planned changes as immutable after they've been initially constructed. 
-func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc { - if cs == nil { - panic("GetResourceInstanceChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - if gen == states.CurrentGen { - return cs.changes.ResourceInstance(addr).DeepCopy() - } - if dk, ok := gen.(states.DeposedKey); ok { - return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy() - } - panic(fmt.Sprintf("unsupported generation value %#v", gen)) -} - -// RemoveResourceInstanceChange searches the set of resource instance changes -// for one matching the given address and generation, and removes it from the -// set if it exists. -func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) { - if cs == nil { - panic("RemoveResourceInstanceChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - dk := states.NotDeposed - if realDK, ok := gen.(states.DeposedKey); ok { - dk = realDK - } - - addrStr := addr.String() - for i, r := range cs.changes.Resources { - if r.Addr.String() != addrStr || r.DeposedKey != dk { - continue - } - copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:]) - cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1] - return - } -} - -// AppendOutputChange records the given output value change in the set of -// planned value changes. -// -// The caller must ensure that there are no concurrent writes to the given -// change while this method is running, but it is safe to resume mutating -// it after this method returns without affecting the saved change. -func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) { - if cs == nil { - panic("AppendOutputChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - s := changeSrc.DeepCopy() - cs.changes.Outputs = append(cs.changes.Outputs, s) -} - -// GetOutputChange searches the set of output value changes for one matching -// the given address, returning it if it exists. -// -// If no such change exists, nil is returned. -// -// The returned object is a deep copy of the change recorded in the plan, so -// callers may mutate it although it's generally better (less confusing) to -// treat planned changes as immutable after they've been initially constructed. -func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc { - if cs == nil { - panic("GetOutputChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - return cs.changes.OutputValue(addr) -} - -// RemoveOutputChange searches the set of output value changes for one matching -// the given address, and removes it from the set if it exists. 
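The removal methods here use Go's copy-and-truncate idiom to delete element i from a slice in place; the same idiom in isolation:

s := []string{"a", "b", "c", "d"}
i := 1 // index of the element to remove
copy(s[i:], s[i+1:]) // shift the tail left over the removed element
s = s[:len(s)-1]     // drop the now-duplicated final element
// s is now ["a" "c" "d"], with no new backing array allocated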
-func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) { - if cs == nil { - panic("RemoveOutputChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - addrStr := addr.String() - for i, o := range cs.changes.Outputs { - if o.Addr.String() != addrStr { - continue - } - copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:]) - cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1] - return - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go deleted file mode 100644 index 01ca3892..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package plans contains the types that are used to represent Terraform plans. -// -// A plan describes a set of changes that Terraform will make to update remote -// objects to match with changes to the configuration. -package plans diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go deleted file mode 100644 index 51fbb24c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go +++ /dev/null @@ -1,96 +0,0 @@ -package plans - -import ( - "github.com/zclconf/go-cty/cty" - ctymsgpack "github.com/zclconf/go-cty/cty/msgpack" -) - -// DynamicValue is the representation in the plan of a value whose type cannot -// be determined at compile time, such as because it comes from a schema -// defined in a plugin. -// -// This type is used as an indirection so that the overall plan structure can -// be decoded without schema available, and then the dynamic values accessed -// at a later time once the appropriate schema has been determined. -// -// Internally, DynamicValue is a serialized version of a cty.Value created -// against a particular type constraint. Callers should not access directly -// the serialized form, whose format may change in future. Values of this -// type must always be created by calling NewDynamicValue. -// -// The zero value of DynamicValue is nil, and represents the absense of a -// value within the Go type system. This is distinct from a cty.NullVal -// result, which represents the absense of a value within the cty type system. -type DynamicValue []byte - -// NewDynamicValue creates a DynamicValue by serializing the given value -// against the given type constraint. The value must conform to the type -// constraint, or the result is undefined. -// -// If the value to be encoded has no predefined schema (for example, for -// module output values and input variables), set the type constraint to -// cty.DynamicPseudoType in order to save type information as part of the -// value, and then also pass cty.DynamicPseudoType to method Decode to recover -// the original value. -// -// cty.NilVal can be used to represent the absense of a value, but callers -// must be careful to distinguish values that are absent at the Go layer -// (cty.NilVal) vs. values that are absent at the cty layer (cty.NullVal -// results). 
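A sketch of the NilVal-versus-NullVal distinction this doc comment draws (assumes this plans package, go-cty, and the standard fmt and log packages; the object type is invented):

ty := cty.Object(map[string]cty.Type{"name": cty.String})

dv, err := plans.NewDynamicValue(
	cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("a")}), ty)
if err != nil {
	log.Fatal(err)
}
v, err := dv.Decode(ty) // decode against the same type constraint
if err != nil {
	log.Fatal(err)
}
fmt.Println(v.GetAttr("name")) // cty.StringVal("a")

absent, _ := plans.NewDynamicValue(cty.NilVal, ty)         // nil: no value at the Go layer
typedNull, _ := plans.NewDynamicValue(cty.NullVal(ty), ty) // non-nil: a cty-layer null
fmt.Println(absent == nil, typedNull == nil)               // true false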
-func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) {
- // If we're given cty.NilVal (the zero value of cty.Value, which is
- // distinct from a typed null value created by cty.NullVal) then we'll
- // assume the caller is trying to represent the _absence_ of a value,
- // and so we'll return a nil DynamicValue.
- if val == cty.NilVal {
- return DynamicValue(nil), nil
- }
-
- // Currently our internal encoding is msgpack, via ctymsgpack.
- buf, err := ctymsgpack.Marshal(val, ty)
- if err != nil {
- return nil, err
- }
-
- return DynamicValue(buf), nil
-}
-
-// Decode retrieves the effective value from the receiver by interpreting the
-// serialized form against the given type constraint. For correct results,
-// the type constraint must match (or be consistent with) the one that was
-// used to create the receiver.
-//
-// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and
-// instead represents the absence of a value.
-func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) {
- if v == nil {
- return cty.NilVal, nil
- }
-
- return ctymsgpack.Unmarshal([]byte(v), ty)
-}
-
-// ImpliedType returns the type implied by the serialized structure of the
-// receiving value.
-//
-// This will not necessarily be exactly the type that was given when the
-// value was encoded, and in particular must not be used for values that
-// were encoded with their static type given as cty.DynamicPseudoType.
-// It is however safe to use this method for values that were encoded using
-// their runtime type as the conforming type, with the result being
-// semantically equivalent but with all lists and sets represented as tuples,
-// and maps as objects, due to ambiguities of the serialization.
-func (v DynamicValue) ImpliedType() (cty.Type, error) {
- return ctymsgpack.ImpliedType([]byte(v))
-}
-
-// Copy produces a copy of the receiver with a distinct backing array.
-func (v DynamicValue) Copy() DynamicValue {
- if v == nil {
- return nil
- }
-
- ret := make(DynamicValue, len(v))
- copy(ret, v)
- return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go
deleted file mode 100644
index ba9cc961..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package objchange
-
-import (
- "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
- "github.com/zclconf/go-cty/cty"
-)
-
-// AllAttributesNull constructs a non-null cty.Value of the object type implied
-// by the given schema that has all of its leaf attributes set to null and all
-// of its nested block collections set to zero-length.
-//
-// This simulates what would result from decoding an empty configuration block
-// with the given schema, except that it does not produce errors.
-func AllAttributesNull(schema *configschema.Block) cty.Value {
- // "All attributes null" happens to be the definition of EmptyValue for
- // a Block, so we can just delegate to that.
- return schema.EmptyValue() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go deleted file mode 100644 index 36a7d496..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go +++ /dev/null @@ -1,447 +0,0 @@ -package objchange - -import ( - "fmt" - "strconv" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" -) - -// AssertObjectCompatible checks whether the given "actual" value is a valid -// completion of the possibly-partially-unknown "planned" value. -// -// This means that any known leaf value in "planned" must be equal to the -// corresponding value in "actual", and various other similar constraints. -// -// Any inconsistencies are reported by returning a non-zero number of errors. -// These errors are usually (but not necessarily) cty.PathError values -// referring to a particular nested value within the "actual" value. -// -// The two values must have types that conform to the given schema's implied -// type, or this function will panic. -func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error { - return assertObjectCompatible(schema, planned, actual, nil) -} - -func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error { - var errs []error - if planned.IsNull() && !actual.IsNull() { - errs = append(errs, path.NewErrorf("was absent, but now present")) - return errs - } - if actual.IsNull() && !planned.IsNull() { - errs = append(errs, path.NewErrorf("was present, but now absent")) - return errs - } - if planned.IsNull() { - // No further checks possible if both values are null - return errs - } - - for name, attrS := range schema.Attributes { - plannedV := planned.GetAttr(name) - actualV := actual.GetAttr(name) - - path := append(path, cty.GetAttrStep{Name: name}) - moreErrs := assertValueCompatible(plannedV, actualV, path) - if attrS.Sensitive { - if len(moreErrs) > 0 { - // Use a vague placeholder message instead, to avoid disclosing - // sensitive information. - errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute")) - } - } else { - errs = append(errs, moreErrs...) - } - } - for name, blockS := range schema.BlockTypes { - plannedV := planned.GetAttr(name) - actualV := actual.GetAttr(name) - - // As a special case, if there were any blocks whose leaf attributes - // are all unknown then we assume (possibly incorrectly) that the - // HCL dynamic block extension is in use with an unknown for_each - // argument, and so we will do looser validation here that allows - // for those blocks to have expanded into a different number of blocks - // if the for_each value is now known. - maybeUnknownBlocks := couldHaveUnknownBlockPlaceholder(plannedV, blockS, false) - - path := append(path, cty.GetAttrStep{Name: name}) - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - // If an unknown block placeholder was present then the placeholder - // may have expanded out into zero blocks, which is okay. - if maybeUnknownBlocks && actualV.IsNull() { - continue - } - moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path) - errs = append(errs, moreErrs...) 
- case configschema.NestingList: - // A NestingList might either be a list or a tuple, depending on - // whether there are dynamically-typed attributes inside. However, - // both support a similar-enough API that we can treat them the - // same for our purposes here. - if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { - continue - } - - if maybeUnknownBlocks { - // When unknown blocks are present the final blocks may be - // at different indices than the planned blocks, so unfortunately - // we can't do our usual checks in this case without generating - // false negatives. - continue - } - - plannedL := plannedV.LengthInt() - actualL := actualV.LengthInt() - if plannedL != actualL { - errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - if !actualV.HasIndex(idx).True() { - continue - } - actualEV := actualV.Index(idx) - moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) - errs = append(errs, moreErrs...) - } - case configschema.NestingMap: - // A NestingMap might either be a map or an object, depending on - // whether there are dynamically-typed attributes inside, but - // that's decided statically and so both values will have the same - // kind. - if plannedV.Type().IsObjectType() { - plannedAtys := plannedV.Type().AttributeTypes() - actualAtys := actualV.Type().AttributeTypes() - for k := range plannedAtys { - if _, ok := actualAtys[k]; !ok { - errs = append(errs, path.NewErrorf("block key %q has vanished", k)) - continue - } - - plannedEV := plannedV.GetAttr(k) - actualEV := actualV.GetAttr(k) - moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k})) - errs = append(errs, moreErrs...) - } - if !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan - for k := range actualAtys { - if _, ok := plannedAtys[k]; !ok { - errs = append(errs, path.NewErrorf("new block key %q has appeared", k)) - continue - } - } - } - } else { - if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { - continue - } - plannedL := plannedV.LengthInt() - actualL := actualV.LengthInt() - if plannedL != actualL && !maybeUnknownBlocks { // new blocks may appear if unknown blocks were persent in the plan - errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - if !actualV.HasIndex(idx).True() { - continue - } - actualEV := actualV.Index(idx) - moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) - errs = append(errs, moreErrs...) - } - } - case configschema.NestingSet: - if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { - continue - } - - setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool { - errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV})) - return len(errs) == 0 - }) - errs = append(errs, setErrs...) - - if maybeUnknownBlocks { - // When unknown blocks are present the final number of blocks - // may be different, either because the unknown set values - // become equal and are collapsed, or the count is unknown due - // a dynamic block. 
Unfortunately this means we can't do our - // usual checks in this case without generating false - // negatives. - continue - } - - // There can be fewer elements in a set after its elements are all - // known (values that turn out to be equal will coalesce) but the - // number of elements must never get larger. - plannedL := plannedV.LengthInt() - actualL := actualV.LengthInt() - if plannedL < actualL { - errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL)) - } - default: - panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) - } - } - return errs -} - -func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error { - // NOTE: We don't normally use the GoString rendering of cty.Value in - // user-facing error messages as a rule, but we make an exception - // for this function because we expect the user to pass this message on - // verbatim to the provider development team and so more detail is better. - - var errs []error - if planned.Type() == cty.DynamicPseudoType { - // Anything goes, then - return errs - } - if problems := planned.Type().TestConformance(actual.Type()); len(problems) > 0 { - errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type()))) - // If the types don't match then we can't do any other comparisons, - // so we bail early. - return errs - } - - if !planned.IsKnown() { - // We didn't know what were going to end up with during plan, so - // anything goes during apply. - return errs - } - - if actual.IsNull() { - if planned.IsNull() { - return nil - } - errs = append(errs, path.NewErrorf("was %#v, but now null", planned)) - return errs - } - if planned.IsNull() { - errs = append(errs, path.NewErrorf("was null, but now %#v", actual)) - return errs - } - - ty := planned.Type() - switch { - - case !actual.IsKnown(): - errs = append(errs, path.NewErrorf("was known, but now unknown")) - - case ty.IsPrimitiveType(): - if !actual.Equals(planned).True() { - errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual)) - } - - case ty.IsListType() || ty.IsMapType() || ty.IsTupleType(): - for it := planned.ElementIterator(); it.Next(); { - k, plannedV := it.Element() - if !actual.HasIndex(k).True() { - errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k))) - continue - } - - actualV := actual.Index(k) - moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k})) - errs = append(errs, moreErrs...) - } - - for it := actual.ElementIterator(); it.Next(); { - k, _ := it.Element() - if !planned.HasIndex(k).True() { - errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k))) - } - } - - case ty.IsObjectType(): - atys := ty.AttributeTypes() - for name := range atys { - // Because we already tested that the two values have the same type, - // we can assume that the same attributes are present in both and - // focus just on testing their values. - plannedV := planned.GetAttr(name) - actualV := actual.GetAttr(name) - moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name})) - errs = append(errs, moreErrs...) - } - - case ty.IsSetType(): - // We can't really do anything useful for sets here because changing - // an unknown element to known changes the identity of the element, and - // so we can't correlate them properly. 
However, we will at least check - // to ensure that the number of elements is consistent, along with - // the general type-match checks we ran earlier in this function. - if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() { - - setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool { - errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV})) - return len(errs) == 0 - }) - errs = append(errs, setErrs...) - - // There can be fewer elements in a set after its elements are all - // known (values that turn out to be equal will coalesce) but the - // number of elements must never get larger. - - plannedL := planned.LengthInt() - actualL := actual.LengthInt() - if plannedL < actualL { - errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL)) - } - } - } - - return errs -} - -func indexStrForErrors(v cty.Value) string { - switch v.Type() { - case cty.Number: - return v.AsBigFloat().Text('f', -1) - case cty.String: - return strconv.Quote(v.AsString()) - default: - // Should be impossible, since no other index types are allowed! - return fmt.Sprintf("%#v", v) - } -} - -// couldHaveUnknownBlockPlaceholder is a heuristic that recognizes how the -// HCL dynamic block extension behaves when it's asked to expand a block whose -// for_each argument is unknown. In such cases, it generates a single placeholder -// block with all leaf attribute values unknown, and once the for_each -// expression becomes known the placeholder may be replaced with any number -// of blocks, so object compatibility checks would need to be more liberal. -// -// Set "nested" if testing a block that is nested inside a candidate block -// placeholder; this changes the interpretation of there being no blocks of -// a type to allow for there being zero nested blocks. -func couldHaveUnknownBlockPlaceholder(v cty.Value, blockS *configschema.NestedBlock, nested bool) bool { - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - if nested && v.IsNull() { - return true // for nested blocks, a single block being unset doesn't disqualify from being an unknown block placeholder - } - return couldBeUnknownBlockPlaceholderElement(v, &blockS.Block) - default: - // These situations should be impossible for correct providers, but - // we permit the legacy SDK to produce some incorrect outcomes - // for compatibility with its existing logic, and so we must be - // tolerant here. - if !v.IsKnown() { - return true - } - if v.IsNull() { - return false // treated as if the list were empty, so we would see zero iterations below - } - - // For all other nesting modes, our value should be something iterable. - for it := v.ElementIterator(); it.Next(); { - _, ev := it.Element() - if couldBeUnknownBlockPlaceholderElement(ev, &blockS.Block) { - return true - } - } - - // Our default changes depending on whether we're testing the candidate - // block itself or something nested inside of it: zero blocks of a type - // can never contain a dynamic block placeholder, but a dynamic block - // placeholder might contain zero blocks of one of its own nested block - // types, if none were set in the config at all. 
- return nested - } -} - -func couldBeUnknownBlockPlaceholderElement(v cty.Value, schema *configschema.Block) bool { - if v.IsNull() { - return false // null value can never be a placeholder element - } - if !v.IsKnown() { - return true // this should never happen for well-behaved providers, but can happen with the legacy SDK opt-outs - } - for name := range schema.Attributes { - av := v.GetAttr(name) - - // Unknown block placeholders contain only unknown or null attribute - // values, depending on whether or not a particular attribute was set - // explicitly inside the content block. Note that this is imprecise: - // non-placeholders can also match this, so this function can generate - // false positives. - if av.IsKnown() && !av.IsNull() { - return false - } - } - for name, blockS := range schema.BlockTypes { - if !couldHaveUnknownBlockPlaceholder(v.GetAttr(name), blockS, true) { - return false - } - } - return true -} - -// assertSetValuesCompatible checks that each of the elements in a can -// be correlated with at least one equivalent element in b and vice-versa, -// using the given correlation function. -// -// This allows the number of elements in the sets to change as long as all -// elements in both sets can be correlated, making this function safe to use -// with sets that may contain unknown values as long as the unknown case is -// addressed in some reasonable way in the callback function. -// -// The callback always recieves values from set a as its first argument and -// values from set b in its second argument, so it is safe to use with -// non-commutative functions. -// -// As with assertValueCompatible, we assume that the target audience of error -// messages here is a provider developer (via a bug report from a user) and so -// we intentionally violate our usual rule of keeping cty implementation -// details out of error messages. -func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error { - a := planned - b := actual - - // Our methodology here is a little tricky, to deal with the fact that - // it's impossible to directly correlate two non-equal set elements because - // they don't have identities separate from their values. - // The approach is to count the number of equivalent elements each element - // of a has in b and vice-versa, and then return true only if each element - // in both sets has at least one equivalent. - as := a.AsValueSlice() - bs := b.AsValueSlice() - aeqs := make([]bool, len(as)) - beqs := make([]bool, len(bs)) - for ai, av := range as { - for bi, bv := range bs { - if f(av, bv) { - aeqs[ai] = true - beqs[bi] = true - } - } - } - - var errs []error - for i, eq := range aeqs { - if !eq { - errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i])) - } - } - if len(errs) > 0 { - // Exit early since otherwise we're likely to generate duplicate - // error messages from the other perspective in the subsequent loop. 
- return errs - } - for i, eq := range beqs { - if !eq { - errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i])) - } - } - return errs -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go deleted file mode 100644 index 2c18a010..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package objchange deals with the business logic of taking a prior state -// value and a config value and producing a proposed new merged value, along -// with other related rules in this domain. -package objchange diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go deleted file mode 100644 index cbfefddd..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go +++ /dev/null @@ -1,104 +0,0 @@ -package objchange - -import ( - "github.com/zclconf/go-cty/cty" -) - -// LongestCommonSubsequence finds a sequence of values that are common to both -// x and y, with the same relative ordering as in both collections. This result -// is useful as a first step towards computing a diff showing added/removed -// elements in a sequence. -// -// The approached used here is a "naive" one, assuming that both xs and ys will -// generally be small in most reasonable Terraform configurations. For larger -// lists the time/space usage may be sub-optimal. -// -// A pair of lists may have multiple longest common subsequences. In that -// case, the one selected by this function is undefined. -func LongestCommonSubsequence(xs, ys []cty.Value) []cty.Value { - if len(xs) == 0 || len(ys) == 0 { - return make([]cty.Value, 0) - } - - c := make([]int, len(xs)*len(ys)) - eqs := make([]bool, len(xs)*len(ys)) - w := len(xs) - - for y := 0; y < len(ys); y++ { - for x := 0; x < len(xs); x++ { - eqV := xs[x].Equals(ys[y]) - eq := false - if eqV.IsKnown() && eqV.True() { - eq = true - eqs[(w*y)+x] = true // equality tests can be expensive, so cache it - } - if eq { - // Sequence gets one longer than for the cell at top left, - // since we'd append a new item to the sequence here. - if x == 0 || y == 0 { - c[(w*y)+x] = 1 - } else { - c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1 - } - } else { - // We follow the longest of the sequence above and the sequence - // to the left of us in the matrix. - l := 0 - u := 0 - if x > 0 { - l = c[(w*y)+(x-1)] - } - if y > 0 { - u = c[(w*(y-1))+x] - } - if l > u { - c[(w*y)+x] = l - } else { - c[(w*y)+x] = u - } - } - } - } - - // The bottom right cell tells us how long our longest sequence will be - seq := make([]cty.Value, c[len(c)-1]) - - // Now we will walk back from the bottom right cell, finding again all - // of the equal pairs to construct our sequence. - x := len(xs) - 1 - y := len(ys) - 1 - i := len(seq) - 1 - - for x > -1 && y > -1 { - if eqs[(w*y)+x] { - // Add the value to our result list and then walk diagonally - // up and to the left. - seq[i] = xs[x] - x-- - y-- - i-- - } else { - // Take the path with the greatest sequence length in the matrix. 
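// Worked example for this walk-back: with xs = [a b c] and ys = [b c d],
// the length matrix c (w = 3; rows are y over ys, columns are x over xs)
// comes out as
//
//	        x=a  x=b  x=c
//	  y=b    0    1    1
//	  y=c    0    1    2
//	  y=d    0    1    2
//
// Starting at the bottom-right (x=2, y=2) there is no cached equality, and
// the cell above (2) beats the cell to the left (1), so y decrements; then
// (x=2, y=1) is an equality hit (c) and (x=1, y=0) another (b), yielding
// the subsequence [b c].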
- l := 0
- u := 0
- if x > 0 {
- l = c[(w*y)+(x-1)]
- }
- if y > 0 {
- u = c[(w*(y-1))+x]
- }
- if l > u {
- x--
- } else {
- y--
- }
- }
- }
-
- if i > -1 {
- // should never happen if the matrix was constructed properly
- panic("not enough elements in sequence")
- }
-
- return seq
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go
deleted file mode 100644
index 879fc93a..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go
+++ /dev/null
@@ -1,390 +0,0 @@
-package objchange
-
-import (
- "fmt"
-
- "github.com/zclconf/go-cty/cty"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
-)
-
-// ProposedNewObject constructs a proposed new object value by combining the
-// computed attribute values from "prior" with the configured attribute values
-// from "config".
-//
-// Both values must conform to the given schema's implied type, or this function
-// will panic.
-//
-// The prior value must be wholly known, but the config value may be unknown
-// or have nested unknown values.
-//
-// The merging of the two objects includes the attributes of any nested blocks,
-// which will be correlated in a manner appropriate for their nesting mode.
-// Note in particular that the correlation for blocks backed by sets is a
-// heuristic based on matching non-computed attribute values and so it may
-// produce strange results with more "extreme" cases, such as a nested set
-// block where _all_ attributes are computed.
-func ProposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
- // If the config and prior are both null, return early here before
- // populating the prior block. This prevents non-null blocks from appearing
- // in the proposed state value.
- if config.IsNull() && prior.IsNull() {
- return prior
- }
-
- if prior.IsNull() {
- // In this case, we will construct a synthetic prior value that is
- // similar to the result of decoding an empty configuration block,
- // which simplifies our handling of the top-level attributes/blocks
- // below by giving us one non-null level of object to pull values from.
- prior = AllAttributesNull(schema)
- }
- return proposedNewObject(schema, prior, config)
-}
-
-// PlannedDataResourceObject is similar to ProposedNewObject but tailored for
-// planning data resources in particular. Specifically, it replaces the values
-// of any Computed attributes not set in the configuration with an unknown
-// value, which serves as a placeholder for a value to be filled in by the
-// provider when the data resource is finally read.
-//
-// Data resources are different because the planning of them is handled
-// entirely within Terraform Core and not subject to customization by the
-// provider. This function is, in effect, producing an equivalent result to
-// passing the ProposedNewObject result into a provider's PlanResourceChange
-// function, assuming a fixed implementation of PlanResourceChange that just
-// fills in unknown values as needed.
-func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value {
- // Our trick here is to run the ProposedNewObject logic with an
- // entirely-unknown prior value.
Because of cty's unknown short-circuit - // behavior, any operation on prior returns another unknown, and so - // unknown values propagate into all of the parts of the resulting value - // that would normally be filled in by preserving the prior state. - prior := cty.UnknownVal(schema.ImpliedType()) - return proposedNewObject(schema, prior, config) -} - -func proposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value { - if config.IsNull() || !config.IsKnown() { - // This is a weird situation, but we'll allow it anyway to free - // callers from needing to specifically check for these cases. - return prior - } - if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) { - panic("ProposedNewObject only supports object-typed values") - } - - // From this point onwards, we can assume that both values are non-null - // object types, and that the config value itself is known (though it - // may contain nested values that are unknown.) - - newAttrs := map[string]cty.Value{} - for name, attr := range schema.Attributes { - priorV := prior.GetAttr(name) - configV := config.GetAttr(name) - var newV cty.Value - switch { - case attr.Computed && attr.Optional: - // This is the trickiest scenario: we want to keep the prior value - // if the config isn't overriding it. Note that due to some - // ambiguity here, setting an optional+computed attribute from - // config and then later switching the config to null in a - // subsequent change causes the initial config value to be "sticky" - // unless the provider specifically overrides it during its own - // plan customization step. - if configV.IsNull() { - newV = priorV - } else { - newV = configV - } - case attr.Computed: - // configV will always be null in this case, by definition. - // priorV may also be null, but that's okay. - newV = priorV - default: - // For non-computed attributes, we always take the config value, - // even if it is null. If it's _required_ then null values - // should've been caught during an earlier validation step, and - // so we don't really care about that here. - newV = configV - } - newAttrs[name] = newV - } - - // Merging nested blocks is a little more complex, since we need to - // correlate blocks between both objects and then recursively propose - // a new object for each. The correlation logic depends on the nesting - // mode for each block type. - for name, blockType := range schema.BlockTypes { - priorV := prior.GetAttr(name) - configV := config.GetAttr(name) - var newV cty.Value - switch blockType.Nesting { - - case configschema.NestingSingle, configschema.NestingGroup: - newV = ProposedNewObject(&blockType.Block, priorV, configV) - - case configschema.NestingList: - // Nested blocks are correlated by index. - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - newVals := make([]cty.Value, 0, configVLen) - for it := configV.ElementIterator(); it.Next(); { - idx, configEV := it.Element() - if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { - // If there is no corresponding prior element then - // we just take the config value as-is. - newVals = append(newVals, configEV) - continue - } - priorEV := priorV.Index(idx) - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals = append(newVals, newEV) - } - // Despite the name, a NestingList might also be a tuple, if - // its nested schema contains dynamically-typed attributes. 
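// [Editor's note: illustrative aside, not part of the vendored patch. The
// snippet below shows the cty behavior the comment above relies on: lists
// require one uniform element type, while tuples let each element carry its
// own type, which is what a nested block with dynamically-typed attributes
// decodes to.]
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Homogeneous elements can form a list...
	list := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	// ...but elements of differing types must form a tuple instead.
	tuple := cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(1)})
	fmt.Println(list.Type().FriendlyName())  // list of string
	fmt.Println(tuple.Type().FriendlyName()) // tuple
}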
- if configV.Type().IsTupleType() { - newV = cty.TupleVal(newVals) - } else { - newV = cty.ListVal(newVals) - } - } else { - // Despite the name, a NestingList might also be a tuple, if - // its nested schema contains dynamically-typed attributes. - if configV.Type().IsTupleType() { - newV = cty.EmptyTupleVal - } else { - newV = cty.ListValEmpty(blockType.ImpliedType()) - } - } - - case configschema.NestingMap: - // Despite the name, a NestingMap may produce either a map or - // object value, depending on whether the nested schema contains - // dynamically-typed attributes. - if configV.Type().IsObjectType() { - // Nested blocks are correlated by key. - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - newVals := make(map[string]cty.Value, configVLen) - atys := configV.Type().AttributeTypes() - for name := range atys { - configEV := configV.GetAttr(name) - if !priorV.IsKnown() || priorV.IsNull() || !priorV.Type().HasAttribute(name) { - // If there is no corresponding prior element then - // we just take the config value as-is. - newVals[name] = configEV - continue - } - priorEV := priorV.GetAttr(name) - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals[name] = newEV - } - // Although we call the nesting mode "map", we actually use - // object values so that elements might have different types - // in case of dynamically-typed attributes. - newV = cty.ObjectVal(newVals) - } else { - newV = cty.EmptyObjectVal - } - } else { - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - newVals := make(map[string]cty.Value, configVLen) - for it := configV.ElementIterator(); it.Next(); { - idx, configEV := it.Element() - k := idx.AsString() - if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { - // If there is no corresponding prior element then - // we just take the config value as-is. - newVals[k] = configEV - continue - } - priorEV := priorV.Index(idx) - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals[k] = newEV - } - newV = cty.MapVal(newVals) - } else { - newV = cty.MapValEmpty(blockType.ImpliedType()) - } - } - - case configschema.NestingSet: - if !configV.Type().IsSetType() { - panic("configschema.NestingSet value is not a set as expected") - } - - // Nested blocks are correlated by comparing the element values - // after eliminating all of the computed attributes. In practice, - // this means that any config change produces an entirely new - // nested object, and we only propagate prior computed values - // if the non-computed attribute values are identical. 
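// [Editor's note: a minimal sketch of the correlation heuristic described
// above, separate from the vendored code. Computed attributes (here the
// invented "id") are forced to null so that prior and config elements can be
// matched on user-set values alone; the schema is hypothetical.]
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// compareValue nulls the computed "id" attribute, keeping only "name".
func compareValue(v cty.Value) cty.Value {
	return cty.ObjectVal(map[string]cty.Value{
		"id":   cty.NullVal(cty.String), // computed: excluded from matching
		"name": v.GetAttr("name"),       // user-set: used for matching
	})
}

func main() {
	prior := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.StringVal("i-123"),
		"name": cty.StringVal("web"),
	})
	config := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.NullVal(cty.String),
		"name": cty.StringVal("web"),
	})
	// The elements correlate because their compare values match exactly.
	fmt.Println(compareValue(prior).RawEquals(compareValue(config))) // true
}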
- var cmpVals [][2]cty.Value - if priorV.IsKnown() && !priorV.IsNull() { - cmpVals = setElementCompareValues(&blockType.Block, priorV, false) - } - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - used := make([]bool, len(cmpVals)) // track used elements in case multiple have the same compare value - newVals := make([]cty.Value, 0, configVLen) - for it := configV.ElementIterator(); it.Next(); { - _, configEV := it.Element() - var priorEV cty.Value - for i, cmp := range cmpVals { - if used[i] { - continue - } - if cmp[1].RawEquals(configEV) { - priorEV = cmp[0] - used[i] = true // we can't use this value on a future iteration - break - } - } - if priorEV == cty.NilVal { - priorEV = cty.NullVal(blockType.ImpliedType()) - } - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals = append(newVals, newEV) - } - newV = cty.SetVal(newVals) - } else { - newV = cty.SetValEmpty(blockType.Block.ImpliedType()) - } - - default: - // Should never happen, since the above cases are comprehensive. - panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting)) - } - - newAttrs[name] = newV - } - - return cty.ObjectVal(newAttrs) -} - -// setElementCompareValues takes a known, non-null value of a cty.Set type and -// returns a table -- constructed of two-element arrays -- that maps original -// set element values to corresponding values that have all of the computed -// values removed, making them suitable for comparison with values obtained -// from configuration. The element type of the set must conform to the implied -// type of the given schema, or this function will panic. -// -// In the resulting slice, the zeroth element of each array is the original -// value and the one-indexed element is the corresponding "compare value". -// -// This is intended to help correlate prior elements with configured elements -// in ProposedNewObject. The result is a heuristic rather than an exact science, -// since e.g. two separate elements may reduce to the same value through this -// process. The caller must therefore be ready to deal with duplicates. -func setElementCompareValues(schema *configschema.Block, set cty.Value, isConfig bool) [][2]cty.Value { - ret := make([][2]cty.Value, 0, set.LengthInt()) - for it := set.ElementIterator(); it.Next(); { - _, ev := it.Element() - ret = append(ret, [2]cty.Value{ev, setElementCompareValue(schema, ev, isConfig)}) - } - return ret -} - -// setElementCompareValue creates a new value that has all of the same -// non-computed attribute values as the one given but has all computed -// attribute values forced to null. -// -// If isConfig is true then non-null Optional+Computed attribute values will -// be preserved. Otherwise, they will also be set to null. -// -// The input value must conform to the schema's implied type, and the return -// value is guaranteed to conform to it. 
-func setElementCompareValue(schema *configschema.Block, v cty.Value, isConfig bool) cty.Value {
- if v.IsNull() || !v.IsKnown() {
- return v
- }
-
- attrs := map[string]cty.Value{}
- for name, attr := range schema.Attributes {
- switch {
- case attr.Computed && attr.Optional:
- if isConfig {
- attrs[name] = v.GetAttr(name)
- } else {
- attrs[name] = cty.NullVal(attr.Type)
- }
- case attr.Computed:
- attrs[name] = cty.NullVal(attr.Type)
- default:
- attrs[name] = v.GetAttr(name)
- }
- }
-
- for name, blockType := range schema.BlockTypes {
- switch blockType.Nesting {
-
- case configschema.NestingSingle, configschema.NestingGroup:
- attrs[name] = setElementCompareValue(&blockType.Block, v.GetAttr(name), isConfig)
-
- case configschema.NestingList, configschema.NestingSet:
- cv := v.GetAttr(name)
- if cv.IsNull() || !cv.IsKnown() {
- attrs[name] = cv
- continue
- }
- if l := cv.LengthInt(); l > 0 {
- elems := make([]cty.Value, 0, l)
- for it := cv.ElementIterator(); it.Next(); {
- _, ev := it.Element()
- elems = append(elems, setElementCompareValue(&blockType.Block, ev, isConfig))
- }
- if blockType.Nesting == configschema.NestingSet {
- // SetValEmpty would panic if given elements that are not
- // all of the same type, but that's guaranteed not to
- // happen here because our input value was _already_ a
- // set and we've not changed the types of any elements here.
- attrs[name] = cty.SetVal(elems)
- } else {
- attrs[name] = cty.TupleVal(elems)
- }
- } else {
- if blockType.Nesting == configschema.NestingSet {
- attrs[name] = cty.SetValEmpty(blockType.Block.ImpliedType())
- } else {
- attrs[name] = cty.EmptyTupleVal
- }
- }
-
- case configschema.NestingMap:
- cv := v.GetAttr(name)
- if cv.IsNull() || !cv.IsKnown() {
- attrs[name] = cv
- continue
- }
- elems := make(map[string]cty.Value)
- for it := cv.ElementIterator(); it.Next(); {
- kv, ev := it.Element()
- elems[kv.AsString()] = setElementCompareValue(&blockType.Block, ev, isConfig)
- }
- attrs[name] = cty.ObjectVal(elems)
-
- default:
- // Should never happen, since the above cases are comprehensive.
- panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
- }
- }
-
- return cty.ObjectVal(attrs)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go
deleted file mode 100644
index 905a9114..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package objchange
-
-import (
- "fmt"
-
- "github.com/zclconf/go-cty/cty"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
-)
-
-// AssertPlanValid checks whether a planned new state returned by a
-// provider's PlanResourceChange method is suitable to achieve a change
-// from priorState to config. It returns a slice with nonzero length if
-// any problems are detected. Because problems here indicate bugs in the
-// provider that generated the plannedState, they are written with provider
-// developers as an audience, rather than end-users.
-//
-// All of the given values must have the same type and must conform to the
-// implied type of the given schema, or this function may panic or produce
-// garbage results.
-//
-// During planning, a provider may only make changes to attributes that are
-// null (unset) in the configuration and are marked as "computed" in the
-// resource type schema, in order to insert any default values the provider
-// may know about. If the default value cannot be determined until apply time,
-// the provider can return an unknown value. Providers are forbidden from
-// planning a change that disagrees with any non-null argument in the
-// configuration.
-//
-// As a special exception, providers _are_ allowed to provide attribute values
-// conflicting with configuration if and only if the planned value exactly
-// matches the corresponding attribute value in the prior state. The provider
-// can use this to signal that the new value is functionally equivalent to
-// the old and thus no change is required.
-func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error {
- return assertPlanValid(schema, priorState, config, plannedState, nil)
-}
-
-func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error {
- var errs []error
- if plannedState.IsNull() && !config.IsNull() {
- errs = append(errs, path.NewErrorf("planned for absence but config wants existence"))
- return errs
- }
- if config.IsNull() && !plannedState.IsNull() {
- errs = append(errs, path.NewErrorf("planned for existence but config wants absence"))
- return errs
- }
- if plannedState.IsNull() {
- // No further checks possible if the planned value is null
- return errs
- }
-
- impTy := schema.ImpliedType()
-
- for name, attrS := range schema.Attributes {
- plannedV := plannedState.GetAttr(name)
- configV := config.GetAttr(name)
- priorV := cty.NullVal(attrS.Type)
- if !priorState.IsNull() {
- priorV = priorState.GetAttr(name)
- }
-
- path := append(path, cty.GetAttrStep{Name: name})
- moreErrs := assertPlannedValueValid(attrS, priorV, configV, plannedV, path)
- errs = append(errs, moreErrs...)
- }
- for name, blockS := range schema.BlockTypes {
- path := append(path, cty.GetAttrStep{Name: name})
- plannedV := plannedState.GetAttr(name)
- configV := config.GetAttr(name)
- priorV := cty.NullVal(impTy.AttributeType(name))
- if !priorState.IsNull() {
- priorV = priorState.GetAttr(name)
- }
- if plannedV.RawEquals(configV) {
- // Easy path: nothing has changed at all
- continue
- }
- if !plannedV.IsKnown() {
- errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
- continue
- }
-
- switch blockS.Nesting {
- case configschema.NestingSingle, configschema.NestingGroup:
- moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path)
- errs = append(errs, moreErrs...)
- case configschema.NestingList:
- // A NestingList might either be a list or a tuple, depending on
- // whether there are dynamically-typed attributes inside. However,
- // both support a similar-enough API that we can treat them the
- // same for our purposes here.
- if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - plannedL := plannedV.LengthInt() - configL := configV.LengthInt() - if plannedL != configL { - errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - path := append(path, cty.IndexStep{Key: idx}) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - if !configV.HasIndex(idx).True() { - continue // should never happen since we checked the lengths above - } - configEV := configV.Index(idx) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.HasIndex(idx).True() { - priorEV = priorV.Index(idx) - } - - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) - } - case configschema.NestingMap: - if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - // A NestingMap might either be a map or an object, depending on - // whether there are dynamically-typed attributes inside, but - // that's decided statically and so all values will have the same - // kind. - if plannedV.Type().IsObjectType() { - plannedAtys := plannedV.Type().AttributeTypes() - configAtys := configV.Type().AttributeTypes() - for k := range plannedAtys { - if _, ok := configAtys[k]; !ok { - errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) - continue - } - path := append(path, cty.GetAttrStep{Name: k}) - - plannedEV := plannedV.GetAttr(k) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - configEV := configV.GetAttr(k) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.Type().HasAttribute(k) { - priorEV = priorV.GetAttr(k) - } - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) 
- } - for k := range configAtys { - if _, ok := plannedAtys[k]; !ok { - errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k)) - continue - } - } - } else { - plannedL := plannedV.LengthInt() - configL := configV.LengthInt() - if plannedL != configL { - errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - path := append(path, cty.IndexStep{Key: idx}) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - k := idx.AsString() - if !configV.HasIndex(idx).True() { - errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) - continue - } - configEV := configV.Index(idx) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.HasIndex(idx).True() { - priorEV = priorV.Index(idx) - } - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) - } - for it := configV.ElementIterator(); it.Next(); { - idx, _ := it.Element() - if !plannedV.HasIndex(idx).True() { - errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString())) - continue - } - } - } - case configschema.NestingSet: - if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - // Because set elements have no identifier with which to correlate - // them, we can't robustly validate the plan for a nested block - // backed by a set, and so unfortunately we need to just trust the - // provider to do the right thing. :( - // - // (In principle we could correlate elements by matching the - // subset of attributes explicitly set in config, except for the - // special diff suppression rule which allows for there to be a - // planned value that is constructed by mixing part of a prior - // value with part of a config value, creating an entirely new - // element that is not present in either prior nor config.) - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - path := append(path, cty.IndexStep{Key: idx}) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - } - - default: - panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) - } - } - - return errs -} - -func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error { - var errs []error - if plannedV.RawEquals(configV) { - // This is the easy path: provider didn't change anything at all. - return errs - } - if plannedV.RawEquals(priorV) && !priorV.IsNull() { - // Also pretty easy: there is a prior value and the provider has - // returned it unchanged. This indicates that configV and plannedV - // are functionally equivalent and so the provider wishes to disregard - // the configuration value in favor of the prior. - return errs - } - if attrS.Computed && configV.IsNull() { - // The provider is allowed to change the value of any computed - // attribute that isn't explicitly set in the config. 
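// [Editor's note: to make the allowances above concrete with a hypothetical
// schema: for a computed "id" left null in config, the provider may plan an
// unknown or defaulted value (this branch); for any attribute it may echo
// the prior value to signal "functionally equivalent, no change" (the branch
// above); for a non-computed "name" it must plan exactly what config says.]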
- return errs - } - - // If none of the above conditions match, the provider has made an invalid - // change to this attribute. - if priorV.IsNull() { - if attrS.Sensitive { - errs = append(errs, path.NewErrorf("sensitive planned value does not match config value")) - } else { - errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV)) - } - return errs - } - if attrS.Sensitive { - errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value")) - } else { - errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV)) - } - return errs -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go deleted file mode 100644 index 0abed56a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go +++ /dev/null @@ -1,92 +0,0 @@ -package plans - -import ( - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// Plan is the top-level type representing a planned set of changes. -// -// A plan is a summary of the set of changes required to move from a current -// state to a goal state derived from configuration. The described changes -// are not applied directly, but contain an approximation of the final -// result that will be completed during apply by resolving any values that -// cannot be predicted. -// -// A plan must always be accompanied by the state and configuration it was -// built from, since the plan does not itself include all of the information -// required to make the changes indicated. -type Plan struct { - VariableValues map[string]DynamicValue - Changes *Changes - TargetAddrs []addrs.Targetable - ProviderSHA256s map[string][]byte - Backend Backend -} - -// Backend represents the backend-related configuration and other data as it -// existed when a plan was created. -type Backend struct { - // Type is the type of backend that the plan will apply against. - Type string - - // Config is the configuration of the backend, whose schema is decided by - // the backend Type. - Config DynamicValue - - // Workspace is the name of the workspace that was active when the plan - // was created. It is illegal to apply a plan created for one workspace - // to the state of another workspace. - // (This constraint is already enforced by the statefile lineage mechanism, - // but storing this explicitly allows us to return a better error message - // in the situation where the user has the wrong workspace selected.) - Workspace string -} - -func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) { - dv, err := NewDynamicValue(config, configSchema.ImpliedType()) - if err != nil { - return nil, err - } - - return &Backend{ - Type: typeName, - Config: dv, - Workspace: workspaceName, - }, nil -} - -// ProviderAddrs returns a list of all of the provider configuration addresses -// referenced throughout the receiving plan. -// -// The result is de-duplicated so that each distinct address appears only once. 
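// [Editor's note: illustrative only. The de-duplication described above uses
// a common Go pattern: collect into a map keyed by a string form, then sort
// the keys for stable output. A standalone sketch with plain strings in
// place of addrs.AbsProviderConfig:]
package main

import (
	"fmt"
	"sort"
)

func dedupeSorted(in []string) []string {
	m := map[string]struct{}{}
	for _, s := range in {
		m[s] = struct{}{} // duplicates collapse onto one key
	}
	out := make([]string, 0, len(m))
	for k := range m {
		out = append(out, k)
	}
	sort.Strings(out) // stable ordering, e.g. for tests
	return out
}

func main() {
	fmt.Println(dedupeSorted([]string{"provider.aws", "provider.null", "provider.aws"}))
	// [provider.aws provider.null]
}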
-func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig {
- if p == nil || p.Changes == nil {
- return nil
- }
-
- m := map[string]addrs.AbsProviderConfig{}
- for _, rc := range p.Changes.Resources {
- m[rc.ProviderAddr.String()] = rc.ProviderAddr
- }
- if len(m) == 0 {
- return nil
- }
-
- // This is mainly just so we'll get stable results for testing purposes.
- keys := make([]string, 0, len(m))
- for k := range m {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- ret := make([]addrs.AbsProviderConfig, len(keys))
- for i, key := range keys {
- ret[i] = m[key]
- }
-
- return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go
deleted file mode 100644
index 729e9709..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package discovery

-// Error is a type used to describe situations that the caller must handle
-// since they indicate some form of user error.
-//
-// The functions and methods that return these specialized errors indicate so
-// in their documentation. The Error type should not itself be used directly,
-// but rather errors should be compared using the == operator with the
-// error constants in this package.
-//
-// Values of this type are _not_ used when the error being reported is an
-// operational error (server unavailable, etc) or indicative of a bug in
-// this package or its caller.
-type Error string
-
-// ErrorNoSuitableVersion indicates that a suitable version (meeting given
-// constraints) is not available.
-const ErrorNoSuitableVersion = Error("no suitable version is available")
-
-// ErrorNoVersionCompatible indicates that all of the available versions
-// that otherwise met constraints are not compatible with the current
-// version of Terraform.
-const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform")
-
-// ErrorVersionIncompatible indicates that all of the versions within the
-// constraints are not compatible with the current version of Terraform, though
-// there does exist a version outside of the constraints that is compatible.
-const ErrorVersionIncompatible = Error("incompatible provider version")
-
-// ErrorNoSuchProvider indicates that no provider exists with the given name
-const ErrorNoSuchProvider = Error("no provider exists with the given name")
-
-// ErrorNoVersionCompatibleWithPlatform indicates that all of the available
-// versions that otherwise met constraints are not compatible with the
-// requested platform
-const ErrorNoVersionCompatibleWithPlatform = Error("no available version is compatible for the requested platform")
-
-// ErrorMissingChecksumVerification indicates that either the provider
-// distribution is missing the SHA256SUMS file or the checksum file does
-// not contain a checksum for the binary plugin
-const ErrorMissingChecksumVerification = Error("unable to verify checksum")
-
-// ErrorChecksumVerification indicates that the current checksum of the
-// provider plugin has changed since the initial release and is not trusted
-// to download
-const ErrorChecksumVerification = Error("unexpected plugin checksum")
-
-// ErrorSignatureVerification indicates that the digital signature for a
-// provider distribution could not be verified for one of the following
-// reasons: missing signature file, missing public key, or the signature
-// was not signed by any known key for the publisher
-const ErrorSignatureVerification = Error("unable to verify signature")
-
-// ErrorServiceUnreachable indicates that the network was unable to connect
-// to the registry service
-const ErrorServiceUnreachable = Error("registry service is unreachable")
-
-// ErrorPublicRegistryUnreachable indicates that the network was unable to connect
-// to the public registry in particular, so we can show a link to the statuspage
-const ErrorPublicRegistryUnreachable = Error("registry service is unreachable, check https://status.hashicorp.com/ for status updates")
-
-func (err Error) Error() string {
- return string(err)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go
deleted file mode 100644
index f053312b..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package discovery
-
-import (
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "strings"
-)
-
-// FindPlugins looks in the given directories for files whose filenames
-// suggest that they are plugins of the given kind (e.g. "provider") and
-// returns a PluginMetaSet representing the discovered potential-plugins.
-//
-// Currently this supports two different naming schemes. The current
-// standard naming scheme is a subdirectory called $GOOS-$GOARCH containing
-// files named terraform-$KIND-$NAME_v$VERSION. The legacy naming scheme is
-// files directly in the given directory whose names are like
-// terraform-$KIND-$NAME.
-//
-// Only one plugin will be returned for each unique plugin (name, version)
-// pair, with preference given to files found in earlier directories.
-//
-// This is a convenience wrapper around FindPluginPaths and ResolvePluginPaths.
-func FindPlugins(kind string, dirs []string) PluginMetaSet {
- return ResolvePluginPaths(FindPluginPaths(kind, dirs))
-}
-
-// FindPluginPaths looks in the given directories for files whose filenames
-// suggest that they are plugins of the given kind (e.g. "provider").
-// -// The return value is a list of absolute paths that appear to refer to -// plugins in the given directories, based only on what can be inferred -// from the naming scheme. The paths returned are ordered such that files -// in later dirs appear after files in earlier dirs in the given directory -// list. Within the same directory plugins are returned in a consistent but -// undefined order. -func FindPluginPaths(kind string, dirs []string) []string { - // This is just a thin wrapper around findPluginPaths so that we can - // use the latter in tests with a fake machineName so we can use our - // test fixtures. - return findPluginPaths(kind, dirs) -} - -func findPluginPaths(kind string, dirs []string) []string { - prefix := "terraform-" + kind + "-" - - ret := make([]string, 0, len(dirs)) - - for _, dir := range dirs { - items, err := ioutil.ReadDir(dir) - if err != nil { - // Ignore missing dirs, non-dirs, etc - continue - } - - log.Printf("[DEBUG] checking for %s in %q", kind, dir) - - for _, item := range items { - fullName := item.Name() - - if !strings.HasPrefix(fullName, prefix) { - continue - } - - // New-style paths must have a version segment in filename - if strings.Contains(strings.ToLower(fullName), "_v") { - absPath, err := filepath.Abs(filepath.Join(dir, fullName)) - if err != nil { - log.Printf("[ERROR] plugin filepath error: %s", err) - continue - } - - // Check that the file we found is usable - if !pathIsFile(absPath) { - log.Printf("[ERROR] ignoring non-file %s", absPath) - continue - } - - log.Printf("[DEBUG] found %s %q", kind, fullName) - ret = append(ret, filepath.Clean(absPath)) - continue - } - - // Legacy style with files directly in the base directory - absPath, err := filepath.Abs(filepath.Join(dir, fullName)) - if err != nil { - log.Printf("[ERROR] plugin filepath error: %s", err) - continue - } - - // Check that the file we found is usable - if !pathIsFile(absPath) { - log.Printf("[ERROR] ignoring non-file %s", absPath) - continue - } - - log.Printf("[WARN] found legacy %s %q", kind, fullName) - - ret = append(ret, filepath.Clean(absPath)) - } - } - - return ret -} - -// Returns true if and only if the given path refers to a file or a symlink -// to a file. -func pathIsFile(path string) bool { - info, err := os.Stat(path) - if err != nil { - return false - } - - return !info.IsDir() -} - -// ResolvePluginPaths takes a list of paths to plugin executables (as returned -// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the -// referenced plugins. -// -// If the same combination of plugin name and version appears multiple times, -// the earlier reference will be preferred. Several different versions of -// the same plugin name may be returned, in which case the methods of -// PluginMetaSet can be used to filter down. 
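// [Editor's note: a hedged, standalone sketch of the filename convention that
// ResolvePluginPaths below decodes; it is not the vendored implementation.
// Names look like "terraform-provider-aws_v2.1.0_x4", where the "_x4"
// protocol suffix and any ".exe" extension are trimmed.]
package main

import (
	"fmt"
	"strings"
)

func parsePluginFilename(base string) (name, version string, ok bool) {
	base = strings.TrimSuffix(strings.ToLower(base), ".exe")
	base = strings.TrimPrefix(base, "terraform-")
	kindAndRest := strings.SplitN(base, "-", 2) // ["provider", "aws_v2.1.0_x4"]
	if len(kindAndRest) != 2 {
		return "", "", false
	}
	parts := strings.SplitN(kindAndRest[1], "_v", 2)
	name = parts[0]
	version = "0.0.0" // legacy naming carries no version
	if len(parts) == 2 {
		version = parts[1]
		if i := strings.Index(version, "_x"); i != -1 {
			version = version[:i] // drop the protocol suffix
		}
	}
	return name, version, true
}

func main() {
	fmt.Println(parsePluginFilename("terraform-provider-aws_v2.1.0_x4"))
	// aws 2.1.0 true
}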
-func ResolvePluginPaths(paths []string) PluginMetaSet { - s := make(PluginMetaSet) - - type nameVersion struct { - Name string - Version string - } - found := make(map[nameVersion]struct{}) - - for _, path := range paths { - baseName := strings.ToLower(filepath.Base(path)) - if !strings.HasPrefix(baseName, "terraform-") { - // Should never happen with reasonable input - continue - } - - baseName = baseName[10:] - firstDash := strings.Index(baseName, "-") - if firstDash == -1 { - // Should never happen with reasonable input - continue - } - - baseName = baseName[firstDash+1:] - if baseName == "" { - // Should never happen with reasonable input - continue - } - - // Trim the .exe suffix used on Windows before we start wrangling - // the remainder of the path. - if strings.HasSuffix(baseName, ".exe") { - baseName = baseName[:len(baseName)-4] - } - - parts := strings.SplitN(baseName, "_v", 2) - name := parts[0] - version := VersionZero - if len(parts) == 2 { - version = parts[1] - } - - // Auto-installed plugins contain an extra name portion representing - // the expected plugin version, which we must trim off. - if underX := strings.Index(version, "_x"); underX != -1 { - version = version[:underX] - } - - if _, ok := found[nameVersion{name, version}]; ok { - // Skip duplicate versions of the same plugin - // (We do this during this step because after this we will be - // dealing with sets and thus lose our ordering with which to - // decide preference.) - continue - } - - s.Add(PluginMeta{ - Name: name, - Version: VersionStr(version), - Path: path, - }) - found[nameVersion{name, version}] = struct{}{} - } - - return s -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go deleted file mode 100644 index 722bb28a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go +++ /dev/null @@ -1,669 +0,0 @@ -package discovery - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - - "github.com/hashicorp/errwrap" - getter "github.com/hashicorp/go-getter" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/httpclient" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/response" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/mitchellh/cli" -) - -// Releases are located by querying the terraform registry. - -var httpClient *http.Client - -func init() { - httpClient = httpclient.New() - - httpGetter := &getter.HttpGetter{ - Client: httpClient, - Netrc: true, - } - - getter.Getters["http"] = httpGetter - getter.Getters["https"] = httpGetter -} - -// ProviderInstaller is an Installer implementation that knows how to -// download Terraform providers from the official HashiCorp releases service -// into a local directory. 
The files downloaded are compliant with the -// naming scheme expected by FindPlugins, so the target directory of a -// provider installer can be used as one of several plugin discovery sources. -type ProviderInstaller struct { - Dir string - - // Cache is used to access and update a local cache of plugins if non-nil. - // Can be nil to disable caching. - Cache PluginCache - - PluginProtocolVersion uint - - // OS and Arch specify the OS and architecture that should be used when - // installing plugins. These use the same labels as the runtime.GOOS and - // runtime.GOARCH variables respectively, and indeed the values of these - // are used as defaults if either of these is the empty string. - OS string - Arch string - - // Skip checksum and signature verification - SkipVerify bool - - Ui cli.Ui // Ui for output - - // Services is a required *disco.Disco, which may have services and - // credentials pre-loaded. - Services *disco.Disco - - // registry client - registry *registry.Client -} - -// Get is part of an implementation of type Installer, and attempts to download -// and install a Terraform provider matching the given constraints. -// -// This method may return one of a number of sentinel errors from this -// package to indicate issues that are likely to be resolvable via user action: -// -// ErrorNoSuchProvider: no provider with the given name exists in the repository. -// ErrorNoSuitableVersion: the provider exists but no available version matches constraints. -// ErrorNoVersionCompatible: a plugin was found within the constraints but it is -// incompatible with the current Terraform version. -// -// These errors should be recognized and handled as special cases by the caller -// to present a suitable user-oriented error message. -// -// All other errors indicate an internal problem that is likely _not_ solvable -// through user action, or at least not within Terraform's scope. Error messages -// are produced under the assumption that if presented to the user they will -// be presented alongside context about what is being installed, and thus the -// error messages do not redundantly include such information. -func (i *ProviderInstaller) Get(provider addrs.ProviderType, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) { - var diags tfdiags.Diagnostics - - // a little bit of initialization. 
- if i.OS == "" { - i.OS = runtime.GOOS - } - if i.Arch == "" { - i.Arch = runtime.GOARCH - } - if i.registry == nil { - i.registry = registry.NewClient(i.Services, nil) - } - - // get a full listing of versions for the requested provider - allVersions, err := i.listProviderVersions(provider) - - // TODO: return multiple errors - if err != nil { - log.Printf("[DEBUG] %s", err) - if registry.IsServiceUnreachable(err) { - registryHost, err := i.hostname() - if err == nil && registryHost == regsrc.PublicRegistryHost.Raw { - return PluginMeta{}, diags, ErrorPublicRegistryUnreachable - } - return PluginMeta{}, diags, ErrorServiceUnreachable - } - if registry.IsServiceNotProvided(err) { - return PluginMeta{}, diags, err - } - return PluginMeta{}, diags, ErrorNoSuchProvider - } - - // Add any warnings from the response to diags - for _, warning := range allVersions.Warnings { - hostname, err := i.hostname() - if err != nil { - return PluginMeta{}, diags, err - } - diag := tfdiags.SimpleWarning(fmt.Sprintf("%s: %s", hostname, warning)) - diags = diags.Append(diag) - } - - if len(allVersions.Versions) == 0 { - return PluginMeta{}, diags, ErrorNoSuitableVersion - } - providerSource := allVersions.ID - - // Filter the list of plugin versions to those which meet the version constraints - versions := allowedVersions(allVersions, req) - if len(versions) == 0 { - return PluginMeta{}, diags, ErrorNoSuitableVersion - } - - // sort them newest to oldest. The newest version wins! - response.ProviderVersionCollection(versions).Sort() - - // if the chosen provider version does not support the requested platform, - // filter the list of acceptable versions to those that support that platform - if err := i.checkPlatformCompatibility(versions[0]); err != nil { - versions = i.platformCompatibleVersions(versions) - if len(versions) == 0 { - return PluginMeta{}, diags, ErrorNoVersionCompatibleWithPlatform - } - } - - // we now have a winning platform-compatible version - versionMeta := versions[0] - v := VersionStr(versionMeta.Version).MustParse() - - // check protocol compatibility - if err := i.checkPluginProtocol(versionMeta); err != nil { - closestMatch, err := i.findClosestProtocolCompatibleVersion(allVersions.Versions) - if err != nil { - // No operation here if we can't find a version with compatible protocol - return PluginMeta{}, diags, err - } - - // Prompt version suggestion to UI based on closest protocol match - var errMsg string - closestVersion := VersionStr(closestMatch.Version).MustParse() - if v.NewerThan(closestVersion) { - errMsg = providerProtocolTooNew - } else { - errMsg = providerProtocolTooOld - } - - constraintStr := req.String() - if constraintStr == "" { - constraintStr = "(any version)" - } - - return PluginMeta{}, diags, errwrap.Wrap(ErrorVersionIncompatible, fmt.Errorf(fmt.Sprintf( - errMsg, provider, v.String(), tfversion.String(), - closestVersion.String(), closestVersion.MinorUpgradeConstraintStr(), constraintStr))) - } - - downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version) - if err != nil { - return PluginMeta{}, diags, err - } - providerURL := downloadURLs.DownloadURL - - if !i.SkipVerify { - // Terraform verifies the integrity of a provider release before downloading - // the plugin binary. The digital signature (SHA256SUMS.sig) on the - // release distribution (SHA256SUMS) is verified with the public key of the - // publisher provided in the Terraform Registry response, ensuring that - // everything is as intended by the publisher. 
The checksum of the provider
- // plugin is expected in the SHA256SUMS file and is double-checked to match
- // the checksum of the original published release to the Registry. This
- // enforces immutability of releases between the Registry and the plugin's
- // host location. Lastly, the integrity of the binary is verified upon
- // download to match the Registry and signed checksum.
- sha256, err := i.getProviderChecksum(downloadURLs)
- if err != nil {
- return PluginMeta{}, diags, err
- }
-
- // add the checksum parameter for go-getter to verify the download for us.
- if sha256 != "" {
- providerURL = providerURL + "?checksum=sha256:" + sha256
- }
- }
-
- printedProviderName := fmt.Sprintf("%q (%s)", provider.Name, providerSource)
- i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %s %s...", printedProviderName, versionMeta.Version))
- log.Printf("[DEBUG] getting provider %s version %q", printedProviderName, versionMeta.Version)
- err = i.install(provider, v, providerURL)
- if err != nil {
- return PluginMeta{}, diags, err
- }
-
- // Find what we just installed
- // (This is weird, because go-getter doesn't directly return
- // information about what was extracted, and we just extracted
- // the archive directly into a shared dir here.)
- log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider.Name, versionMeta.Version)
- metas := FindPlugins("provider", []string{i.Dir})
- log.Printf("[DEBUG] all plugins found %#v", metas)
- metas, _ = metas.ValidateVersions()
- metas = metas.WithName(provider.Name).WithVersion(v)
- log.Printf("[DEBUG] filtered plugins %#v", metas)
- if metas.Count() == 0 {
- // This should never happen. Suggests that the release archive
- // contains an executable file whose name doesn't match the
- // expected convention.
- return PluginMeta{}, diags, fmt.Errorf(
- "failed to find installed plugin version %s; this is a bug in Terraform and should be reported",
- versionMeta.Version,
- )
- }
-
- if metas.Count() > 1 {
- // This should also never happen, and suggests that a
- // particular version was re-released with a different
- // executable filename. We consider releases as immutable, so
- // this is an error.
- return PluginMeta{}, diags, fmt.Errorf(
- "multiple plugins installed for version %s; this is a bug in Terraform and should be reported",
- versionMeta.Version,
- )
- }
-
- // By now we know we have exactly one meta, and so "Newest" will
- // return that one.
- return metas.Newest(), diags, nil
-}
-
-func (i *ProviderInstaller) install(provider addrs.ProviderType, version Version, url string) error {
- if i.Cache != nil {
- log.Printf("[DEBUG] looking for provider %s %s in plugin cache", provider.Name, version)
- cached := i.Cache.CachedPluginPath("provider", provider.Name, version)
- if cached == "" {
- log.Printf("[DEBUG] %s %s not yet in cache, so downloading %s", provider.Name, version, url)
- err := getter.Get(i.Cache.InstallDir(), url)
- if err != nil {
- return err
- }
- // should now be in cache
- cached = i.Cache.CachedPluginPath("provider", provider.Name, version)
- if cached == "" {
- // should never happen if the getter is behaving properly
- // and the plugins are packaged properly.
- return fmt.Errorf("failed to find downloaded plugin in cache %s", i.Cache.InstallDir())
- }
- }
-
- // Link or copy the cached binary into our install dir so the
- // normal resolution machinery can find it.
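// [Editor's note: the cache-to-install-dir strategy below tries a hard link,
// then a symlink, then a byte-for-byte copy. A minimal standalone version of
// that fallback chain, with invented paths in main:]
package main

import (
	"fmt"
	"io"
	"os"
)

func linkOrCopy(src, dst string) error {
	os.Remove(dst) // links fail if dst already exists; ignore error if absent
	if err := os.Link(src, dst); err == nil {
		return nil // hard link: install dir stays self-contained
	}
	if err := os.Symlink(src, dst); err == nil {
		return nil // symlink: depends on the cache staying in place
	}
	in, err := os.Open(src) // fall back to copying the bytes
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, in)
	return err
}

func main() {
	if err := linkOrCopy("/tmp/cache/terraform-provider-demo_v0.1.0", "/tmp/plugins/terraform-provider-demo_v0.1.0"); err != nil {
		fmt.Println("install failed:", err)
	}
}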
- filename := filepath.Base(cached) - targetPath := filepath.Join(i.Dir, filename) - // check if the target dir exists, and create it if not - var err error - if _, StatErr := os.Stat(i.Dir); os.IsNotExist(StatErr) { - err = os.MkdirAll(i.Dir, 0700) - } - if err != nil { - return err - } - - log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider.Name, version, targetPath, cached) - - // Delete if we can. If there's nothing there already then no harm done. - // This is important because we can't create a link if there's - // already a file of the same name present. - // (any other error here we'll catch below when we try to write here) - os.Remove(targetPath) - - // We don't attempt linking on Windows because links are not - // comprehensively supported by all tools/apps in Windows and - // so we choose to be conservative to avoid creating any - // weird issues for Windows users. - linkErr := errors.New("link not supported for Windows") // placeholder error, never actually returned - if runtime.GOOS != "windows" { - // Try hard linking first. Hard links are preferable because this - // creates a self-contained directory that doesn't depend on the - // cache after install. - linkErr = os.Link(cached, targetPath) - - // If that failed, try a symlink. This _does_ depend on the cache - // after install, so the user must manage the cache more carefully - // in this case, but avoids creating redundant copies of the - // plugins on disk. - if linkErr != nil { - linkErr = os.Symlink(cached, targetPath) - } - } - - // If we still have an error then we'll try a copy as a fallback. - // In this case either the OS is Windows or the target filesystem - // can't support symlinks. - if linkErr != nil { - srcFile, err := os.Open(cached) - if err != nil { - return fmt.Errorf("failed to open cached plugin %s: %s", cached, err) - } - defer srcFile.Close() - - destFile, err := os.OpenFile(targetPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create %s: %s", targetPath, err) - } - - _, err = io.Copy(destFile, srcFile) - if err != nil { - destFile.Close() - return fmt.Errorf("failed to copy cached plugin from %s to %s: %s", cached, targetPath, err) - } - - err = destFile.Close() - if err != nil { - return fmt.Errorf("error creating %s: %s", targetPath, err) - } - } - - // One way or another, by the time we get here we should have either - // a link or a copy of the cached plugin within i.Dir, as expected. - } else { - log.Printf("[DEBUG] plugin cache is disabled, so downloading %s %s from %s", provider.Name, version, url) - err := getter.Get(i.Dir, url) - if err != nil { - return err - } - } - return nil -} - -func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) { - purge := make(PluginMetaSet) - - present := FindPlugins("provider", []string{i.Dir}) - for meta := range present { - chosen, ok := used[meta.Name] - if !ok { - purge.Add(meta) - } - if chosen.Path != meta.Path { - purge.Add(meta) - } - } - - removed := make(PluginMetaSet) - var errs error - for meta := range purge { - path := meta.Path - err := os.Remove(path) - if err != nil { - errs = multierror.Append(errs, fmt.Errorf( - "failed to remove unused provider plugin %s: %s", - path, err, - )) - } else { - removed.Add(meta) - } - } - - return removed, errs -} - -func (i *ProviderInstaller) getProviderChecksum(resp *response.TerraformProviderPlatformLocation) (string, error) { - // Get SHA256SUMS file. 
- shasums, err := getFile(resp.ShasumsURL) - if err != nil { - log.Printf("[ERROR] error fetching checksums from %q: %s", resp.ShasumsURL, err) - return "", ErrorMissingChecksumVerification - } - - // Get SHA256SUMS.sig file. - signature, err := getFile(resp.ShasumsSignatureURL) - if err != nil { - log.Printf("[ERROR] error fetching checksums signature from %q: %s", resp.ShasumsSignatureURL, err) - return "", ErrorSignatureVerification - } - - // Verify the GPG signature returned from the Registry. - asciiArmor := resp.SigningKeys.GPGASCIIArmor() - signer, err := verifySig(shasums, signature, asciiArmor) - if err != nil { - log.Printf("[ERROR] error verifying signature: %s", err) - return "", ErrorSignatureVerification - } - - // Also verify the GPG signature against the HashiCorp public key. This is - // a temporary additional check until a more robust key verification - // process is added in a future release. - _, err = verifySig(shasums, signature, HashicorpPublicKey) - if err != nil { - log.Printf("[ERROR] error verifying signature against HashiCorp public key: %s", err) - return "", ErrorSignatureVerification - } - - // Display identity for GPG key which succeeded verifying the signature. - // This could also be used to display to the user with i.Ui.Info(). - identities := []string{} - for k := range signer.Identities { - identities = append(identities, k) - } - identity := strings.Join(identities, ", ") - log.Printf("[DEBUG] verified GPG signature with key from %s", identity) - - // Extract checksum for this os/arch platform binary and verify against Registry - checksum := checksumForFile(shasums, resp.Filename) - if checksum == "" { - log.Printf("[ERROR] missing checksum for %s from source %s", resp.Filename, resp.ShasumsURL) - return "", ErrorMissingChecksumVerification - } else if checksum != resp.Shasum { - log.Printf("[ERROR] unexpected checksum for %s from source %q", resp.Filename, resp.ShasumsURL) - return "", ErrorChecksumVerification - } - - return checksum, nil -} - -func (i *ProviderInstaller) hostname() (string, error) { - provider := regsrc.NewTerraformProvider("", i.OS, i.Arch) - svchost, err := provider.SvcHost() - if err != nil { - return "", err - } - - return svchost.ForDisplay(), nil -} - -// list all versions available for the named provider -func (i *ProviderInstaller) listProviderVersions(provider addrs.ProviderType) (*response.TerraformProviderVersions, error) { - req := regsrc.NewTerraformProvider(provider.Name, i.OS, i.Arch) - versions, err := i.registry.TerraformProviderVersions(req) - return versions, err -} - -func (i *ProviderInstaller) listProviderDownloadURLs(name, version string) (*response.TerraformProviderPlatformLocation, error) { - urls, err := i.registry.TerraformProviderLocation(regsrc.NewTerraformProvider(name, i.OS, i.Arch), version) - if urls == nil { - return nil, fmt.Errorf("No download urls found for provider %s", name) - } - return urls, err -} - -// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match. -// Prerelease versions are filtered. 
-func (i *ProviderInstaller) findClosestProtocolCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) { - // Loop through all the provider versions to find the earliest and latest - // versions that match the installer protocol to then select the closest of the two - var latest, earliest *response.TerraformProviderVersion - for _, version := range versions { - // Prereleases are filtered and will not be suggested - v, err := VersionStr(version.Version).Parse() - if err != nil || v.IsPrerelease() { - continue - } - - if err := i.checkPluginProtocol(version); err == nil { - if earliest == nil { - // Found the first provider version with compatible protocol - earliest = version - } - // Update the latest protocol compatible version - latest = version - } - } - if earliest == nil { - // No compatible protocol was found for any version - return nil, ErrorNoVersionCompatible - } - - // Convert protocols to comparable types - protoString := strconv.Itoa(int(i.PluginProtocolVersion)) - protocolVersion, err := VersionStr(protoString).Parse() - if err != nil { - return nil, fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) - } - - earliestVersionProtocol, err := VersionStr(earliest.Protocols[0]).Parse() - if err != nil { - return nil, err - } - - // Compare installer protocol version with the first protocol listed of the earliest match - // [A, B] where A is assumed the earliest compatible major version of the protocol pair - if protocolVersion.NewerThan(earliestVersionProtocol) { - // Provider protocols are too old, the closest version is the earliest compatible version - return earliest, nil - } - - // Provider protocols are too new, the closest version is the latest compatible version - return latest, nil -} - -func (i *ProviderInstaller) checkPluginProtocol(versionMeta *response.TerraformProviderVersion) error { - // TODO: should this be a different error? We should probably differentiate between - // no compatible versions and no protocol versions listed at all - if len(versionMeta.Protocols) == 0 { - return fmt.Errorf("no plugin protocol versions listed") - } - - protoString := strconv.Itoa(int(i.PluginProtocolVersion)) - protocolVersion, err := VersionStr(protoString).Parse() - if err != nil { - return fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) - } - protocolConstraint, err := protocolVersion.MinorUpgradeConstraintStr().Parse() - if err != nil { - // This should not fail if the preceding function succeeded. - return fmt.Errorf("invalid plugin protocol version: %q", protocolVersion.String()) - } - - for _, p := range versionMeta.Protocols { - proPro, err := VersionStr(p).Parse() - if err != nil { - // invalid protocol reported by the registry. Move along. - log.Printf("[WARN] invalid provider protocol version %q found in the registry", versionMeta.Version) - continue - } - // success! - if protocolConstraint.Allows(proPro) { - return nil - } - } - - return ErrorNoVersionCompatible -} - -// platformCompatibleVersions returns a list of provider versions that are -// compatible with the requested platform. 
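// [Editor's note: a small sketch of the protocol matching idea used by
// checkPluginProtocol above, using github.com/hashicorp/go-version directly;
// assuming, as the surrounding code suggests, that the installer's protocol
// version becomes a "~> X.0"-style constraint tested against each advertised
// protocol.]
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// Protocol 5 accepts 5.x but rejects other major versions.
	constraint, err := version.NewConstraint("~> 5.0")
	if err != nil {
		panic(err)
	}
	for _, p := range []string{"4.0", "5.1", "6.0"} {
		v, err := version.NewVersion(p)
		if err != nil {
			continue // skip malformed protocol strings, as the real code does
		}
		fmt.Println(p, constraint.Check(v)) // 4.0 false, 5.1 true, 6.0 false
	}
}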
-func (i *ProviderInstaller) platformCompatibleVersions(versions []*response.TerraformProviderVersion) []*response.TerraformProviderVersion { - var v []*response.TerraformProviderVersion - for _, version := range versions { - if err := i.checkPlatformCompatibility(version); err == nil { - v = append(v, version) - } - } - return v -} - -func (i *ProviderInstaller) checkPlatformCompatibility(versionMeta *response.TerraformProviderVersion) error { - if len(versionMeta.Platforms) == 0 { - return fmt.Errorf("no supported provider platforms listed") - } - for _, p := range versionMeta.Platforms { - if p.Arch == i.Arch && p.OS == i.OS { - return nil - } - } - return fmt.Errorf("version %s does not support the requested platform %s_%s", versionMeta.Version, i.OS, i.Arch) -} - -// take the list of available versions for a plugin, and filter out those that -// don't fit the constraints. -func allowedVersions(available *response.TerraformProviderVersions, required Constraints) []*response.TerraformProviderVersion { - var allowed []*response.TerraformProviderVersion - - for _, v := range available.Versions { - version, err := VersionStr(v.Version).Parse() - if err != nil { - log.Printf("[WARN] invalid version found for %q: %s", available.ID, err) - continue - } - if required.Allows(version) { - allowed = append(allowed, v) - } - } - return allowed -} - -func checksumForFile(sums []byte, name string) string { - for _, line := range strings.Split(string(sums), "\n") { - parts := strings.Fields(line) - if len(parts) > 1 && parts[1] == name { - return parts[0] - } - } - return "" -} - -func getFile(url string) ([]byte, error) { - resp, err := httpClient.Get(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s", resp.Status) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return data, err - } - return data, nil -} - -// providerProtocolTooOld is a message sent to the CLI UI if the provider's -// supported protocol versions are too old for the user's version of terraform, -// but an older version of the provider is compatible. -const providerProtocolTooOld = ` -[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] - -Provider version %s is the earliest compatible version. Select it with -the following version constraint: - - version = %q - -Terraform checked all of the plugin versions matching the given constraint: - %s - -Consult the documentation for this provider for more information on -compatibility between provider and Terraform versions. -` - -// providerProtocolTooNew is a message sent to the CLI UI if the provider's -// supported protocol versions are too new for the user's version of terraform, -// and the user could either upgrade terraform or choose an older version of the -// provider -const providerProtocolTooNew = ` -[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] - -Provider version %s is the latest compatible version. Select it with -the following constraint: - - version = %q - -Terraform checked all of the plugin versions matching the given constraint: - %s - -Consult the documentation for this provider for more information on -compatibility between provider and Terraform versions. - -Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases. 
-` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go deleted file mode 100644 index 1a100426..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go +++ /dev/null @@ -1,48 +0,0 @@ -package discovery - -// PluginCache is an interface implemented by objects that are able to maintain -// a cache of plugins. -type PluginCache interface { - // CachedPluginPath returns a path where the requested plugin is already - // cached, or an empty string if the requested plugin is not yet cached. - CachedPluginPath(kind string, name string, version Version) string - - // InstallDir returns the directory that new plugins should be installed into - // in order to populate the cache. This directory should be used as the - // first argument to getter.Get when downloading plugins with go-getter. - // - // After installing into this directory, use CachedPluginPath to obtain the - // path where the plugin was installed. - InstallDir() string -} - -// NewLocalPluginCache returns a PluginCache that caches plugins in a -// given local directory. -func NewLocalPluginCache(dir string) PluginCache { - return &pluginCache{ - Dir: dir, - } -} - -type pluginCache struct { - Dir string -} - -func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { - allPlugins := FindPlugins(kind, []string{c.Dir}) - plugins := allPlugins.WithName(name).WithVersion(version) - - if plugins.Count() == 0 { - // nothing cached - return "" - } - - // There should generally be only one plugin here; if there's more than - // one match for some reason then we'll just choose one arbitrarily. 
- plugin := plugins.Newest() - return plugin.Path -} - -func (c *pluginCache) InstallDir() string { - return c.Dir -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go deleted file mode 100644 index 4622ca05..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go +++ /dev/null @@ -1,34 +0,0 @@ -package discovery - -// HashicorpPublicKey is the HashiCorp public key, also available at -// https://www.hashicorp.com/security -const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f -W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq -fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA -3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca -KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k -SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 -cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG -CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n -Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i -SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi -psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w -sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO -klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW -WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 -wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j -2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM -skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo -mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y -0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA -CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc -z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP -0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG -unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ -EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ -oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C -=LYpS ------END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go deleted file mode 100644 index bdcebcb9..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go +++ /dev/null @@ -1,41 +0,0 @@ -package discovery - -import ( - "crypto/sha256" - "io" - "os" -) - -// PluginMeta is metadata about a plugin, useful for launching the plugin -// and for understanding which plugins are available. -type PluginMeta struct { - // Name is the name of the plugin, e.g. as inferred from the plugin - // binary's filename, or by explicit configuration. - Name string - - // Version is the semver version of the plugin, expressed as a string - // that might not be semver-valid. - Version VersionStr - - // Path is the absolute path of the executable that can be launched - // to provide the RPC server for this plugin. - Path string -} - -// SHA256 returns a SHA256 hash of the content of the referenced executable -// file, or an error if the file's contents cannot be read. 
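
// A short sketch (not part of this file) of pairing PluginMeta.SHA256, defined
// just below, with a pinned digest such as one recorded in a requirements
// source. The helper name and hex-encoded digest are illustrative; assumes
// "bytes" and "encoding/hex" are imported.
func digestMatches(m PluginMeta, requiredHex string) (bool, error) {
	got, err := m.SHA256()
	if err != nil {
		return false, err
	}
	want, err := hex.DecodeString(requiredHex)
	if err != nil {
		return false, err
	}
	return bytes.Equal(got, want), nil
}
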
-func (m PluginMeta) SHA256() ([]byte, error) { - f, err := os.Open(m.Path) - if err != nil { - return nil, err - } - defer f.Close() - - h := sha256.New() - _, err = io.Copy(h, f) - if err != nil { - return nil, err - } - - return h.Sum(nil), nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go deleted file mode 100644 index 3a992892..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go +++ /dev/null @@ -1,195 +0,0 @@ -package discovery - -// A PluginMetaSet is a set of PluginMeta objects meeting a certain criteria. -// -// Methods on this type allow filtering of the set to produce subsets that -// meet more restrictive criteria. -type PluginMetaSet map[PluginMeta]struct{} - -// Add inserts the given PluginMeta into the receiving set. This is a no-op -// if the given meta is already present. -func (s PluginMetaSet) Add(p PluginMeta) { - s[p] = struct{}{} -} - -// Remove removes the given PluginMeta from the receiving set. This is a no-op -// if the given meta is not already present. -func (s PluginMetaSet) Remove(p PluginMeta) { - delete(s, p) -} - -// Has returns true if the given meta is in the receiving set, or false -// otherwise. -func (s PluginMetaSet) Has(p PluginMeta) bool { - _, ok := s[p] - return ok -} - -// Count returns the number of metas in the set -func (s PluginMetaSet) Count() int { - return len(s) -} - -// ValidateVersions returns two new PluginMetaSets, separating those with -// versions that have syntax-valid semver versions from those that don't. -// -// Eliminating invalid versions from consideration (and possibly warning about -// them) is usually the first step of working with a meta set after discovery -// has completed. -func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) { - valid = make(PluginMetaSet) - invalid = make(PluginMetaSet) - for p := range s { - if _, err := p.Version.Parse(); err == nil { - valid.Add(p) - } else { - invalid.Add(p) - } - } - return -} - -// WithName returns the subset of metas that have the given name. -func (s PluginMetaSet) WithName(name string) PluginMetaSet { - ns := make(PluginMetaSet) - for p := range s { - if p.Name == name { - ns.Add(p) - } - } - return ns -} - -// WithVersion returns the subset of metas that have the given version. -// -// This should be used only with the "valid" result from ValidateVersions; -// it will ignore any plugin metas that have invalid version strings. -func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { - ns := make(PluginMetaSet) - for p := range s { - gotVersion, err := p.Version.Parse() - if err != nil { - continue - } - if gotVersion.Equal(version) { - ns.Add(p) - } - } - return ns -} - -// ByName groups the metas in the set by their Names, returning a map. -func (s PluginMetaSet) ByName() map[string]PluginMetaSet { - ret := make(map[string]PluginMetaSet) - for p := range s { - if _, ok := ret[p.Name]; !ok { - ret[p.Name] = make(PluginMetaSet) - } - ret[p.Name].Add(p) - } - return ret -} - -// Newest returns the one item from the set that has the newest Version value. -// -// The result is meaningful only if the set is already filtered such that -// all of the metas have the same Name. -// -// If there isn't at least one meta in the set then this function will panic. -// Use Count() to ensure that there is at least one value before calling. 
-//
-// If any of the metas have invalid version strings then this function will
-// panic. Use ValidateVersions() first to filter out metas with invalid
-// versions.
-//
-// If two metas have the same Version then one is arbitrarily chosen. This
-// situation should be avoided by pre-filtering the set.
-func (s PluginMetaSet) Newest() PluginMeta {
-	if len(s) == 0 {
-		panic("can't call Newest on empty PluginMetaSet")
-	}
-
-	var first = true
-	var winner PluginMeta
-	var winnerVersion Version
-	for p := range s {
-		version, err := p.Version.Parse()
-		if err != nil {
-			panic(err)
-		}
-
-		if first || version.NewerThan(winnerVersion) {
-			winner = p
-			winnerVersion = version
-			first = false
-		}
-	}
-
-	return winner
-}
-
-// ConstrainVersions takes a set of requirements and attempts to
-// return a map from name to a set of metas that have the matching
-// name and an appropriate version.
-//
-// If any of the given requirements match *no* plugins then its PluginMetaSet
-// in the returned map will be empty.
-//
-// All viable metas are returned, so the caller can apply any desired filtering
-// to reduce down to a single option. For example, calling Newest() to obtain
-// the highest available version.
-//
-// If any of the metas in the set have invalid version strings then this
-// function will panic. Use ValidateVersions() first to filter out metas with
-// invalid versions.
-func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet {
-	ret := make(map[string]PluginMetaSet)
-	for p := range s {
-		name := p.Name
-		allowedVersions, ok := reqd[name]
-		if !ok {
-			continue
-		}
-		if _, ok := ret[p.Name]; !ok {
-			ret[p.Name] = make(PluginMetaSet)
-		}
-		version, err := p.Version.Parse()
-		if err != nil {
-			panic(err)
-		}
-		if allowedVersions.Allows(version) {
-			ret[p.Name].Add(p)
-		}
-	}
-	return ret
-}
-
-// OverridePaths returns a new set where any existing plugins with the given
-// names are removed and replaced with the single path given in the map.
-//
-// This is here only to continue to support the legacy way of overriding
-// plugin binaries in the .terraformrc file. It treats all given plugins
-// as pre-versioning (version 0.0.0). This mechanism will eventually be
-// phased out, with vendor directories being the intended replacement.
-func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet {
-	ret := make(PluginMetaSet)
-	for p := range s {
-		if _, ok := paths[p.Name]; ok {
-			// Skip plugins that we're overriding
-			continue
-		}
-
-		ret.Add(p)
-	}
-
-	// Now add the metadata for overriding plugins
-	for name, path := range paths {
-		ret.Add(PluginMeta{
-			Name:    name,
-			Version: VersionZero,
-			Path:    path,
-		})
-	}
-
-	return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go
deleted file mode 100644
index 75430fdd..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package discovery
-
-import (
-	"bytes"
-)
-
-// PluginRequirements describes a set of plugins (assumed to be of a consistent
-// kind) that are required to exist and have versions within the given
-// corresponding sets.
-type PluginRequirements map[string]*PluginConstraints
-
-// PluginConstraints represents an element of PluginRequirements describing
-// the constraints for a single plugin.
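
// A minimal sketch (illustrative, not part of this file) of the usual
// discovery pipeline built from the PluginMetaSet methods above: drop metas
// with unparseable versions, narrow by name, then pick the newest.
func newestNamed(all PluginMetaSet, name string) (PluginMeta, bool) {
	valid, _ := all.ValidateVersions() // invalid-version metas are discarded
	byName := valid.WithName(name)
	if byName.Count() == 0 {
		return PluginMeta{}, false
	}
	// Safe: Count() confirmed a member and versions were validated above.
	return byName.Newest(), true
}
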
-type PluginConstraints struct { - // Specifies that the plugin's version must be within the given - // constraints. - Versions Constraints - - // If non-nil, the hash of the on-disk plugin executable must exactly - // match the SHA256 hash given here. - SHA256 []byte -} - -// Allows returns true if the given version is within the receiver's version -// constraints. -func (s *PluginConstraints) Allows(v Version) bool { - return s.Versions.Allows(v) -} - -// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable, -// either because it matches the constraint or because there is no such -// constraint. -func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool { - if s.SHA256 == nil { - return true - } - return bytes.Equal(s.SHA256, digest) -} - -// Merge takes the contents of the receiver and the other given requirements -// object and merges them together into a single requirements structure -// that satisfies both sets of requirements. -// -// Note that it doesn't make sense to merge two PluginRequirements with -// differing required plugin SHA256 hashes, since the result will never -// match any plugin. -func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements { - ret := make(PluginRequirements) - for n, c := range r { - ret[n] = &PluginConstraints{ - Versions: Constraints{}.Append(c.Versions), - SHA256: c.SHA256, - } - } - for n, c := range other { - if existing, exists := ret[n]; exists { - ret[n].Versions = ret[n].Versions.Append(c.Versions) - - if existing.SHA256 != nil { - if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) { - // If we've been asked to merge two constraints with - // different SHA256 hashes then we'll produce a dummy value - // that can never match anything. This is a silly edge case - // that no reasonable caller should hit. - ret[n].SHA256 = []byte(invalidProviderHash) - } - } else { - ret[n].SHA256 = c.SHA256 // might still be nil - } - } else { - ret[n] = &PluginConstraints{ - Versions: Constraints{}.Append(c.Versions), - SHA256: c.SHA256, - } - } - } - return ret -} - -// LockExecutables applies additional constraints to the receiver that -// require plugin executables with specific SHA256 digests. This modifies -// the receiver in-place, since it's intended to be applied after -// version constraints have been resolved. -// -// The given map must include a key for every plugin that is already -// required. If not, any missing keys will cause the corresponding plugin -// to never match, though the direct caller doesn't necessarily need to -// guarantee this as long as the downstream code _applying_ these constraints -// is able to deal with the non-match in some way. -func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) { - for name, cons := range r { - digest := sha256s[name] - - if digest == nil { - // Prevent any match, which will then presumably cause the - // downstream consumer of this requirements to report an error. 
- cons.SHA256 = []byte(invalidProviderHash) - continue - } - - cons.SHA256 = digest - } -} - -const invalidProviderHash = "" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go deleted file mode 100644 index 7bbae50c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go +++ /dev/null @@ -1,19 +0,0 @@ -package discovery - -import ( - "bytes" - "strings" - - "golang.org/x/crypto/openpgp" -) - -// Verify the data using the provided openpgp detached signature and the -// embedded hashicorp public key. -func verifySig(data, sig []byte, armor string) (*openpgp.Entity, error) { - el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armor)) - if err != nil { - return nil, err - } - - return openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go deleted file mode 100644 index 4311d510..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go +++ /dev/null @@ -1,77 +0,0 @@ -package discovery - -import ( - "fmt" - "sort" - - version "github.com/hashicorp/go-version" -) - -const VersionZero = "0.0.0" - -// A VersionStr is a string containing a possibly-invalid representation -// of a semver version number. Call Parse on it to obtain a real Version -// object, or discover that it is invalid. -type VersionStr string - -// Parse transforms a VersionStr into a Version if it is -// syntactically valid. If it isn't then an error is returned instead. -func (s VersionStr) Parse() (Version, error) { - raw, err := version.NewVersion(string(s)) - if err != nil { - return Version{}, err - } - return Version{raw}, nil -} - -// MustParse transforms a VersionStr into a Version if it is -// syntactically valid. If it isn't then it panics. -func (s VersionStr) MustParse() Version { - ret, err := s.Parse() - if err != nil { - panic(err) - } - return ret -} - -// Version represents a version number that has been parsed from -// a semver string and known to be valid. -type Version struct { - // We wrap this here just because it avoids a proliferation of - // direct go-version imports all over the place, and keeps the - // version-processing details within this package. - raw *version.Version -} - -func (v Version) String() string { - return v.raw.String() -} - -func (v Version) NewerThan(other Version) bool { - return v.raw.GreaterThan(other.raw) -} - -func (v Version) Equal(other Version) bool { - return v.raw.Equal(other.raw) -} - -// IsPrerelease determines if version is a prerelease -func (v Version) IsPrerelease() bool { - return v.raw.Prerelease() != "" -} - -// MinorUpgradeConstraintStr returns a ConstraintStr that would permit -// minor upgrades relative to the receiving version. -func (v Version) MinorUpgradeConstraintStr() ConstraintStr { - segments := v.raw.Segments() - return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1])) -} - -type Versions []Version - -// Sort sorts version from newest to oldest. 
-func (v Versions) Sort() {
-	sort.Slice(v, func(i, j int) bool {
-		return v[i].NewerThan(v[j])
-	})
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go
deleted file mode 100644
index fc8b6f8b..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package discovery
-
-import (
-	"sort"
-
-	version "github.com/hashicorp/go-version"
-)
-
-// A ConstraintStr is a string containing a possibly-invalid representation
-// of a version constraint provided in configuration. Call Parse on it to
-// obtain a real Constraint object, or discover that it is invalid.
-type ConstraintStr string
-
-// Parse transforms a ConstraintStr into a Constraints if it is
-// syntactically valid. If it isn't then an error is returned instead.
-func (s ConstraintStr) Parse() (Constraints, error) {
-	raw, err := version.NewConstraint(string(s))
-	if err != nil {
-		return Constraints{}, err
-	}
-	return Constraints{raw}, nil
-}
-
-// MustParse is like Parse but it panics if the constraint string is invalid.
-func (s ConstraintStr) MustParse() Constraints {
-	ret, err := s.Parse()
-	if err != nil {
-		panic(err)
-	}
-	return ret
-}
-
-// Constraints represents a set of versions which any given Version is either
-// a member of or not.
-type Constraints struct {
-	raw version.Constraints
-}
-
-// NewConstraints creates a Constraints based on a version.Constraints.
-func NewConstraints(c version.Constraints) Constraints {
-	return Constraints{c}
-}
-
-// AllVersions is a Constraints containing all versions.
-var AllVersions Constraints
-
-func init() {
-	AllVersions = Constraints{
-		raw: make(version.Constraints, 0),
-	}
-}
-
-// Allows returns true if the given version is permitted by the receiving
-// constraints set.
-func (s Constraints) Allows(v Version) bool {
-	return s.raw.Check(v.raw)
-}
-
-// Append combines the receiving set with the given other set to produce
-// a set that is the intersection of both sets, which is to say that the
-// resulting constraints contain only the versions that are members of both.
-func (s Constraints) Append(other Constraints) Constraints {
-	raw := make(version.Constraints, 0, len(s.raw)+len(other.raw))
-
-	// Since "raw" is a list of constraints that remove versions from the set,
-	// "Intersection" is implemented by concatenating together those lists,
-	// thus leaving behind only the versions not removed by either list.
-	raw = append(raw, s.raw...)
-	raw = append(raw, other.raw...)
-
-	// while the set is unordered, we sort these lexically for consistent output
-	sort.Slice(raw, func(i, j int) bool {
-		return raw[i].String() < raw[j].String()
-	})
-
-	return Constraints{raw}
-}
-
-// String returns a string representation of the set members as a set
-// of range constraints.
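
// A small usage sketch of the types above (illustrative values; assumes "fmt"
// is imported). Append is an intersection, so "~> 1.0" combined with ">= 1.3"
// admits 1.3.x through the rest of the 1.x series but nothing from 2.x.
func exampleConstraints() {
	both := ConstraintStr("~> 1.0").MustParse().Append(ConstraintStr(">= 1.3").MustParse())
	fmt.Println(both.Allows(VersionStr("1.4.2").MustParse())) // true
	fmt.Println(both.Allows(VersionStr("1.1.0").MustParse())) // false: below 1.3
	fmt.Println(both.Allows(VersionStr("2.0.0").MustParse())) // false: outside ~> 1.0
}
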
-func (s Constraints) String() string { - return s.raw.String() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go deleted file mode 100644 index 0f48f244..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go +++ /dev/null @@ -1,47 +0,0 @@ -package providers - -import ( - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// AddressedTypes is a helper that extracts all of the distinct provider -// types from the given list of relative provider configuration addresses. -func AddressedTypes(providerAddrs []addrs.ProviderConfig) []string { - if len(providerAddrs) == 0 { - return nil - } - m := map[string]struct{}{} - for _, addr := range providerAddrs { - m[addr.Type] = struct{}{} - } - - names := make([]string, 0, len(m)) - for typeName := range m { - names = append(names, typeName) - } - - sort.Strings(names) // Stable result for tests - return names -} - -// AddressedTypesAbs is a helper that extracts all of the distinct provider -// types from the given list of absolute provider configuration addresses. -func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []string { - if len(providerAddrs) == 0 { - return nil - } - m := map[string]struct{}{} - for _, addr := range providerAddrs { - m[addr.ProviderConfig.Type] = struct{}{} - } - - names := make([]string, 0, len(m)) - for typeName := range m { - names = append(names, typeName) - } - - sort.Strings(names) // Stable result for tests - return names -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go deleted file mode 100644 index 39aa1de6..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package providers contains the interface and primary types required to -// implement a Terraform resource provider. -package providers diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go deleted file mode 100644 index 3d0aa8ec..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go +++ /dev/null @@ -1,359 +0,0 @@ -package providers - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Interface represents the set of methods required for a complete resource -// provider plugin. -type Interface interface { - // GetSchema returns the complete schema for the provider. - GetSchema() GetSchemaResponse - - // PrepareProviderConfig allows the provider to validate the configuration - // values, and set or override any values with defaults. - PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse - - // ValidateResourceTypeConfig allows the provider to validate the resource - // configuration values. - ValidateResourceTypeConfig(ValidateResourceTypeConfigRequest) ValidateResourceTypeConfigResponse - - // ValidateDataSource allows the provider to validate the data source - // configuration values. 
-	ValidateDataSourceConfig(ValidateDataSourceConfigRequest) ValidateDataSourceConfigResponse
-
-	// UpgradeResourceState is called when the state loader encounters an
-	// instance state whose schema version is less than the one reported by the
-	// currently-used version of the corresponding provider, and the upgraded
-	// result is used for any further processing.
-	UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse
-
-	// Configure configures and initializes the provider.
-	Configure(ConfigureRequest) ConfigureResponse
-
-	// Stop is called when the provider should halt any in-flight actions.
-	//
-	// Stop should not block waiting for in-flight actions to complete. It
-	// should take any action it wants and return immediately acknowledging it
-	// has received the stop request. Terraform will not make any further API
-	// calls to the provider after Stop is called.
-	//
-	// The error returned, if non-nil, is assumed to mean that signaling the
-	// stop somehow failed and that the user should expect potentially waiting
-	// a longer period of time.
-	Stop() error
-
-	// ReadResource refreshes a resource and returns its current state.
-	ReadResource(ReadResourceRequest) ReadResourceResponse
-
-	// PlanResourceChange takes the current state and proposed state of a
-	// resource, and returns the planned final state.
-	PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse
-
-	// ApplyResourceChange takes the planned state for a resource, which may
-	// yet contain unknown computed values, and applies the changes returning
-	// the final state.
-	ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse
-
-	// ImportResourceState requests that the given resource be imported.
-	ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse
-
-	// ReadDataSource returns the data source's current state.
-	ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse
-
-	// Close shuts down the plugin process if applicable.
-	Close() error
-}
-
-type GetSchemaResponse struct {
-	// Provider is the schema for the provider itself.
-	Provider Schema
-
-	// ResourceTypes maps the resource type name to that type's schema.
-	ResourceTypes map[string]Schema
-
-	// DataSources maps the data source name to that data source's schema.
-	DataSources map[string]Schema
-
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-}
-
-// Schema pairs a provider or resource schema with that schema's version.
-// This is used to be able to upgrade the schema in UpgradeResourceState.
-type Schema struct {
-	Version int64
-	Block   *configschema.Block
-}
-
-type PrepareProviderConfigRequest struct {
-	// Config is the raw configuration value for the provider.
-	Config cty.Value
-}
-
-type PrepareProviderConfigResponse struct {
-	// PreparedConfig is the configuration as prepared by the provider.
-	PreparedConfig cty.Value
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-}
-
-type ValidateResourceTypeConfigRequest struct {
-	// TypeName is the name of the resource type to validate.
-	TypeName string
-
-	// Config is the configuration value to validate, which may contain unknown
-	// values.
-	Config cty.Value
-}
-
-type ValidateResourceTypeConfigResponse struct {
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-}
-
-type ValidateDataSourceConfigRequest struct {
-	// TypeName is the name of the data source type to validate.
-	TypeName string
-
-	// Config is the configuration value to validate, which may contain unknown
-	// values.
-	Config cty.Value
-}
-
-type ValidateDataSourceConfigResponse struct {
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-}
-
-type UpgradeResourceStateRequest struct {
-	// TypeName is the name of the resource type being upgraded.
-	TypeName string
-
-	// Version is the version of the schema that created the current state.
-	Version int64
-
-	// RawStateJSON and RawStateFlatmap contain the state that needs to be
-	// upgraded to match the current schema version. Because the schema is
-	// unknown, this contains only the raw data as stored in the state.
-	// RawStateJSON is the current json state encoding.
-	// RawStateFlatmap is the legacy flatmap encoding.
-	// Only one of these fields may be set for the upgrade request.
-	RawStateJSON    []byte
-	RawStateFlatmap map[string]string
-}
-
-type UpgradeResourceStateResponse struct {
-	// UpgradedState is the newly upgraded resource state.
-	UpgradedState cty.Value
-
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-}
-
-type ConfigureRequest struct {
-	// TerraformVersion is the version string from the running instance of
-	// terraform. Providers can use TerraformVersion to verify compatibility,
-	// and to store for informational purposes.
-	TerraformVersion string
-
-	// Config is the complete configuration value for the provider.
-	Config cty.Value
-}
-
-type ConfigureResponse struct {
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-}
-
-type ReadResourceRequest struct {
-	// TypeName is the name of the resource type being read.
-	TypeName string
-
-	// PriorState contains the previously saved state value for this resource.
-	PriorState cty.Value
-
-	// Private is an opaque blob that will be stored in state along with the
-	// resource. It is intended only for interpretation by the provider itself.
-	Private []byte
-}
-
-type ReadResourceResponse struct {
-	// NewState contains the current state of the resource.
-	NewState cty.Value
-
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-
-	// Private is an opaque blob that will be stored in state along with the
-	// resource. It is intended only for interpretation by the provider itself.
-	Private []byte
-}
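
// A hedged sketch (not part of this file) of how Terraform core drives the
// plan/apply pair declared in Interface above, using the request/response
// types in this file: the opaque PlannedPrivate blob from the plan response
// is relayed untouched into the apply request. Diagnostics handling omitted.
func planThenApply(p Interface, typeName string, prior, proposed, config cty.Value) ApplyResourceChangeResponse {
	plan := p.PlanResourceChange(PlanResourceChangeRequest{
		TypeName:         typeName,
		PriorState:       prior,
		ProposedNewState: proposed,
		Config:           config,
	})
	return p.ApplyResourceChange(ApplyResourceChangeRequest{
		TypeName:       typeName,
		PriorState:     prior,
		PlannedState:   plan.PlannedState,
		Config:         config,
		PlannedPrivate: plan.PlannedPrivate, // opaque; relayed back verbatim
	})
}
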
-
-type PlanResourceChangeRequest struct {
-	// TypeName is the name of the resource type to plan.
-	TypeName string
-
-	// PriorState is the previously saved state value for this resource.
-	PriorState cty.Value
-
-	// ProposedNewState is the expected state after the new configuration is
-	// applied. This is created by directly applying the configuration to the
-	// PriorState. The provider is then responsible for applying any further
-	// changes required to create the proposed final state.
-	ProposedNewState cty.Value
-
-	// Config is the resource configuration, before being merged with the
-	// PriorState. Any value not explicitly set in the configuration will be
-	// null. Config is supplied for reference, but Provider implementations
-	// should prefer the ProposedNewState in most circumstances.
-	Config cty.Value
-
-	// PriorPrivate is the previously saved private data returned from the
-	// provider during the last apply.
-	PriorPrivate []byte
-}
-
-type PlanResourceChangeResponse struct {
-	// PlannedState is the expected state of the resource once the current
-	// configuration is applied.
-	PlannedState cty.Value
-
-	// RequiresReplace is the list of the attributes that require
-	// resource replacement.
-	RequiresReplace []cty.Path
-
-	// PlannedPrivate is an opaque blob that is not interpreted by terraform
-	// core. This will be saved and relayed back to the provider during
-	// ApplyResourceChange.
-	PlannedPrivate []byte
-
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-
-	// LegacyTypeSystem is set only if the provider is using the legacy SDK
-	// whose type system cannot be precisely mapped into the Terraform type
-	// system. We use this to bypass certain consistency checks that would
-	// otherwise fail due to this imprecise mapping. No other provider or SDK
-	// implementation is permitted to set this.
-	LegacyTypeSystem bool
-}
-
-type ApplyResourceChangeRequest struct {
-	// TypeName is the name of the resource type being applied.
-	TypeName string
-
-	// PriorState is the current state of the resource.
-	PriorState cty.Value
-
-	// PlannedState is the state returned from PlanResourceChange, and should
-	// represent the new state, minus any remaining computed attributes.
-	PlannedState cty.Value
-
-	// Config is the resource configuration, before being merged with the
-	// PriorState. Any value not explicitly set in the configuration will be
-	// null. Config is supplied for reference, but Provider implementations
-	// should prefer the PlannedState in most circumstances.
-	Config cty.Value
-
-	// PlannedPrivate is the same value as returned by PlanResourceChange.
-	PlannedPrivate []byte
-}
-
-type ApplyResourceChangeResponse struct {
-	// NewState is the new complete state after applying the planned change.
-	// In the event of an error, NewState should represent the most recent
-	// known state of the resource, if it exists.
-	NewState cty.Value
-
-	// Private is an opaque blob that will be stored in state along with the
-	// resource. It is intended only for interpretation by the provider itself.
-	Private []byte
-
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-
-	// LegacyTypeSystem is set only if the provider is using the legacy SDK
-	// whose type system cannot be precisely mapped into the Terraform type
-	// system. We use this to bypass certain consistency checks that would
-	// otherwise fail due to this imprecise mapping. No other provider or SDK
-	// implementation is permitted to set this.
-	LegacyTypeSystem bool
-}
-
-type ImportResourceStateRequest struct {
-	// TypeName is the name of the resource type to be imported.
-	TypeName string
-
-	// ID is a string with which the provider can identify the resource to be
-	// imported.
-	ID string
-}
-
-type ImportResourceStateResponse struct {
-	// ImportedResources contains one or more state values related to the
-	// imported resource. It is not required that these be complete, only that
-	// there is enough identifying information for the provider to successfully
-	// update the states in ReadResource.
-	ImportedResources []ImportedResource
-
-	// Diagnostics contains any warnings or errors from the method call.
- Diagnostics tfdiags.Diagnostics -} - -// ImportedResource represents an object being imported into Terraform with the -// help of a provider. An ImportedObject is a RemoteObject that has been read -// by the provider's import handler but hasn't yet been committed to state. -type ImportedResource struct { - // TypeName is the name of the resource type associated with the - // returned state. It's possible for providers to import multiple related - // types with a single import request. - TypeName string - - // State is the state of the remote object being imported. This may not be - // complete, but must contain enough information to uniquely identify the - // resource. - State cty.Value - - // Private is an opaque blob that will be stored in state along with the - // resource. It is intended only for interpretation by the provider itself. - Private []byte -} - -// AsInstanceObject converts the receiving ImportedObject into a -// ResourceInstanceObject that has status ObjectReady. -// -// The returned object does not know its own resource type, so the caller must -// retain the ResourceType value from the source object if this information is -// needed. -// -// The returned object also has no dependency addresses, but the caller may -// freely modify the direct fields of the returned object without affecting -// the receiver. -func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { - return &states.ResourceInstanceObject{ - Status: states.ObjectReady, - Value: ir.State, - Private: ir.Private, - } -} - -type ReadDataSourceRequest struct { - // TypeName is the name of the data source type to Read. - TypeName string - - // Config is the complete configuration for the requested data source. - Config cty.Value -} - -type ReadDataSourceResponse struct { - // State is the current state of the requested data source. - State cty.Value - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go deleted file mode 100644 index b42e4920..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go +++ /dev/null @@ -1,68 +0,0 @@ -package providers - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" -) - -// Resolver is an interface implemented by objects that are able to resolve -// a given set of resource provider version constraints into Factory -// callbacks. -type Resolver interface { - // Given a constraint map, return a Factory for each requested provider. - // If some or all of the constraints cannot be satisfied, return a non-nil - // slice of errors describing the problems. - ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) -} - -// ResolverFunc wraps a callback function and turns it into a Resolver -// implementation, for convenience in situations where a function and its -// associated closure are sufficient as a resolver implementation. -type ResolverFunc func(reqd discovery.PluginRequirements) (map[string]Factory, []error) - -// ResolveProviders implements Resolver by calling the -// wrapped function. 
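
// A usage sketch for the ResolverFixed and FactoryFixed helpers defined later
// in this file, as a test harness might wire an in-process provider. The
// mockProvider value and requirements argument are hypothetical.
func exampleResolve(mockProvider Interface, reqd discovery.PluginRequirements) (map[string]Factory, []error) {
	resolver := ResolverFixed(map[string]Factory{
		"pingaccess": FactoryFixed(mockProvider),
	})
	// Version constraints in reqd are ignored by a fixed resolver; unknown
	// names come back as errors.
	return resolver.ResolveProviders(reqd)
}
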
-func (f ResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) { - return f(reqd) -} - -// ResolverFixed returns a Resolver that has a fixed set of provider factories -// provided by the caller. The returned resolver ignores version constraints -// entirely and just returns the given factory for each requested provider -// name. -// -// This function is primarily used in tests, to provide mock providers or -// in-process providers under test. -func ResolverFixed(factories map[string]Factory) Resolver { - return ResolverFunc(func(reqd discovery.PluginRequirements) (map[string]Factory, []error) { - ret := make(map[string]Factory, len(reqd)) - var errs []error - for name := range reqd { - if factory, exists := factories[name]; exists { - ret[name] = factory - } else { - errs = append(errs, fmt.Errorf("provider %q is not available", name)) - } - } - return ret, errs - }) -} - -// Factory is a function type that creates a new instance of a resource -// provider, or returns an error if that is impossible. -type Factory func() (Interface, error) - -// FactoryFixed is a helper that creates a Factory that just returns some given -// single provider. -// -// Unlike usual factories, the exact same instance is returned for each call -// to the factory and so this must be used in only specialized situations where -// the caller can take care to either not mutate the given provider at all -// or to mutate it in ways that will not cause unexpected behavior for others -// holding the same reference. -func FactoryFixed(p Interface) Factory { - return func() (Interface, error) { - return p, nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go deleted file mode 100644 index b03ba9a1..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package provisioners contains the interface and primary types to implement a -// Terraform resource provisioner. -package provisioners diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go deleted file mode 100644 index 7a9dca0a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go +++ /dev/null @@ -1,5 +0,0 @@ -package provisioners - -// Factory is a function type that creates a new instance of a resource -// provisioner, or returns an error if that is impossible. -type Factory func() (Interface, error) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go deleted file mode 100644 index 7d8f4076..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go +++ /dev/null @@ -1,82 +0,0 @@ -package provisioners - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// Interface is the set of methods required for a resource provisioner plugin. -type Interface interface { - // GetSchema returns the schema for the provisioner configuration. 
- GetSchema() GetSchemaResponse - - // ValidateProvisionerConfig allows the provisioner to validate the - // configuration values. - ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse - - // ProvisionResource runs the provisioner with provided configuration. - // ProvisionResource blocks until the execution is complete. - // If the returned diagnostics contain any errors, the resource will be - // left in a tainted state. - ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse - - // Stop is called to interrupt the provisioner. - // - // Stop should not block waiting for in-flight actions to complete. It - // should take any action it wants and return immediately acknowledging it - // has received the stop request. Terraform will not make any further API - // calls to the provisioner after Stop is called. - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - // Close shuts down the plugin process if applicable. - Close() error -} - -type GetSchemaResponse struct { - // Provisioner contains the schema for this provisioner. - Provisioner *configschema.Block - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -// UIOutput provides the Output method for resource provisioner -// plugins to write any output to the UI. -// -// Provisioners may call the Output method multiple times while Apply is in -// progress. It is invalid to call Output after Apply returns. -type UIOutput interface { - Output(string) -} - -type ValidateProvisionerConfigRequest struct { - // Config is the complete configuration to be used for the provisioner. - Config cty.Value -} - -type ValidateProvisionerConfigResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ProvisionResourceRequest struct { - // Config is the complete provisioner configuration. - Config cty.Value - - // Connection contains any information required to access the resource - // instance. - Connection cty.Value - - // UIOutput is used to return output during the Apply operation. - UIOutput UIOutput -} - -type ProvisionResourceResponse struct { - // Diagnostics contains any warnings or errors from the method call. 
- Diagnostics tfdiags.Diagnostics -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go deleted file mode 100644 index 4ef22052..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go +++ /dev/null @@ -1,346 +0,0 @@ -package registry - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "path" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/httpclient" - internalhttpclient "github.com/hashicorp/terraform-plugin-sdk/internal/httpclient" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/response" - "github.com/hashicorp/terraform-plugin-sdk/internal/version" - "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/disco" -) - -const ( - xTerraformGet = "X-Terraform-Get" - xTerraformVersion = "X-Terraform-Version" - requestTimeout = 10 * time.Second - modulesServiceID = "modules.v1" - providersServiceID = "providers.v1" -) - -var tfVersion = version.String() - -// Client provides methods to query Terraform Registries. -type Client struct { - // this is the client to be used for all requests. - client *http.Client - - // services is a required *disco.Disco, which may have services and - // credentials pre-loaded. - services *disco.Disco -} - -// NewClient returns a new initialized registry client. -func NewClient(services *disco.Disco, client *http.Client) *Client { - if services == nil { - services = disco.New() - } - - if client == nil { - client = internalhttpclient.New() - client.Timeout = requestTimeout - } - - services.Transport = client.Transport - - services.SetUserAgent(httpclient.TerraformUserAgent(version.String())) - - return &Client{ - client: client, - services: services, - } -} - -// Discover queries the host, and returns the url for the registry. -func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) { - service, err := c.services.DiscoverServiceURL(host, serviceID) - if err != nil { - return nil, &ServiceUnreachableError{err} - } - if !strings.HasSuffix(service.Path, "/") { - service.Path += "/" - } - return service, nil -} - -// ModuleVersions queries the registry for a module, and returns the available versions. 
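
// A hedged sketch of driving the client above: parse a registry module source
// (the source string is illustrative), then list its versions via the
// ModuleVersions method that follows.
func exampleListVersions() error {
	c := NewClient(nil, nil) // default discovery and HTTP client
	mod, err := regsrc.ParseModuleSource("hashicorp/consul/aws")
	if err != nil {
		return err
	}
	versions, err := c.ModuleVersions(mod)
	if err != nil {
		return err
	}
	for _, m := range versions.Modules {
		for _, v := range m.Versions {
			fmt.Println(v.Version)
		}
	}
	return nil
}
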
-func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) {
-	host, err := module.SvcHost()
-	if err != nil {
-		return nil, err
-	}
-
-	service, err := c.Discover(host, modulesServiceID)
-	if err != nil {
-		return nil, err
-	}
-
-	p, err := url.Parse(path.Join(module.Module(), "versions"))
-	if err != nil {
-		return nil, err
-	}
-
-	service = service.ResolveReference(p)
-
-	log.Printf("[DEBUG] fetching module versions from %q", service)
-
-	req, err := http.NewRequest("GET", service.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	c.addRequestCreds(host, req)
-	req.Header.Set(xTerraformVersion, tfVersion)
-
-	resp, err := c.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	switch resp.StatusCode {
-	case http.StatusOK:
-		// OK
-	case http.StatusNotFound:
-		return nil, &errModuleNotFound{addr: module}
-	default:
-		return nil, fmt.Errorf("error looking up module versions: %s", resp.Status)
-	}
-
-	var versions response.ModuleVersions
-
-	dec := json.NewDecoder(resp.Body)
-	if err := dec.Decode(&versions); err != nil {
-		return nil, err
-	}
-
-	for _, mod := range versions.Modules {
-		for _, v := range mod.Versions {
-			log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source)
-		}
-	}
-
-	return &versions, nil
-}
-
-func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) {
-	creds, err := c.services.CredentialsForHost(host)
-	if err != nil {
-		log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err)
-		return
-	}
-
-	if creds != nil {
-		creds.PrepareRequest(req)
-	}
-}
-
-// ModuleLocation finds the download location for a specific module version.
-// This returns a string, because the final location may contain special go-getter syntax.
-func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, error) {
-	host, err := module.SvcHost()
-	if err != nil {
-		return "", err
-	}
-
-	service, err := c.Discover(host, modulesServiceID)
-	if err != nil {
-		return "", err
-	}
-
-	var p *url.URL
-	if version == "" {
-		p, err = url.Parse(path.Join(module.Module(), "download"))
-	} else {
-		p, err = url.Parse(path.Join(module.Module(), version, "download"))
-	}
-	if err != nil {
-		return "", err
-	}
-	download := service.ResolveReference(p)
-
-	log.Printf("[DEBUG] looking up module location from %q", download)
-
-	req, err := http.NewRequest("GET", download.String(), nil)
-	if err != nil {
-		return "", err
-	}
-
-	c.addRequestCreds(host, req)
-	req.Header.Set(xTerraformVersion, tfVersion)
-
-	resp, err := c.client.Do(req)
-	if err != nil {
-		return "", err
-	}
-	defer resp.Body.Close()
-
-	// there should be no body, but save it for logging
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return "", fmt.Errorf("error reading response body from registry: %s", err)
-	}
-
-	switch resp.StatusCode {
-	case http.StatusOK, http.StatusNoContent:
-		// OK
-	case http.StatusNotFound:
-		return "", fmt.Errorf("module %q version %q not found", module, version)
-	default:
-		// anything else is an error:
-		return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body)
-	}
-
-	// the download location is in the X-Terraform-Get header
-	location := resp.Header.Get(xTerraformGet)
-	if location == "" {
-		return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body)
-	}
-
-	// If location looks like it's trying to be a relative URL, treat it as
-	// one.
-	//
-	// We don't do this for just _any_ location, since the X-Terraform-Get
-	// header is a go-getter location rather than a URL, and so not all
-	// possible values will parse reasonably as URLs.
-	//
-	// When used in conjunction with go-getter we normally require this header
-	// to be an absolute URL, but we are more liberal here because third-party
-	// registry implementations may not "know" their own absolute URLs if
-	// e.g. they are running behind a reverse proxy frontend, or such.
-	if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") {
-		locationURL, err := url.Parse(location)
-		if err != nil {
-			return "", fmt.Errorf("invalid relative URL for %q: %s", module, err)
-		}
-		locationURL = download.ResolveReference(locationURL)
-		location = locationURL.String()
-	}
-
-	return location, nil
-}
-
-// TerraformProviderVersions queries the registry for a provider, and returns the available versions.
-func (c *Client) TerraformProviderVersions(provider *regsrc.TerraformProvider) (*response.TerraformProviderVersions, error) {
-	host, err := provider.SvcHost()
-	if err != nil {
-		return nil, err
-	}
-
-	service, err := c.Discover(host, providersServiceID)
-	if err != nil {
-		return nil, err
-	}
-
-	p, err := url.Parse(path.Join(provider.TerraformProvider(), "versions"))
-	if err != nil {
-		return nil, err
-	}
-
-	service = service.ResolveReference(p)
-
-	log.Printf("[DEBUG] fetching provider versions from %q", service)
-
-	req, err := http.NewRequest("GET", service.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	c.addRequestCreds(host, req)
-	req.Header.Set(xTerraformVersion, tfVersion)
-
-	resp, err := c.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	switch resp.StatusCode {
-	case http.StatusOK:
-		// OK
-	case http.StatusNotFound:
-		return nil, &errProviderNotFound{addr: provider}
-	default:
-		return nil, fmt.Errorf("error looking up provider versions: %s", resp.Status)
-	}
-
-	var versions response.TerraformProviderVersions
-
-	dec := json.NewDecoder(resp.Body)
-	if err := dec.Decode(&versions); err != nil {
-		return nil, err
-	}
-
-	return &versions, nil
-}
-
-// TerraformProviderLocation queries the registry for provider download metadata.
-func (c *Client) TerraformProviderLocation(provider *regsrc.TerraformProvider, version string) (*response.TerraformProviderPlatformLocation, error) {
-	host, err := provider.SvcHost()
-	if err != nil {
-		return nil, err
-	}
-
-	service, err := c.Discover(host, providersServiceID)
-	if err != nil {
-		return nil, err
-	}
-
-	p, err := url.Parse(path.Join(
-		provider.TerraformProvider(),
-		version,
-		"download",
-		provider.OS,
-		provider.Arch,
-	))
-	if err != nil {
-		return nil, err
-	}
-
-	service = service.ResolveReference(p)
-
-	log.Printf("[DEBUG] fetching provider location from %q", service)
-
-	req, err := http.NewRequest("GET", service.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	c.addRequestCreds(host, req)
-	req.Header.Set(xTerraformVersion, tfVersion)
-
-	resp, err := c.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var loc response.TerraformProviderPlatformLocation
-
-	dec := json.NewDecoder(resp.Body)
-	if err := dec.Decode(&loc); err != nil {
-		return nil, err
-	}
-
-	switch resp.StatusCode {
-	case http.StatusOK, http.StatusNoContent:
-		// OK
-	case http.StatusNotFound:
-		return nil, fmt.Errorf("provider %q version %q not found", provider.TerraformProvider(), version)
-
-	default:
-		// anything else is an error:
-		return nil, fmt.Errorf("error getting download location for %q: %s", provider.TerraformProvider(), resp.Status)
-	}
-
-	return &loc, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go
deleted file mode 100644
index b05438c4..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package registry
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc"
-	"github.com/hashicorp/terraform-svchost/disco"
-)
-
-type errModuleNotFound struct {
-	addr *regsrc.Module
-}
-
-func (e *errModuleNotFound) Error() string {
-	return fmt.Sprintf("module %s not found", e.addr)
-}
-
-// IsModuleNotFound returns true only if the given error is a "module not found"
-// error. This allows callers to recognize this particular error condition
-// as distinct from operational errors such as poor network connectivity.
-func IsModuleNotFound(err error) bool {
-	_, ok := err.(*errModuleNotFound)
-	return ok
-}
-
-type errProviderNotFound struct {
-	addr *regsrc.TerraformProvider
-}
-
-func (e *errProviderNotFound) Error() string {
-	return fmt.Sprintf("provider %s not found", e.addr)
-}
-
-// IsServiceNotProvided returns true only if the given error is a "service not provided"
-// error. This allows callers to recognize this particular error condition
-// as distinct from operational errors such as poor network connectivity.
-func IsServiceNotProvided(err error) bool {
-	_, ok := err.(*disco.ErrServiceNotProvided)
-	return ok
-}
-
-// ServiceUnreachableError indicates that the registry service is unreachable.
-type ServiceUnreachableError struct {
-	err error
-}
-
-func (e *ServiceUnreachableError) Error() string {
-	return e.err.Error()
-}
-
-// IsServiceUnreachable returns true if the registry/discovery service was unreachable.
-func IsServiceUnreachable(err error) bool {
-	_, ok := err.(*ServiceUnreachableError)
-	return ok
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go
deleted file mode 100644
index c9bc40be..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package regsrc
-
-import (
-	"regexp"
-	"strings"
-
-	"github.com/hashicorp/terraform-svchost"
-)
-
-var (
-	// InvalidHostString is a placeholder returned when a raw host can't be
-	// converted by IDNA spec. It will never be returned for any host for which
-	// Valid() is true.
-	InvalidHostString = "<invalid host>"
-
-	// urlLabelEndSubRe is a sub-expression that matches any character that's
-	// allowed at the start or end of a URL label according to RFC1123.
-	urlLabelEndSubRe = "[0-9A-Za-z]"
-
-	// urlLabelMidSubRe is a sub-expression that matches any character that's
-	// allowed in a non-start, non-end position of a URL label according to
-	// RFC1123.
-	urlLabelMidSubRe = "[0-9A-Za-z-]"
-
-	// urlLabelUnicodeSubRe is a sub-expression that matches any non-ascii char
-	// in an IDN (Unicode) display URL. It's not strict - there are only ~15k
-	// valid Unicode points in IDN RFC (some with conditions). We are just going
-	// with being liberal with matching and then erroring if we fail to convert
-	// to punycode later (which validates chars fully). This at least ensures
This at least ensures
-	// ASCII chars disallowed by the RFC1123 parts above don't become legal
-	// again.
-	urlLabelUnicodeSubRe = "[^[:ascii:]]"
-
-	// hostLabelSubRe is the sub-expression that matches a valid hostname label.
-	// It does not anchor the start or end so it can be composed into more
-	// complex RegExps below. Note that for sanity we don't handle disallowing
-	// raw punycode in this regexp (esp. since re2 doesn't support negative
-	// lookbehind, but we can capture its presence here to check later).
-	hostLabelSubRe = "" +
-		// Match valid initial char, or unicode char
-		"(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" +
-		// Optionally, match 0 to 61 valid URL or Unicode chars,
-		// followed by one valid end char or unicode char
-		"(?:" +
-		"(?:" + urlLabelMidSubRe + "|" + urlLabelUnicodeSubRe + "){0,61}" +
-		"(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" +
-		")?"
-
-	// hostSubRe is the sub-expression that matches a valid host prefix.
-	// Allows custom port.
-	hostSubRe = hostLabelSubRe + "(?:\\." + hostLabelSubRe + ")+(?::\\d+)?"
-
-	// hostRe is a regexp that matches a valid host prefix. Additional
-	// validation of unicode strings is needed for matches.
-	hostRe = regexp.MustCompile("^" + hostSubRe + "$")
-)
-
-// FriendlyHost describes a registry instance identified in source strings by a
-// simple bare hostname like registry.terraform.io.
-type FriendlyHost struct {
-	Raw string
-}
-
-func NewFriendlyHost(host string) *FriendlyHost {
-	return &FriendlyHost{Raw: host}
-}
-
-// ParseFriendlyHost attempts to parse a valid "friendly host" prefix from the
-// given string. If no valid prefix is found, host will be nil and rest will
-// contain the full source string. The host prefix must terminate at the end of
-// the input or at the first / character. If one or more characters exist after
-// the first /, they will be returned as rest (without the / delimiter).
-// Hostnames containing punycode WILL be parsed successfully since they may have
-// come from an internal normalized source string, however they should be
-// considered invalid if the string came from a user directly. This must be
-// checked explicitly for user-input strings by calling Valid() on the
-// returned host.
-func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) {
-	parts := strings.SplitN(source, "/", 2)
-
-	if hostRe.MatchString(parts[0]) {
-		host = &FriendlyHost{Raw: parts[0]}
-		if len(parts) == 2 {
-			rest = parts[1]
-		}
-		return
-	}
-
-	// No match, return whole string as rest along with nil host
-	rest = source
-	return
-}
-
-// Valid returns whether the host prefix is considered valid in any case.
-// Examples of invalid prefixes include ones that don't conform to the host
-// name specifications. Note that IDN prefixes containing punycode are not
-// valid input, which we expect to always be in user-input or normalised
-// display form.
-func (h *FriendlyHost) Valid() bool {
-	return svchost.IsValid(h.Raw)
-}
-
-// Display returns the host formatted for display to the user in CLI or web
-// output.
-func (h *FriendlyHost) Display() string {
-	return svchost.ForDisplay(h.Raw)
-}
-
-// Normalized returns the host formatted for internal reference or comparison.
-func (h *FriendlyHost) Normalized() string {
-	host, err := svchost.ForComparison(h.Raw)
-	if err != nil {
-		return InvalidHostString
-	}
-	return string(host)
-}
-
-// String returns the host formatted as the user originally typed it, assuming
-// it was parsed from user input.
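-//
-// For illustration only (a hypothetical sketch, not part of the original
-// source), the three formatting helpers differ roughly like this:
-//
-//	h, _ := ParseFriendlyHost("Registry.Terraform.io/hashicorp/consul/aws")
-//	h.String()     // "Registry.Terraform.io" (exactly as typed)
-//	h.Display()    // "registry.terraform.io" (for CLI/web output)
-//	h.Normalized() // "registry.terraform.io" (for internal comparison)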
-func (h *FriendlyHost) String() string { - return h.Raw -} - -// Equal compares the FriendlyHost against another instance taking normalization -// into account. Invalid hosts cannot be compared and will always return false. -func (h *FriendlyHost) Equal(other *FriendlyHost) bool { - if other == nil { - return false - } - - otherHost, err := svchost.ForComparison(other.Raw) - if err != nil { - return false - } - - host, err := svchost.ForComparison(h.Raw) - if err != nil { - return false - } - - return otherHost == host -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go deleted file mode 100644 index eb37481f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go +++ /dev/null @@ -1,175 +0,0 @@ -package regsrc - -import ( - "errors" - "fmt" - "regexp" - "strings" - - svchost "github.com/hashicorp/terraform-svchost" -) - -var ( - ErrInvalidModuleSource = errors.New("not a valid registry module source") - - // nameSubRe is the sub-expression that matches a valid module namespace or - // name. It's strictly a super-set of what GitHub allows for user/org and - // repo names respectively, but more restrictive than our original repo-name - // regex which allowed periods but could cause ambiguity with hostname - // prefixes. It does not anchor the start or end so it can be composed into - // more complex RegExps below. Alphanumeric with - and _ allowed in non - // leading or trailing positions. Max length 64 chars. (GitHub username is - // 38 max.) - nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?" - - // providerSubRe is the sub-expression that matches a valid provider. It - // does not anchor the start or end so it can be composed into more complex - // RegExps below. Only lowercase chars and digits are supported in practice. - // Max length 64 chars. - providerSubRe = "[0-9a-z]{1,64}" - - // moduleSourceRe is a regular expression that matches the basic - // namespace/name/provider[//...] format for registry sources. It assumes - // any FriendlyHost prefix has already been removed if present. - moduleSourceRe = regexp.MustCompile( - fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", - nameSubRe, nameSubRe, providerSubRe)) - - // these hostnames are not allowed as registry sources, because they are - // already special case module sources in terraform. - disallowed = map[string]bool{ - "github.com": true, - "bitbucket.org": true, - } -) - -// Module describes a Terraform Registry Module source. -type Module struct { - // RawHost is the friendly host prefix if one was present. It might be nil - // if the original source had no host prefix which implies - // PublicRegistryHost but is distinct from having an actual pointer to - // PublicRegistryHost since it encodes the fact the original string didn't - // include a host prefix at all which is significant for recovering actual - // input not just normalized form. Most callers should access it with Host() - // which will return public registry host instance if it's nil. - RawHost *FriendlyHost - RawNamespace string - RawName string - RawProvider string - RawSubmodule string -} - -// ParseModuleSource attempts to parse source as a Terraform registry module -// source. If the string is not found to be in a valid format, -// ErrInvalidModuleSource is returned. Note that this can only be used on -// "input" strings, e.g. 
either ones supplied by the user or potentially -// normalised but in Display form (unicode). It will fail to parse a source with -// a punycoded domain since this is not permitted input from a user. If you have -// an already normalized string internally, you can compare it without parsing -// by comparing with the normalized version of the subject with the normal -// string equality operator. -func ParseModuleSource(source string) (*Module, error) { - // See if there is a friendly host prefix. - host, rest := ParseFriendlyHost(source) - if host != nil { - if !host.Valid() || disallowed[host.Display()] { - return nil, ErrInvalidModuleSource - } - } - - matches := moduleSourceRe.FindStringSubmatch(rest) - if len(matches) < 4 { - return nil, ErrInvalidModuleSource - } - - m := &Module{ - RawHost: host, - RawNamespace: matches[1], - RawName: matches[2], - RawProvider: matches[3], - } - - if len(matches) == 5 { - m.RawSubmodule = matches[4] - } - - return m, nil -} - -// Display returns the source formatted for display to the user in CLI or web -// output. -func (m *Module) Display() string { - return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false) -} - -// Normalized returns the source formatted for internal reference or comparison. -func (m *Module) Normalized() string { - return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false) -} - -// String returns the source formatted as the user originally typed it assuming -// it was parsed from user input. -func (m *Module) String() string { - // Don't normalize public registry hostname - leave it exactly like the user - // input it. - hostPrefix := "" - if m.RawHost != nil { - hostPrefix = m.RawHost.String() + "/" - } - return m.formatWithPrefix(hostPrefix, true) -} - -// Equal compares the module source against another instance taking -// normalization into account. -func (m *Module) Equal(other *Module) bool { - return m.Normalized() == other.Normalized() -} - -// Host returns the FriendlyHost object describing which registry this module is -// in. If the original source string had not host component this will return the -// PublicRegistryHost. -func (m *Module) Host() *FriendlyHost { - if m.RawHost == nil { - return PublicRegistryHost - } - return m.RawHost -} - -func (m *Module) normalizedHostPrefix(host string) string { - if m.Host().Equal(PublicRegistryHost) { - return "" - } - return host + "/" -} - -func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { - suffix := "" - if m.RawSubmodule != "" { - suffix = "//" + m.RawSubmodule - } - str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName, - m.RawProvider, suffix) - - // lower case by default - if !preserveCase { - return strings.ToLower(str) - } - return str -} - -// Module returns just the registry ID of the module, without a hostname or -// suffix. -func (m *Module) Module() string { - return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) -} - -// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may -// contain an invalid hostname, this also returns an error indicating if it -// could be converted to a svchost.Hostname. If no host is specified, the -// default PublicRegistryHost is returned. 
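-//
-// A hypothetical usage sketch (illustrative only, with an assumed
-// private-registry host):
-//
-//	m, _ := ParseModuleSource("example.com/mycorp/network/aws")
-//	host, _ := m.SvcHost() // svchost.Hostname("example.com")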
-func (m *Module) SvcHost() (svchost.Hostname, error) { - if m.RawHost == nil { - return svchost.ForComparison(PublicRegistryHost.Raw) - } - return svchost.ForComparison(m.RawHost.Raw) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go deleted file mode 100644 index c430bf14..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package regsrc provides helpers for working with source strings that identify -// resources within a Terraform registry. -package regsrc - -var ( - // PublicRegistryHost is a FriendlyHost that represents the public registry. - PublicRegistryHost = NewFriendlyHost("registry.terraform.io") -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go deleted file mode 100644 index 7205d03b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go +++ /dev/null @@ -1,60 +0,0 @@ -package regsrc - -import ( - "fmt" - "runtime" - "strings" - - "github.com/hashicorp/terraform-svchost" -) - -var ( - // DefaultProviderNamespace represents the namespace for canonical - // HashiCorp-controlled providers. - DefaultProviderNamespace = "-" -) - -// TerraformProvider describes a Terraform Registry Provider source. -type TerraformProvider struct { - RawHost *FriendlyHost - RawNamespace string - RawName string - OS string - Arch string -} - -// NewTerraformProvider constructs a new provider source. -func NewTerraformProvider(name, os, arch string) *TerraformProvider { - if os == "" { - os = runtime.GOOS - } - if arch == "" { - arch = runtime.GOARCH - } - - // separate namespace if included - namespace := DefaultProviderNamespace - if names := strings.SplitN(name, "/", 2); len(names) == 2 { - namespace, name = names[0], names[1] - } - p := &TerraformProvider{ - RawHost: PublicRegistryHost, - RawNamespace: namespace, - RawName: name, - OS: os, - Arch: arch, - } - - return p -} - -// Provider returns just the registry ID of the provider -func (p *TerraformProvider) TerraformProvider() string { - return fmt.Sprintf("%s/%s", p.RawNamespace, p.RawName) -} - -// SvcHost returns the svchost.Hostname for this provider. The -// default PublicRegistryHost is returned. -func (p *TerraformProvider) SvcHost() (svchost.Hostname, error) { - return svchost.ForComparison(PublicRegistryHost.Raw) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go deleted file mode 100644 index 06163963..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go +++ /dev/null @@ -1,46 +0,0 @@ -package response - -// ModuleSubmodule is the metadata about a specific submodule within -// a module. This includes the root module as a special case. 
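-//
-// A hypothetical JSON fragment (illustrative only, field values assumed):
-//
-//	{"path": "modules/vpc", "readme": "# VPC module", "empty": false,
-//	 "inputs": [...], "outputs": [...], "dependencies": [...], "resources": [...]}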
-type ModuleSubmodule struct { - Path string `json:"path"` - Readme string `json:"readme"` - Empty bool `json:"empty"` - - Inputs []*ModuleInput `json:"inputs"` - Outputs []*ModuleOutput `json:"outputs"` - Dependencies []*ModuleDep `json:"dependencies"` - Resources []*ModuleResource `json:"resources"` -} - -// ModuleInput is an input for a module. -type ModuleInput struct { - Name string `json:"name"` - Description string `json:"description"` - Default string `json:"default"` -} - -// ModuleOutput is an output for a module. -type ModuleOutput struct { - Name string `json:"name"` - Description string `json:"description"` -} - -// ModuleDep is an output for a module. -type ModuleDep struct { - Name string `json:"name"` - Source string `json:"source"` - Version string `json:"version"` -} - -// ModuleProviderDep is the output for a provider dependency -type ModuleProviderDep struct { - Name string `json:"name"` - Version string `json:"version"` -} - -// ModuleResource is an output for a module. -type ModuleResource struct { - Name string `json:"name"` - Type string `json:"type"` -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go deleted file mode 100644 index f69e9750..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go +++ /dev/null @@ -1,32 +0,0 @@ -package response - -// ModuleVersions is the response format that contains all metadata about module -// versions needed for terraform CLI to resolve version constraints. See RFC -// TF-042 for details on this format. -type ModuleVersions struct { - Modules []*ModuleProviderVersions `json:"modules"` -} - -// ModuleProviderVersions is the response format for a single module instance, -// containing metadata about all versions and their dependencies. -type ModuleProviderVersions struct { - Source string `json:"source"` - Versions []*ModuleVersion `json:"versions"` -} - -// ModuleVersion is the output metadata for a given version needed by CLI to -// resolve candidate versions to satisfy requirements. -type ModuleVersion struct { - Version string `json:"version"` - Root VersionSubmodule `json:"root"` - Submodules []*VersionSubmodule `json:"submodules"` -} - -// VersionSubmodule is the output metadata for a submodule within a given -// version needed by CLI to resolve candidate versions to satisfy requirements. -// When representing the Root in JSON the path is omitted. -type VersionSubmodule struct { - Path string `json:"path,omitempty"` - Providers []*ModuleProviderDep `json:"providers"` - Dependencies []*ModuleDep `json:"dependencies"` -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go deleted file mode 100644 index 75a92549..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go +++ /dev/null @@ -1,65 +0,0 @@ -package response - -import ( - "net/url" - "strconv" -) - -// PaginationMeta is a structure included in responses for pagination. 
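-//
-// For illustration (hypothetical values): NewPaginationMeta(10, 10, true, url)
-// below yields Limit=10, CurrentOffset=10, NextOffset=20 and PrevOffset=0;
-// the offset query parameter is written into NextURL and elided from PrevURL
-// when it equals the default of 0.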
-type PaginationMeta struct { - Limit int `json:"limit"` - CurrentOffset int `json:"current_offset"` - NextOffset *int `json:"next_offset,omitempty"` - PrevOffset *int `json:"prev_offset,omitempty"` - NextURL string `json:"next_url,omitempty"` - PrevURL string `json:"prev_url,omitempty"` -} - -// NewPaginationMeta populates pagination meta data from result parameters -func NewPaginationMeta(offset, limit int, hasMore bool, currentURL string) PaginationMeta { - pm := PaginationMeta{ - Limit: limit, - CurrentOffset: offset, - } - - // Calculate next/prev offsets, leave nil if not valid pages - nextOffset := offset + limit - if hasMore { - pm.NextOffset = &nextOffset - } - - prevOffset := offset - limit - if prevOffset < 0 { - prevOffset = 0 - } - if prevOffset < offset { - pm.PrevOffset = &prevOffset - } - - // If URL format provided, populate URLs. Intentionally swallow URL errors for now, API should - // catch missing URLs if we call with bad URL arg (and we care about them being present). - if currentURL != "" && pm.NextOffset != nil { - pm.NextURL, _ = setQueryParam(currentURL, "offset", *pm.NextOffset, 0) - } - if currentURL != "" && pm.PrevOffset != nil { - pm.PrevURL, _ = setQueryParam(currentURL, "offset", *pm.PrevOffset, 0) - } - - return pm -} - -func setQueryParam(baseURL, key string, val, defaultVal int) (string, error) { - u, err := url.Parse(baseURL) - if err != nil { - return "", err - } - q := u.Query() - if val == defaultVal { - // elide param if it's the default value - q.Del(key) - } else { - q.Set(key, strconv.Itoa(val)) - } - u.RawQuery = q.Encode() - return u.String(), nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go deleted file mode 100644 index c2c333b0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go +++ /dev/null @@ -1,95 +0,0 @@ -package response - -import ( - "sort" - "strings" - - version "github.com/hashicorp/go-version" -) - -// TerraformProvider is the response structure for all required information for -// Terraform to choose a download URL. It must include all versions and all -// platforms for Terraform to perform version and os/arch constraint matching -// locally. -type TerraformProvider struct { - ID string `json:"id"` - - Versions []*TerraformProviderVersion `json:"versions"` -} - -// TerraformProviderVersion is the Terraform-specific response structure for a -// provider version. -type TerraformProviderVersion struct { - Version string `json:"version"` - Protocols []string `json:"protocols"` - - Platforms []*TerraformProviderPlatform `json:"platforms"` -} - -// TerraformProviderVersions is the Terraform-specific response structure for an -// array of provider versions -type TerraformProviderVersions struct { - ID string `json:"id"` - Versions []*TerraformProviderVersion `json:"versions"` - Warnings []string `json:"warnings"` -} - -// TerraformProviderPlatform is the Terraform-specific response structure for a -// provider platform. -type TerraformProviderPlatform struct { - OS string `json:"os"` - Arch string `json:"arch"` -} - -// TerraformProviderPlatformLocation is the Terraform-specific response -// structure for a provider platform with all details required to perform a -// download. 
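-//
-// A hypothetical instance (illustrative only) might carry OS "linux",
-// Arch "amd64", Filename "terraform-provider-example_1.0.0_linux_amd64.zip",
-// with DownloadURL, ShasumsURL and ShasumsSignatureURL pointing at the
-// corresponding release artifacts.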
-type TerraformProviderPlatformLocation struct { - Protocols []string `json:"protocols"` - OS string `json:"os"` - Arch string `json:"arch"` - Filename string `json:"filename"` - DownloadURL string `json:"download_url"` - ShasumsURL string `json:"shasums_url"` - ShasumsSignatureURL string `json:"shasums_signature_url"` - Shasum string `json:"shasum"` - - SigningKeys SigningKeyList `json:"signing_keys"` -} - -// SigningKeyList is the response structure for a list of signing keys. -type SigningKeyList struct { - GPGKeys []*GPGKey `json:"gpg_public_keys"` -} - -// GPGKey is the response structure for a GPG key. -type GPGKey struct { - ASCIIArmor string `json:"ascii_armor"` - Source string `json:"source"` - SourceURL *string `json:"source_url"` -} - -// Collection type for TerraformProviderVersion -type ProviderVersionCollection []*TerraformProviderVersion - -// GPGASCIIArmor returns an ASCII-armor-formatted string for all of the gpg -// keys in the response. -func (signingKeys *SigningKeyList) GPGASCIIArmor() string { - keys := []string{} - - for _, gpgKey := range signingKeys.GPGKeys { - keys = append(keys, gpgKey.ASCIIArmor) - } - - return strings.Join(keys, "\n") -} - -// Sort sorts versions from newest to oldest. -func (v ProviderVersionCollection) Sort() { - sort.Slice(v, func(i, j int) bool { - versionA, _ := version.NewVersion(v[i].Version) - versionB, _ := version.NewVersion(v[j].Version) - - return versionA.GreaterThan(versionB) - }) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go deleted file mode 100644 index 7dd74ac7..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package states contains the types that are used to represent Terraform -// states. -package states diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go deleted file mode 100644 index 0dc73499..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go +++ /dev/null @@ -1,35 +0,0 @@ -// Code generated by "stringer -type EachMode"; DO NOT EDIT. - -package states - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[NoEach-0] - _ = x[EachList-76] - _ = x[EachMap-77] -} - -const ( - _EachMode_name_0 = "NoEach" - _EachMode_name_1 = "EachListEachMap" -) - -var ( - _EachMode_index_1 = [...]uint8{0, 8, 15} -) - -func (i EachMode) String() string { - switch { - case i == 0: - return _EachMode_name_0 - case 76 <= i && i <= 77: - i -= 76 - return _EachMode_name_1[_EachMode_index_1[i]:_EachMode_index_1[i+1]] - default: - return "EachMode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go deleted file mode 100644 index 891adc00..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go +++ /dev/null @@ -1,20 +0,0 @@ -package states - -// Generation is used to represent multiple objects in a succession of objects -// represented by a single resource instance address. A resource instance can -// have multiple generations over its lifetime due to object replacement -// (when a change can't be applied without destroying and re-creating), and -// multiple generations can exist at the same time when create_before_destroy -// is used. -// -// A Generation value can either be the value of the variable "CurrentGen" or -// a value of type DeposedKey. Generation values can be compared for equality -// using "==" and used as map keys. The zero value of Generation (nil) is not -// a valid generation and must not be used. -type Generation interface { - generation() -} - -// CurrentGen is the Generation representing the currently-active object for -// a resource instance. -var CurrentGen Generation diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go deleted file mode 100644 index 3bb717d3..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go +++ /dev/null @@ -1,120 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// ResourceInstanceObject is the local representation of a specific remote -// object associated with a resource instance. In practice not all remote -// objects are actually remote in the sense of being accessed over the network, -// but this is the most common case. -// -// It is not valid to mutate a ResourceInstanceObject once it has been created. -// Instead, create a new object and replace the existing one. -type ResourceInstanceObject struct { - // Value is the object-typed value representing the remote object within - // Terraform. - Value cty.Value - - // Private is an opaque value set by the provider when this object was - // last created or updated. Terraform Core does not use this value in - // any way and it is not exposed anywhere in the user interface, so - // a provider can use it for retaining any necessary private state. - Private []byte - - // Status represents the "readiness" of the object as of the last time - // it was updated. - Status ObjectStatus - - // Dependencies is a set of other addresses in the same module which - // this instance depended on when the given attributes were evaluated. 
-	// This is used to construct the dependency relationships for an object
-	// whose configuration is no longer available, such as if it has been
-	// removed from configuration altogether, or is now deposed.
-	Dependencies []addrs.Referenceable
-}
-
-// ObjectStatus represents the status of a RemoteObject.
-type ObjectStatus rune
-
-//go:generate go run golang.org/x/tools/cmd/stringer -type ObjectStatus
-
-const (
-	// ObjectReady is an object status for an object that is ready to use.
-	ObjectReady ObjectStatus = 'R'
-
-	// ObjectTainted is an object status representing an object that is in
-	// an unrecoverable bad state due to a partial failure during a create,
-	// update, or delete operation. Since it cannot be moved into the
-	// ObjectReady state, a tainted object must be replaced.
-	ObjectTainted ObjectStatus = 'T'
-
-	// ObjectPlanned is a special object status used only for the transient
-	// placeholder objects we place into state during the refresh and plan
-	// walks to stand in for objects that will be created during apply.
-	//
-	// Any object of this status must have a corresponding change recorded
-	// in the current plan, whose value must then be used in preference to
-	// the value stored in state when evaluating expressions. A planned
-	// object stored in state will be incomplete if any of its attributes are
-	// not yet known, and the plan must be consulted in order to "see" those
-	// unknown values, because the state is not able to represent them.
-	ObjectPlanned ObjectStatus = 'P'
-)
-
-// Encode marshals the value within the receiver to produce a
-// ResourceInstanceObjectSrc ready to be written to a state file.
-//
-// The given type must be the implied type of the resource type schema, and
-// the given value must conform to it. It is important to pass the schema
-// type and not the object's own type so that dynamically-typed attributes
-// will be stored correctly. The caller must also provide the version number
-// of the schema that the given type was derived from, which will be recorded
-// in the source object so it can be used to detect when schema migration is
-// required on read.
-//
-// The returned object may share internal references with the receiver and
-// so the caller must not mutate the receiver any further once this
-// method is called.
-func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
-	// Our state serialization can't represent unknown values, so we convert
-	// them to nulls here. This is lossy, but nobody should be writing unknown
-	// values here and expecting to get them out again later.
-	//
-	// We get unknown values here while we're building out a "planned state"
-	// during the plan phase, but the value stored in the plan takes precedence
-	// for expression evaluation. The apply step should never produce unknown
-	// values, but if it does it's the responsibility of the caller to detect
-	// and raise an error about that.
-	val := cty.UnknownAsNull(o.Value)
-
-	src, err := ctyjson.Marshal(val, ty)
-	if err != nil {
-		return nil, err
-	}
-
-	return &ResourceInstanceObjectSrc{
-		SchemaVersion: schemaVersion,
-		AttrsJSON:     src,
-		Private:       o.Private,
-		Status:        o.Status,
-		Dependencies:  o.Dependencies,
-	}, nil
-}
-
-// AsTainted returns a deep copy of the receiver with the status updated to
-// ObjectTainted.
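-//
-// For example (sketch, assuming obj is a non-nil *ResourceInstanceObject):
-//
-//	tainted := obj.AsTainted()
-//	// obj.Status is unchanged; tainted.Status == ObjectTainted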
-func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject { - if o == nil { - // A nil object can't be tainted, but we'll allow this anyway to - // avoid a crash, since we presumably intend to eventually record - // the object has having been deleted anyway. - return nil - } - ret := o.DeepCopy() - ret.Status = ObjectTainted - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go deleted file mode 100644 index 728ad80d..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go +++ /dev/null @@ -1,113 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" -) - -// ResourceInstanceObjectSrc is a not-fully-decoded version of -// ResourceInstanceObject. Decoding of it can be completed by first handling -// any schema migration steps to get to the latest schema version and then -// calling method Decode with the implied type of the latest schema. -type ResourceInstanceObjectSrc struct { - // SchemaVersion is the resource-type-specific schema version number that - // was current when either AttrsJSON or AttrsFlat was encoded. Migration - // steps are required if this is less than the current version number - // reported by the corresponding provider. - SchemaVersion uint64 - - // AttrsJSON is a JSON-encoded representation of the object attributes, - // encoding the value (of the object type implied by the associated resource - // type schema) that represents this remote object in Terraform Language - // expressions, and is compared with configuration when producing a diff. - // - // This is retained in JSON format here because it may require preprocessing - // before decoding if, for example, the stored attributes are for an older - // schema version which the provider must upgrade before use. If the - // version is current, it is valid to simply decode this using the - // type implied by the current schema, without the need for the provider - // to perform an upgrade first. - // - // When writing a ResourceInstanceObject into the state, AttrsJSON should - // always be conformant to the current schema version and the current - // schema version should be recorded in the SchemaVersion field. - AttrsJSON []byte - - // AttrsFlat is a legacy form of attributes used in older state file - // formats, and in the new state format for objects that haven't yet been - // upgraded. This attribute is mutually exclusive with Attrs: for any - // ResourceInstanceObject, only one of these attributes may be populated - // and the other must be nil. - // - // An instance object with this field populated should be upgraded to use - // Attrs at the earliest opportunity, since this legacy flatmap-based - // format will be phased out over time. AttrsFlat should not be used when - // writing new or updated objects to state; instead, callers must follow - // the recommendations in the AttrsJSON documentation above. - AttrsFlat map[string]string - - // These fields all correspond to the fields of the same name on - // ResourceInstanceObject. 
- Private []byte - Status ObjectStatus - Dependencies []addrs.Referenceable -} - -// Decode unmarshals the raw representation of the object attributes. Pass the -// implied type of the corresponding resource type schema for correct operation. -// -// Before calling Decode, the caller must check that the SchemaVersion field -// exactly equals the version number of the schema whose implied type is being -// passed, or else the result is undefined. -// -// The returned object may share internal references with the receiver and -// so the caller must not mutate the receiver any further once once this -// method is called. -func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) { - var val cty.Value - var err error - if os.AttrsFlat != nil { - // Legacy mode. We'll do our best to unpick this from the flatmap. - val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty) - if err != nil { - return nil, err - } - } else { - val, err = ctyjson.Unmarshal(os.AttrsJSON, ty) - if err != nil { - return nil, err - } - } - - return &ResourceInstanceObject{ - Value: val, - Status: os.Status, - Dependencies: os.Dependencies, - Private: os.Private, - }, nil -} - -// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the -// metadata from the receiver and writing in the given new schema version -// and attribute value that are presumed to have resulted from upgrading -// from an older schema version. -func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) { - new := os.DeepCopy() - new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap - - // This is the same principle as ResourceInstanceObject.Encode, but - // avoiding a decode/re-encode cycle because we don't have type info - // available for the "old" attributes. - newAttrs = cty.UnknownAsNull(newAttrs) - src, err := ctyjson.Marshal(newAttrs, newType) - if err != nil { - return nil, err - } - - new.AttrsJSON = src - new.SchemaVersion = newSchemaVersion - return new, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go deleted file mode 100644 index 6b74cbfa..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go +++ /dev/null @@ -1,268 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Module is a container for the states of objects within a particular module. -type Module struct { - Addr addrs.ModuleInstance - - // Resources contains the state for each resource. The keys in this map are - // an implementation detail and must not be used by outside callers. - Resources map[string]*Resource - - // OutputValues contains the state for each output value. The keys in this - // map are output value names. - OutputValues map[string]*OutputValue - - // LocalValues contains the value for each named output value. The keys - // in this map are local value names. - LocalValues map[string]cty.Value -} - -// NewModule constructs an empty module state for the given module address. 
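-//
-// For example (a hypothetical usage sketch):
-//
-//	ms := NewModule(addrs.RootModuleInstance)
-//	ms.SetLocalValue("region", cty.StringVal("us-east-1"))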
-func NewModule(addr addrs.ModuleInstance) *Module { - return &Module{ - Addr: addr, - Resources: map[string]*Resource{}, - OutputValues: map[string]*OutputValue{}, - LocalValues: map[string]cty.Value{}, - } -} - -// Resource returns the state for the resource with the given address within -// the receiving module state, or nil if the requested resource is not tracked -// in the state. -func (ms *Module) Resource(addr addrs.Resource) *Resource { - return ms.Resources[addr.String()] -} - -// ResourceInstance returns the state for the resource instance with the given -// address within the receiving module state, or nil if the requested instance -// is not tracked in the state. -func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance { - rs := ms.Resource(addr.Resource) - if rs == nil { - return nil - } - return rs.Instance(addr.Key) -} - -// SetResourceMeta updates the resource-level metadata for the resource -// with the given address, creating the resource state for it if it doesn't -// already exist. -func (ms *Module) SetResourceMeta(addr addrs.Resource, eachMode EachMode, provider addrs.AbsProviderConfig) { - rs := ms.Resource(addr) - if rs == nil { - rs = &Resource{ - Addr: addr, - Instances: map[addrs.InstanceKey]*ResourceInstance{}, - } - ms.Resources[addr.String()] = rs - } - - rs.EachMode = eachMode - rs.ProviderConfig = provider -} - -// RemoveResource removes the entire state for the given resource, taking with -// it any instances associated with the resource. This should generally be -// called only for resource objects whose instances have all been destroyed. -func (ms *Module) RemoveResource(addr addrs.Resource) { - delete(ms.Resources, addr.String()) -} - -// SetResourceInstanceCurrent saves the given instance object as the current -// generation of the resource instance with the given address, simulataneously -// updating the recorded provider configuration address, dependencies, and -// resource EachMode. -// -// Any existing current instance object for the given resource is overwritten. -// Set obj to nil to remove the primary generation object altogether. If there -// are no deposed objects then the instance will be removed altogether. -// -// The provider address and "each mode" are resource-wide settings and so they -// are updated for all other instances of the same resource as a side-effect of -// this call. -func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) - - rs := ms.Resource(addr.Resource) - is := rs.EnsureInstance(addr.Key) - - is.Current = obj - - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. - delete(rs.Instances, addr.Key) - } - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// SetResourceInstanceDeposed saves the given instance object as a deposed -// generation of the resource instance with the given address and deposed key. -// -// Call this method only for pre-existing deposed objects that already have -// a known DeposedKey. For example, this method is useful if reloading objects -// that were persisted to a state file. 
To mark the current object as deposed, -// use DeposeResourceInstanceObject instead. -// -// The resource that contains the given instance must already exist in the -// state, or this method will panic. Use Resource to check first if its -// presence is not already guaranteed. -// -// Any existing current instance object for the given resource and deposed key -// is overwritten. Set obj to nil to remove the deposed object altogether. If -// the instance is left with no objects after this operation then it will -// be removed from its containing resource altogether. -func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) - - rs := ms.Resource(addr.Resource) - is := rs.EnsureInstance(addr.Key) - if obj != nil { - is.Deposed[key] = obj - } else { - delete(is.Deposed, key) - } - - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. - delete(rs.Instances, addr.Key) - } - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// ForgetResourceInstanceDeposed removes the record of the deposed object with -// the given address and key, if present. If not present, this is a no-op. -func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) { - rs := ms.Resource(addr.Resource) - if rs == nil { - return - } - is := rs.Instance(addr.Key) - if is == nil { - return - } - delete(is.Deposed, key) - - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. - delete(rs.Instances, addr.Key) - } - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// deposeResourceInstanceObject is the real implementation of -// SyncState.DeposeResourceInstanceObject. -func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey { - is := ms.ResourceInstance(addr) - if is == nil { - return NotDeposed - } - return is.deposeCurrentObject(forceKey) -} - -// maybeRestoreResourceInstanceDeposed is the real implementation of -// SyncState.MaybeRestoreResourceInstanceDeposed. -func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool { - rs := ms.Resource(addr.Resource) - if rs == nil { - return false - } - is := rs.Instance(addr.Key) - if is == nil { - return false - } - if is.Current != nil { - return false - } - if len(is.Deposed) == 0 { - return false - } - is.Current = is.Deposed[key] - delete(is.Deposed, key) - return true -} - -// SetOutputValue writes an output value into the state, overwriting any -// existing value of the same name. -func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue { - os := &OutputValue{ - Value: value, - Sensitive: sensitive, - } - ms.OutputValues[name] = os - return os -} - -// RemoveOutputValue removes the output value of the given name from the state, -// if it exists. 
This method is a no-op if there is no value of the given -// name. -func (ms *Module) RemoveOutputValue(name string) { - delete(ms.OutputValues, name) -} - -// SetLocalValue writes a local value into the state, overwriting any -// existing value of the same name. -func (ms *Module) SetLocalValue(name string, value cty.Value) { - ms.LocalValues[name] = value -} - -// RemoveLocalValue removes the local value of the given name from the state, -// if it exists. This method is a no-op if there is no value of the given -// name. -func (ms *Module) RemoveLocalValue(name string) { - delete(ms.LocalValues, name) -} - -// PruneResourceHusks is a specialized method that will remove any Resource -// objects that do not contain any instances, even if they have an EachMode. -// -// You probably shouldn't call this! See the method of the same name on -// type State for more information on what this is for and the rare situations -// where it is safe to use. -func (ms *Module) PruneResourceHusks() { - for _, rs := range ms.Resources { - if len(rs.Instances) == 0 { - ms.RemoveResource(rs.Addr) - } - } -} - -// empty returns true if the receving module state is contributing nothing -// to the state. In other words, it returns true if the module could be -// removed from the state altogether without changing the meaning of the state. -// -// In practice a module containing no objects is the same as a non-existent -// module, and so we can opportunistically clean up once a module becomes -// empty on the assumption that it will be re-added if needed later. -func (ms *Module) empty() bool { - if ms == nil { - return true - } - - // This must be updated to cover any new collections added to Module - // in future. - return (len(ms.Resources) == 0 && - len(ms.OutputValues) == 0 && - len(ms.LocalValues) == 0) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go deleted file mode 100644 index 96a6db2f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT. - -package states - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ObjectReady-82] - _ = x[ObjectTainted-84] - _ = x[ObjectPlanned-80] -} - -const ( - _ObjectStatus_name_0 = "ObjectPlanned" - _ObjectStatus_name_1 = "ObjectReady" - _ObjectStatus_name_2 = "ObjectTainted" -) - -func (i ObjectStatus) String() string { - switch { - case i == 80: - return _ObjectStatus_name_0 - case i == 82: - return _ObjectStatus_name_1 - case i == 84: - return _ObjectStatus_name_2 - default: - return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go deleted file mode 100644 index d232b76d..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go +++ /dev/null @@ -1,14 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" -) - -// OutputValue represents the state of a particular output value. 
-// -// It is not valid to mutate an OutputValue object once it has been created. -// Instead, create an entirely new OutputValue to replace the previous one. -type OutputValue struct { - Value cty.Value - Sensitive bool -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go deleted file mode 100644 index 32ea638a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go +++ /dev/null @@ -1,233 +0,0 @@ -package states - -import ( - "fmt" - "math/rand" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Resource represents the state of a resource. -type Resource struct { - // Addr is the module-relative address for the resource this state object - // belongs to. - Addr addrs.Resource - - // EachMode is the multi-instance mode currently in use for this resource, - // or NoEach if this is a single-instance resource. This dictates what - // type of value is returned when accessing this resource via expressions - // in the Terraform language. - EachMode EachMode - - // Instances contains the potentially-multiple instances associated with - // this resource. This map can contain a mixture of different key types, - // but only the ones of InstanceKeyType are considered current. - Instances map[addrs.InstanceKey]*ResourceInstance - - // ProviderConfig is the absolute address for the provider configuration that - // most recently managed this resource. This is used to connect a resource - // with a provider configuration when the resource configuration block is - // not available, such as if it has been removed from configuration - // altogether. - ProviderConfig addrs.AbsProviderConfig -} - -// Instance returns the state for the instance with the given key, or nil -// if no such instance is tracked within the state. -func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance { - return rs.Instances[key] -} - -// EnsureInstance returns the state for the instance with the given key, -// creating a new empty state for it if one doesn't already exist. -// -// Because this may create and save a new state, it is considered to be -// a write operation. -func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance { - ret := rs.Instance(key) - if ret == nil { - ret = NewResourceInstance() - rs.Instances[key] = ret - } - return ret -} - -// ResourceInstance represents the state of a particular instance of a resource. -type ResourceInstance struct { - // Current, if non-nil, is the remote object that is currently represented - // by the corresponding resource instance. - Current *ResourceInstanceObjectSrc - - // Deposed, if len > 0, contains any remote objects that were previously - // represented by the corresponding resource instance but have been - // replaced and are pending destruction due to the create_before_destroy - // lifecycle mode. - Deposed map[DeposedKey]*ResourceInstanceObjectSrc -} - -// NewResourceInstance constructs and returns a new ResourceInstance, ready to -// use. -func NewResourceInstance() *ResourceInstance { - return &ResourceInstance{ - Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, - } -} - -// HasCurrent returns true if this resource instance has a "current"-generation -// object. 
Most instances do, but this can briefly be false during a -// create-before-destroy replace operation when the current has been deposed -// but its replacement has not yet been created. -func (i *ResourceInstance) HasCurrent() bool { - return i != nil && i.Current != nil -} - -// HasDeposed returns true if this resource instance has a deposed object -// with the given key. -func (i *ResourceInstance) HasDeposed(key DeposedKey) bool { - return i != nil && i.Deposed[key] != nil -} - -// HasObjects returns true if this resource has any objects at all, whether -// current or deposed. -func (i *ResourceInstance) HasObjects() bool { - return i.Current != nil || len(i.Deposed) != 0 -} - -// deposeCurrentObject is part of the real implementation of -// SyncState.DeposeResourceInstanceObject. The exported method uses a lock -// to ensure that we can safely allocate an unused deposed key without -// collision. -func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey { - if !i.HasCurrent() { - return NotDeposed - } - - key := forceKey - if key == NotDeposed { - key = i.findUnusedDeposedKey() - } else { - if _, exists := i.Deposed[key]; exists { - panic(fmt.Sprintf("forced key %s is already in use", forceKey)) - } - } - i.Deposed[key] = i.Current - i.Current = nil - return key -} - -// GetGeneration retrieves the object of the given generation from the -// ResourceInstance, or returns nil if there is no such object. -// -// If the given generation is nil or invalid, this method will panic. -func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc { - if gen == CurrentGen { - return i.Current - } - if dk, ok := gen.(DeposedKey); ok { - return i.Deposed[dk] - } - if gen == nil { - panic(fmt.Sprintf("get with nil Generation")) - } - // Should never fall out here, since the above covers all possible - // Generation values. - panic(fmt.Sprintf("get invalid Generation %#v", gen)) -} - -// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to -// already be in use for this instance at the time of the call. -// -// Note that the validity of this result may change if new deposed keys are -// allocated before it is used. To avoid this risk, instead use the -// DeposeResourceInstanceObject method on the SyncState wrapper type, which -// allocates a key and uses it atomically. -func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey { - return i.findUnusedDeposedKey() -} - -// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to -// already be in use for this instance. -func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey { - for { - key := NewDeposedKey() - if _, exists := i.Deposed[key]; !exists { - return key - } - // Spin until we find a unique one. This shouldn't take long, because - // we have a 32-bit keyspace and there's rarely more than one deposed - // instance. - } -} - -// EachMode specifies the multi-instance mode for a resource. 
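-//
-// As a rough illustration (not from the original source): a resource using
-// "count" tracks its instances under int keys (EachList), one using
-// "for_each" tracks them under string keys (EachMap), and a plain
-// single-instance resource uses NoEach.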
-type EachMode rune
-
-const (
-	NoEach   EachMode = 0
-	EachList EachMode = 'L'
-	EachMap  EachMode = 'M'
-)
-
-//go:generate go run golang.org/x/tools/cmd/stringer -type EachMode
-
-func eachModeForInstanceKey(key addrs.InstanceKey) EachMode {
-	switch key.(type) {
-	case addrs.IntKey:
-		return EachList
-	case addrs.StringKey:
-		return EachMap
-	default:
-		if key == addrs.NoKey {
-			return NoEach
-		}
-		panic(fmt.Sprintf("don't know an each mode for instance key %#v", key))
-	}
-}
-
-// DeposedKey is an 8-character hex string used to uniquely identify deposed
-// instance objects in the state.
-type DeposedKey string
-
-// NotDeposed is a special invalid value of DeposedKey that is used to represent
-// the absence of a deposed key. It must not be used as an actual deposed key.
-const NotDeposed = DeposedKey("")
-
-var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano()))
-
-// NewDeposedKey generates a pseudo-random deposed key. Because of the short
-// length of these keys, uniqueness is not a natural consequence and so the
-// caller should test to see if the generated key is already in use and generate
-// another if so, until a unique key is found.
-func NewDeposedKey() DeposedKey {
-	v := deposedKeyRand.Uint32()
-	return DeposedKey(fmt.Sprintf("%08x", v))
-}
-
-func (k DeposedKey) String() string {
-	return string(k)
-}
-
-func (k DeposedKey) GoString() string {
-	ks := string(k)
-	switch {
-	case ks == "":
-		return "states.NotDeposed"
-	default:
-		return fmt.Sprintf("states.DeposedKey(%s)", ks)
-	}
-}
-
-// Generation is a helper method to convert a DeposedKey into a Generation.
-// If the receiver is anything other than NotDeposed then the result is
-// just the same value as a Generation. If the receiver is NotDeposed then
-// the result is CurrentGen.
-func (k DeposedKey) Generation() Generation {
-	if k == NotDeposed {
-		return CurrentGen
-	}
-	return k
-}
-
-// generation is an implementation of Generation.
-func (k DeposedKey) generation() {}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go
deleted file mode 100644
index 328dd53d..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package states
-
-import (
-	"sort"
-
-	"github.com/zclconf/go-cty/cty"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-)
-
-// State is the top-level type of a Terraform state.
-//
-// A state should be mutated only via its accessor methods, to ensure that
-// invariants are preserved.
-//
-// Access to State and the nested values within it is not concurrency-safe,
-// so when accessing a State object concurrently it is the caller's
-// responsibility to ensure that only one write is in progress at a time
-// and that reads only occur when no write is in progress. The most common
-// way to achieve this is to wrap the State in a SyncState and use the
-// higher-level atomic operations supported by that type.
-type State struct {
-	// Modules contains the state for each module. The keys in this map are
-	// an implementation detail and must not be used by outside callers.
-	Modules map[string]*Module
-}
-
-// NewState constructs a minimal empty state, containing an empty root module.
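-//
-// For example (a hypothetical usage sketch):
-//
-//	s := NewState()
-//	root := s.RootModule() // already present; never nil for a new state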
-func NewState() *State { - modules := map[string]*Module{} - modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance) - return &State{ - Modules: modules, - } -} - -// BuildState is a helper -- primarily intended for tests -- to build a state -// using imperative code against the StateSync type while still acting as -// an expression of type *State to assign into a containing struct. -func BuildState(cb func(*SyncState)) *State { - s := NewState() - cb(s.SyncWrapper()) - return s -} - -// Empty returns true if there are no resources or populated output values -// in the receiver. In other words, if this state could be safely replaced -// with the return value of NewState and be functionally equivalent. -func (s *State) Empty() bool { - if s == nil { - return true - } - for _, ms := range s.Modules { - if len(ms.Resources) != 0 { - return false - } - if len(ms.OutputValues) != 0 { - return false - } - } - return true -} - -// Module returns the state for the module with the given address, or nil if -// the requested module is not tracked in the state. -func (s *State) Module(addr addrs.ModuleInstance) *Module { - if s == nil { - panic("State.Module on nil *State") - } - return s.Modules[addr.String()] -} - -// RemoveModule removes the module with the given address from the state, -// unless it is the root module. The root module cannot be deleted, and so -// this method will panic if that is attempted. -// -// Removing a module implicitly discards all of the resources, outputs and -// local values within it, and so this should usually be done only for empty -// modules. For callers accessing the state through a SyncState wrapper, modules -// are automatically pruned if they are empty after one of their contained -// elements is removed. -func (s *State) RemoveModule(addr addrs.ModuleInstance) { - if addr.IsRoot() { - panic("attempted to remove root module") - } - - delete(s.Modules, addr.String()) -} - -// RootModule is a convenient alias for Module(addrs.RootModuleInstance). -func (s *State) RootModule() *Module { - if s == nil { - panic("RootModule called on nil State") - } - return s.Modules[addrs.RootModuleInstance.String()] -} - -// EnsureModule returns the state for the module with the given address, -// creating and adding a new one if necessary. -// -// Since this might modify the state to add a new instance, it is considered -// to be a write operation. -func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module { - ms := s.Module(addr) - if ms == nil { - ms = NewModule(addr) - s.Modules[addr.String()] = ms - } - return ms -} - -// HasResources returns true if there is at least one resource (of any mode) -// present in the receiving state. -func (s *State) HasResources() bool { - if s == nil { - return false - } - for _, ms := range s.Modules { - if len(ms.Resources) > 0 { - return true - } - } - return false -} - -// Resource returns the state for the resource with the given address, or nil -// if no such resource is tracked in the state. -func (s *State) Resource(addr addrs.AbsResource) *Resource { - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.Resource(addr.Resource) -} - -// ResourceInstance returns the state for the resource instance with the given -// address, or nil if no such resource is tracked in the state. 
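-//
-// For example (illustrative only, assuming addr is an
-// addrs.AbsResourceInstance obtained elsewhere):
-//
-//	is := s.ResourceInstance(addr)
-//	if is.HasCurrent() { // HasCurrent is nil-safe
-//		// the instance has a current object
-//	}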
-func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { - if s == nil { - panic("State.ResourceInstance on nil *State") - } - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.ResourceInstance(addr.Resource) -} - -// OutputValue returns the state for the output value with the given address, -// or nil if no such output value is tracked in the state. -func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue { - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.OutputValues[addr.OutputValue.Name] -} - -// LocalValue returns the value of the named local value with the given address, -// or cty.NilVal if no such value is tracked in the state. -func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value { - ms := s.Module(addr.Module) - if ms == nil { - return cty.NilVal - } - return ms.LocalValues[addr.LocalValue.Name] -} - -// ProviderAddrs returns a list of all of the provider configuration addresses -// referenced throughout the receiving state. -// -// The result is de-duplicated so that each distinct address appears only once. -func (s *State) ProviderAddrs() []addrs.AbsProviderConfig { - if s == nil { - return nil - } - - m := map[string]addrs.AbsProviderConfig{} - for _, ms := range s.Modules { - for _, rc := range ms.Resources { - m[rc.ProviderConfig.String()] = rc.ProviderConfig - } - } - if len(m) == 0 { - return nil - } - - // This is mainly just so we'll get stable results for testing purposes. - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - ret := make([]addrs.AbsProviderConfig, len(keys)) - for i, key := range keys { - ret[i] = m[key] - } - - return ret -} - -// PruneResourceHusks is a specialized method that will remove any Resource -// objects that do not contain any instances, even if they have an EachMode. -// -// This should generally be used only after a "terraform destroy" operation, -// to finalize the cleanup of the state. It is not correct to use this after -// other operations because if a resource has "count = 0" or "for_each" over -// an empty collection then we want to retain it in the state so that references -// to it, particularly in "strange" contexts like "terraform console", can be -// properly resolved. -// -// This method MUST NOT be called concurrently with other readers and writers -// of the receiving state. -func (s *State) PruneResourceHusks() { - for _, m := range s.Modules { - m.PruneResourceHusks() - if len(m.Resources) == 0 && !m.Addr.IsRoot() { - s.RemoveModule(m.Addr) - } - } -} - -// SyncWrapper returns a SyncState object wrapping the receiver. -func (s *State) SyncWrapper() *SyncState { - return &SyncState{ - state: s, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go deleted file mode 100644 index 6266aca7..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go +++ /dev/null @@ -1,221 +0,0 @@ -package states - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// Taking deep copies of states is an important operation because state is -// otherwise a mutable data structure that is challenging to share across -// many separate callers. 
It is important that the DeepCopy implementations
-// in this file comprehensively copy all parts of the state data structure
-// that could be mutated via pointers.
-
-// DeepCopy returns a new state that contains equivalent data to the receiver
-// but shares no backing memory in common.
-//
-// As with all methods on State, this method is not safe to use concurrently
-// with writing to any portion of the receiving data structure. It is the
-// caller's responsibility to ensure mutual exclusion for the duration of the
-// operation, but may then freely modify the receiver and the returned copy
-// independently once this method returns.
-func (s *State) DeepCopy() *State {
-	if s == nil {
-		return nil
-	}
-
-	modules := make(map[string]*Module, len(s.Modules))
-	for k, m := range s.Modules {
-		modules[k] = m.DeepCopy()
-	}
-	return &State{
-		Modules: modules,
-	}
-}
-
-// DeepCopy returns a new module state that contains equivalent data to the
-// receiver but shares no backing memory in common.
-//
-// As with all methods on Module, this method is not safe to use concurrently
-// with writing to any portion of the receiving data structure. It is the
-// caller's responsibility to ensure mutual exclusion for the duration of the
-// operation, but may then freely modify the receiver and the returned copy
-// independently once this method returns.
-func (ms *Module) DeepCopy() *Module {
-	if ms == nil {
-		return nil
-	}
-
-	resources := make(map[string]*Resource, len(ms.Resources))
-	for k, r := range ms.Resources {
-		resources[k] = r.DeepCopy()
-	}
-	outputValues := make(map[string]*OutputValue, len(ms.OutputValues))
-	for k, v := range ms.OutputValues {
-		outputValues[k] = v.DeepCopy()
-	}
-	localValues := make(map[string]cty.Value, len(ms.LocalValues))
-	for k, v := range ms.LocalValues {
-		// cty.Value is immutable, so we don't need to copy these.
-		localValues[k] = v
-	}
-
-	return &Module{
-		Addr:         ms.Addr, // technically mutable, but immutable by convention
-		Resources:    resources,
-		OutputValues: outputValues,
-		LocalValues:  localValues,
-	}
-}
-
-// DeepCopy returns a new resource state that contains equivalent data to the
-// receiver but shares no backing memory in common.
-//
-// As with all methods on Resource, this method is not safe to use concurrently
-// with writing to any portion of the receiving data structure. It is the
-// caller's responsibility to ensure mutual exclusion for the duration of the
-// operation, but may then freely modify the receiver and the returned copy
-// independently once this method returns.
-func (rs *Resource) DeepCopy() *Resource {
-	if rs == nil {
-		return nil
-	}
-
-	instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances))
-	for k, i := range rs.Instances {
-		instances[k] = i.DeepCopy()
-	}
-
-	return &Resource{
-		Addr:           rs.Addr,
-		EachMode:       rs.EachMode,
-		Instances:      instances,
-		ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention
-	}
-}
-
-// DeepCopy returns a new resource instance state that contains equivalent data
-// to the receiver but shares no backing memory in common.
-//
-// As with all methods on ResourceInstance, this method is not safe to use
-// concurrently with writing to any portion of the receiving data structure. It
-// is the caller's responsibility to ensure mutual exclusion for the duration
-// of the operation, but may then freely modify the receiver and the returned
-// copy independently once this method returns.
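// Illustrative sketch, not vendored code: the defensive-copy pattern the
// DeepCopy methods around here apply to pointer-backed fields. The field
// names are illustrative; the vendored code additionally preserves nil-ness
// by copying only non-nil maps and slices.
package main

import "fmt"

type objSrc struct {
	AttrsFlat map[string]string
	AttrsJSON []byte
}

func (o objSrc) deepCopy() objSrc {
	attrs := make(map[string]string, len(o.AttrsFlat))
	for k, v := range o.AttrsFlat {
		attrs[k] = v
	}
	raw := make([]byte, len(o.AttrsJSON))
	copy(raw, o.AttrsJSON)
	return objSrc{AttrsFlat: attrs, AttrsJSON: raw}
}

func main() {
	a := objSrc{AttrsFlat: map[string]string{"id": "i-123"}, AttrsJSON: []byte(`{}`)}
	b := a.deepCopy()
	b.AttrsFlat["id"] = "i-456"
	fmt.Println(a.AttrsFlat["id"]) // still "i-123": no shared backing memory
}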
-func (is *ResourceInstance) DeepCopy() *ResourceInstance {
-	if is == nil {
-		return nil
-	}
-
-	deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed))
-	for k, obj := range is.Deposed {
-		deposed[k] = obj.DeepCopy()
-	}
-
-	return &ResourceInstance{
-		Current: is.Current.DeepCopy(),
-		Deposed: deposed,
-	}
-}
-
-// DeepCopy returns a new resource instance object that contains equivalent data
-// to the receiver but shares no backing memory in common.
-//
-// As with all methods on ResourceInstanceObjectSrc, this method is not safe to
-// use concurrently with writing to any portion of the receiving data structure.
-// It is the caller's responsibility to ensure mutual exclusion for the duration
-// of the operation, but may then freely modify the receiver and the returned
-// copy independently once this method returns.
-func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
-	if obj == nil {
-		return nil
-	}
-
-	var attrsFlat map[string]string
-	if obj.AttrsFlat != nil {
-		attrsFlat = make(map[string]string, len(obj.AttrsFlat))
-		for k, v := range obj.AttrsFlat {
-			attrsFlat[k] = v
-		}
-	}
-
-	var attrsJSON []byte
-	if obj.AttrsJSON != nil {
-		attrsJSON = make([]byte, len(obj.AttrsJSON))
-		copy(attrsJSON, obj.AttrsJSON)
-	}
-
-	var private []byte
-	if obj.Private != nil {
-		private = make([]byte, len(obj.Private))
-		copy(private, obj.Private)
-	}
-
-	// Some addrs.Referenceable implementations are technically mutable, but
-	// we treat them as immutable by convention and so we don't deep-copy here.
-	dependencies := make([]addrs.Referenceable, len(obj.Dependencies))
-	copy(dependencies, obj.Dependencies)
-
-	return &ResourceInstanceObjectSrc{
-		Status:        obj.Status,
-		SchemaVersion: obj.SchemaVersion,
-		Private:       private,
-		AttrsFlat:     attrsFlat,
-		AttrsJSON:     attrsJSON,
-		Dependencies:  dependencies,
-	}
-}
-
-// DeepCopy returns a new resource instance object that contains equivalent data
-// to the receiver but shares no backing memory in common.
-//
-// As with all methods on ResourceInstanceObject, this method is not safe to use
-// concurrently with writing to any portion of the receiving data structure. It
-// is the caller's responsibility to ensure mutual exclusion for the duration
-// of the operation, but may then freely modify the receiver and the returned
-// copy independently once this method returns.
-func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject {
-	if obj == nil {
-		return nil
-	}
-
-	var private []byte
-	if obj.Private != nil {
-		private = make([]byte, len(obj.Private))
-		copy(private, obj.Private)
-	}
-
-	// Some addrs.Referenceable implementations are technically mutable, but
-	// we treat them as immutable by convention and so we don't deep-copy here.
-	var dependencies []addrs.Referenceable
-	if obj.Dependencies != nil {
-		dependencies = make([]addrs.Referenceable, len(obj.Dependencies))
-		copy(dependencies, obj.Dependencies)
-	}
-
-	return &ResourceInstanceObject{
-		Value:        obj.Value,
-		Status:       obj.Status,
-		Private:      private,
-		Dependencies: dependencies,
-	}
-}
-
-// DeepCopy returns a new output value state that contains equivalent data
-// to the receiver but shares no backing memory in common.
-//
-// As with all methods on OutputValue, this method is not safe to use
-// concurrently with writing to any portion of the receiving data structure.
It -// is the caller's responsibility to ensure mutual exclusion for the duration -// of the operation, but may then freely modify the receiver and the returned -// copy independently once this method returns. -func (os *OutputValue) DeepCopy() *OutputValue { - if os == nil { - return nil - } - - return &OutputValue{ - Value: os.Value, - Sensitive: os.Sensitive, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go deleted file mode 100644 index ea20967e..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go +++ /dev/null @@ -1,18 +0,0 @@ -package states - -import ( - "reflect" -) - -// Equal returns true if the receiver is functionally equivalent to other, -// including any ephemeral portions of the state that would not be included -// if the state were saved to files. -// -// To test only the persistent portions of two states for equality, instead -// use statefile.StatesMarshalEqual. -func (s *State) Equal(other *State) bool { - // For the moment this is sufficient, but we may need to do something - // more elaborate in future if we have any portions of state that require - // more sophisticated comparisons. - return reflect.DeepEqual(s, other) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go deleted file mode 100644 index dffd650d..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go +++ /dev/null @@ -1,279 +0,0 @@ -package states - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "sort" - "strings" - - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" -) - -// String returns a rather-odd string representation of the entire state. -// -// This is intended to match the behavior of the older terraform.State.String -// method that is used in lots of existing tests. It should not be used in -// new tests: instead, use "cmp" to directly compare the state data structures -// and print out a diff if they do not match. -// -// This method should never be used in non-test code, whether directly by call -// or indirectly via a %s or %q verb in package fmt. -func (s *State) String() string { - if s == nil { - return "" - } - - // sort the modules by name for consistent output - modules := make([]string, 0, len(s.Modules)) - for m := range s.Modules { - modules = append(modules, m) - } - sort.Strings(modules) - - var buf bytes.Buffer - for _, name := range modules { - m := s.Modules[name] - mStr := m.testString() - - // If we're the root module, we just write the output directly. - if m.Addr.IsRoot() { - buf.WriteString(mStr + "\n") - continue - } - - // We need to build out a string that resembles the not-quite-standard - // format that terraform.State.String used to use, where there's a - // "module." prefix but then just a chain of all of the module names - // without any further "module." portions. 
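// Illustrative sketch, not vendored code: what the legacy-style header built
// below looks like for a hypothetical nested module path ["a", "b"] with no
// instance keys.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	buf.WriteString("module")
	for _, name := range []string{"a", "b"} {
		buf.WriteByte('.')
		buf.WriteString(name) // no repeated "module." between steps
	}
	buf.WriteString(":")
	fmt.Println(buf.String()) // module.a.b:
}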
- buf.WriteString("module") - for _, step := range m.Addr { - buf.WriteByte('.') - buf.WriteString(step.Name) - if step.InstanceKey != addrs.NoKey { - buf.WriteByte('[') - buf.WriteString(step.InstanceKey.String()) - buf.WriteByte(']') - } - } - buf.WriteString(":\n") - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - text := s.Text() - if text != "" { - text = " " + text - } - - buf.WriteString(fmt.Sprintf("%s\n", text)) - } - } - - return strings.TrimSpace(buf.String()) -} - -// testString is used to produce part of the output of State.String. It should -// never be used directly. -func (m *Module) testString() string { - var buf bytes.Buffer - - if len(m.Resources) == 0 { - buf.WriteString("") - } - - // We use AbsResourceInstance here, even though everything belongs to - // the same module, just because we have a sorting behavior defined - // for those but not for just ResourceInstance. - addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources)) - for _, rs := range m.Resources { - for ik := range rs.Instances { - addrsOrder = append(addrsOrder, rs.Addr.Instance(ik).Absolute(addrs.RootModuleInstance)) - } - } - - sort.Slice(addrsOrder, func(i, j int) bool { - return addrsOrder[i].Less(addrsOrder[j]) - }) - - for _, fakeAbsAddr := range addrsOrder { - addr := fakeAbsAddr.Resource - rs := m.Resource(addr.ContainingResource()) - is := m.ResourceInstance(addr) - - // Here we need to fake up a legacy-style address as the old state - // types would've used, since that's what our tests against those - // old types expect. The significant difference is that instancekey - // is dot-separated rather than using index brackets. - k := addr.ContainingResource().String() - if addr.Key != addrs.NoKey { - switch tk := addr.Key.(type) { - case addrs.IntKey: - k = fmt.Sprintf("%s.%d", k, tk) - default: - // No other key types existed for the legacy types, so we - // can do whatever we want here. We'll just use our standard - // syntax for these. - k = k + tk.String() - } - } - - id := LegacyInstanceObjectID(is.Current) - - taintStr := "" - if is.Current != nil && is.Current.Status == ObjectTainted { - taintStr = " (tainted)" - } - - deposedStr := "" - if len(is.Deposed) > 0 { - deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed)) - } - - buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) - buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) - buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.ProviderConfig.String())) - - // Attributes were a flatmap before, but are not anymore. To preserve - // our old output as closely as possible we need to do a conversion - // to flatmap. Normally we'd want to do this with schema for - // accuracy, but for our purposes here it only needs to be approximate. - // This should produce an identical result for most cases, though - // in particular will differ in a few cases: - // - The keys used for elements in a set will be different - // - Values for attributes of type cty.DynamicPseudoType will be - // misinterpreted (but these weren't possible in old world anyway) - var attributes map[string]string - if obj := is.Current; obj != nil { - switch { - case obj.AttrsFlat != nil: - // Easy (but increasingly unlikely) case: the state hasn't - // actually been upgraded to the new form yet. 
- attributes = obj.AttrsFlat - case obj.AttrsJSON != nil: - ty, err := ctyjson.ImpliedType(obj.AttrsJSON) - if err == nil { - val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty) - if err == nil { - attributes = hcl2shim.FlatmapValueFromHCL2(val) - } - } - } - } - attrKeys := make([]string, 0, len(attributes)) - for ak, val := range attributes { - if ak == "id" { - continue - } - - // don't show empty containers in the output - if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) { - continue - } - - attrKeys = append(attrKeys, ak) - } - - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) - } - - // CAUTION: Since deposed keys are now random strings instead of - // incrementing integers, this result will not be deterministic - // if there is more than one deposed object. - i := 1 - for _, t := range is.Deposed { - id := LegacyInstanceObjectID(t) - taintStr := "" - if t.Status == ObjectTainted { - taintStr = " (tainted)" - } - buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr)) - i++ - } - - if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) - for _, dep := range obj.Dependencies { - buf.WriteString(fmt.Sprintf(" %s\n", dep.String())) - } - } - } - - if len(m.OutputValues) > 0 { - buf.WriteString("\nOutputs:\n\n") - - ks := make([]string, 0, len(m.OutputValues)) - for k := range m.OutputValues { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - v := m.OutputValues[k] - lv := hcl2shim.ConfigValueFromHCL2(v.Value) - switch vTyped := lv.(type) { - case string: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case []interface{}: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case map[string]interface{}: - var mapKeys []string - for key := range vTyped { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) - - var mapBuf bytes.Buffer - mapBuf.WriteString("{") - for _, key := range mapKeys { - mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) - } - mapBuf.WriteString("}") - - buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) - default: - buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv)) - } - } - } - - return buf.String() -} - -// LegacyInstanceObjectID is a helper for extracting an object id value from -// an instance object in a way that approximates how we used to do this -// for the old state types. ID is no longer first-class, so this is preserved -// only for compatibility with old tests that include the id as part of their -// expected value. -func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string { - if obj == nil { - return "" - } - - if obj.AttrsJSON != nil { - type WithID struct { - ID string `json:"id"` - } - var withID WithID - err := json.Unmarshal(obj.AttrsJSON, &withID) - if err == nil { - return withID.ID - } - } else if obj.AttrsFlat != nil { - if flatID, exists := obj.AttrsFlat["id"]; exists { - return flatID - } - } - - // For resource types created after we removed id as special there may - // not actually be one at all. This is okay because older tests won't - // encounter this, and new tests shouldn't be using ids. 
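// Illustrative sketch, not vendored code: the id-sniffing trick
// LegacyInstanceObjectID uses for JSON attributes, decoding only the "id"
// field and ignoring everything else in the object.
package main

import (
	"encoding/json"
	"fmt"
)

func legacyID(attrsJSON []byte) string {
	var withID struct {
		ID string `json:"id"`
	}
	if err := json.Unmarshal(attrsJSON, &withID); err != nil {
		return ""
	}
	return withID.ID
}

func main() {
	fmt.Println(legacyID([]byte(`{"id":"i-abc123","ami":"ami-1"}`))) // i-abc123
}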
- return "" -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go deleted file mode 100644 index 042ce51c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go +++ /dev/null @@ -1,62 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -const invalidFormat = "Invalid state file format" - -// jsonUnmarshalDiags is a helper that translates errors returned from -// json.Unmarshal into hopefully-more-helpful diagnostics messages. -func jsonUnmarshalDiags(err error) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - if err == nil { - return diags - } - - switch tErr := err.(type) { - case *json.SyntaxError: - // We've usually already successfully parsed a source file as JSON at - // least once before we'd use jsonUnmarshalDiags with it (to sniff - // the version number) so this particular error should not appear much - // in practice. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), - )) - case *json.UnmarshalTypeError: - // This is likely to be the most common area, describing a - // non-conformance between the file and the expected file format - // at a semantic level. - if tErr.Field != "" { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value), - )) - break - } else { - // Without a field name, we can't really say anything helpful. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - "The state file does not conform to the expected JSON data structure.", - )) - } - default: - // Fallback for all other types of errors. This can happen only for - // custom UnmarshalJSON implementations, so should be encountered - // only rarely. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()), - )) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go deleted file mode 100644 index 625d0cf4..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package statefile deals with the file format used to serialize states for -// persistent storage and then deserialize them into memory again later. -package statefile diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go deleted file mode 100644 index 70c8ba6c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go +++ /dev/null @@ -1,31 +0,0 @@ -package statefile - -import ( - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// File is the in-memory representation of a state file. 
It includes the state
-// itself along with various metadata used to track changing state files for
-// the same configuration over time.
-type File struct {
-	// TerraformVersion is the version of Terraform that wrote this state file.
-	TerraformVersion *version.Version
-
-	// Serial is incremented on any operation that modifies
-	// the State file. It is used to detect potentially conflicting
-	// updates.
-	Serial uint64
-
-	// Lineage is set when a new, blank state file is created and then
-	// never updated. This allows us to determine whether the serials
-	// of two states can be meaningfully compared.
-	// Apart from the guarantee that collisions between two lineages
-	// are very unlikely, this value is opaque and external callers
-	// should only compare lineage strings byte-for-byte for equality.
-	Lineage string
-
-	// State is the actual state represented by this file.
-	State *states.State
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go
deleted file mode 100644
index f1899cd2..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go
+++ /dev/null
@@ -1,209 +0,0 @@
-package statefile
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-
-	version "github.com/hashicorp/go-version"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-	tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version"
-)
-
-// ErrNoState is returned by ReadState when the state file is empty.
-var ErrNoState = errors.New("no state")
-
-// Read reads a state from the given reader.
-//
-// Legacy state format versions 1 through 3 are supported, but the result will
-// contain object attributes in the deprecated "flatmap" format and so must
-// be upgraded by the caller before use.
-//
-// If the state file is empty, the special error value ErrNoState is returned.
-// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics
-// potentially describing multiple errors.
-func Read(r io.Reader) (*File, error) {
-	// Some callers provide us a "typed nil" *os.File here, which would
-	// cause us to panic below if we tried to use it.
-	if f, ok := r.(*os.File); ok && f == nil {
-		return nil, ErrNoState
-	}
-
-	var diags tfdiags.Diagnostics
-
-	// We actually just buffer the whole thing in memory, because states are
-	// generally not huge and we need to be able to sniff for a version
-	// number before full parsing.
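// Illustrative sketch, not vendored code: why Read's guard above checks for a
// "typed nil" *os.File. A nil concrete pointer stored in an io.Reader makes
// the interface itself non-nil, so a plain nil comparison cannot catch it.
package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	var f *os.File // nil pointer
	var r io.Reader = f
	fmt.Println(r == nil) // false: the interface holds (*os.File)(nil)
	if fp, ok := r.(*os.File); ok && fp == nil {
		fmt.Println("caught typed nil") // the check Read performs
	}
}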
- src, err := ioutil.ReadAll(r) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read state file", - fmt.Sprintf("The state file could not be read: %s", err), - )) - return nil, diags.Err() - } - - if len(src) == 0 { - return nil, ErrNoState - } - - state, diags := readState(src) - if diags.HasErrors() { - return nil, diags.Err() - } - - if state == nil { - // Should never happen - panic("readState returned nil state with no errors") - } - - if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) { - return state, fmt.Errorf( - "state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state", - state.TerraformVersion, - tfversion.SemVer, - state.TerraformVersion, - ) - } - - return state, diags.Err() -} - -func readState(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if looksLikeVersion0(src) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.", - )) - return nil, diags - } - - version, versionDiags := sniffJSONStateVersion(src) - diags = diags.Append(versionDiags) - if versionDiags.HasErrors() { - return nil, diags - } - - switch version { - case 0: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file uses JSON syntax but has a version number of zero. There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.", - )) - return nil, diags - case 1: - return readStateV1(src) - case 2: - return readStateV2(src) - case 3: - return readStateV3(src) - case 4: - return readStateV4(src) - default: - thisVersion := tfversion.SemVer.String() - creatingVersion := sniffJSONStateTerraformVersion(src) - switch { - case creatingVersion != "": - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file may have been created by a newer version of Terraform.", version, thisVersion), - )) - } - return nil, diags - } -} - -func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - type VersionSniff struct { - Version *uint64 `json:"version"` - } - var sniff VersionSniff - err := json.Unmarshal(src, &sniff) - if err != nil { - switch tErr := err.(type) { - case *json.SyntaxError: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), - )) - case *json.UnmarshalTypeError: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The version in the state file is %s. 
A positive whole number is required.", tErr.Value), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file could not be parsed as JSON.", - )) - } - } - - if sniff.Version == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file does not have a \"version\" attribute, which is required to identify the format version.", - )) - return 0, diags - } - - return *sniff.Version, diags -} - -// sniffJSONStateTerraformVersion attempts to sniff the Terraform version -// specification from the given state file source code. The result is either -// a version string or an empty string if no version number could be extracted. -// -// This is a best-effort function intended to produce nicer error messages. It -// should not be used for any real processing. -func sniffJSONStateTerraformVersion(src []byte) string { - type VersionSniff struct { - Version string `json:"terraform_version"` - } - var sniff VersionSniff - - err := json.Unmarshal(src, &sniff) - if err != nil { - return "" - } - - // Attempt to parse the string as a version so we won't report garbage - // as a version number. - _, err = version.NewVersion(sniff.Version) - if err != nil { - return "" - } - - return sniff.Version -} - -// unsupportedFormat is a diagnostic summary message for when the state file -// seems to not be a state file at all, or is not a supported version. -// -// Use invalidFormat instead for the subtly-different case of "this looks like -// it's intended to be a state file but it's not structured correctly". -const unsupportedFormat = "Unsupported state file format" - -const upgradeFailed = "State format upgrade failed" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go deleted file mode 100644 index 9b533317..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go +++ /dev/null @@ -1,23 +0,0 @@ -package statefile - -// looksLikeVersion0 sniffs for the signature indicating a version 0 state -// file. -// -// Version 0 was the number retroactively assigned to Terraform's initial -// (unversioned) binary state file format, which was later superseded by the -// version 1 format in JSON. -// -// Version 0 is no longer supported, so this is used only to detect it and -// return a nice error to the user. -func looksLikeVersion0(src []byte) bool { - // Version 0 files begin with the magic prefix "tfstate". 
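// Illustrative sketch, not vendored code: the same prefix test expressed with
// bytes.HasPrefix, which subsumes the explicit length check below.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	fmt.Println(bytes.HasPrefix([]byte("tfstate..."), []byte("tfstate")))     // true
	fmt.Println(bytes.HasPrefix([]byte(`{"version":4}`), []byte("tfstate"))) // false
}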
- const magic = "tfstate" - if len(src) < len(magic) { - // Not even long enough to have the magic prefix - return false - } - if string(src[0:len(magic)]) == magic { - return true - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go deleted file mode 100644 index 85b422ad..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go +++ /dev/null @@ -1,167 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func readStateV1(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV1 := &stateV1{} - err := json.Unmarshal(src, sV1) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV1(sV1) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV2, err := upgradeStateV1ToV2(sV1) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - upgradeFailed, - fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err), - )) - return nil, diags - } - - file, prepDiags := prepareStateV2(sV2) - diags = diags.Append(prepDiags) - return file, diags -} - -// stateV1 is a representation of the legacy JSON state format version 1. -// -// It is only used to read version 1 JSON files prior to upgrading them to -// the current format. -type stateV1 struct { - // Version is the protocol version. "1" for a StateV1. - Version int `json:"version"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV1 `json:"remote,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV1 `json:"modules"` -} - -type remoteStateV1 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type moduleStateV1 struct { - // Path is the import path from the root module. Modules imports are - // always disjoint, so the path represents amodule tree - Path []string `json:"path"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]string `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV1 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: an module may depend - // on a VPC ID given by an aws_vpc resource. 
- // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` -} - -type resourceStateV1 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instances on which providers will act. - Primary *instanceStateV1 `json:"primary"` - - // Tainted is used to track any underlying instances that - // have been created but are in a bad or unknown state and - // need to be cleaned up subsequently. In the - // standard case, there is only at most a single instance. - // However, in pathological cases, it is possible for the number - // of instances to accumulate. - Tainted []*instanceStateV1 `json:"tainted,omitempty"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. If there were problems creating the - // replacement, the instance remains in the Deposed list so it can be - // destroyed in a future run. Functionally, Deposed instances are very - // similar to Tainted instances in that Terraform is only tracking them in - // order to remember to destroy them. - Deposed []*instanceStateV1 `json:"deposed,omitempty"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider,omitempty"` -} - -type instanceStateV1 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes,omitempty"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. 
- Meta map[string]string `json:"meta,omitempty"` -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go deleted file mode 100644 index 0b417e1c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go +++ /dev/null @@ -1,172 +0,0 @@ -package statefile - -import ( - "fmt" - "log" - - "github.com/mitchellh/copystructure" -) - -// upgradeStateV1ToV2 is used to upgrade a V1 state representation -// into a V2 state representation -func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) { - log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2") - if old == nil { - return nil, nil - } - - remote, err := old.Remote.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - - modules := make([]*moduleStateV2, len(old.Modules)) - for i, module := range old.Modules { - upgraded, err := module.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - modules[i] = upgraded - } - if len(modules) == 0 { - modules = nil - } - - newState := &stateV2{ - Version: 2, - Serial: old.Serial, - Remote: remote, - Modules: modules, - } - - return newState, nil -} - -func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) { - if old == nil { - return nil, nil - } - - config, err := copystructure.Copy(old.Config) - if err != nil { - return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) - } - - return &remoteStateV2{ - Type: old.Type, - Config: config.(map[string]string), - }, nil -} - -func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) { - if old == nil { - return nil, nil - } - - pathRaw, err := copystructure.Copy(old.Path) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - path, ok := pathRaw.([]string) - if !ok { - return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") - } - if len(path) == 0 { - // We found some V1 states with a nil path. Assume root. 
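// Illustrative sketch, not vendored code: the copying pattern these upgrade
// helpers rely on, using the real github.com/mitchellh/copystructure API.
// Copy returns an interface{} that must be type-asserted back to the
// concrete type, as the vendored code does above.
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

func main() {
	orig := map[string]string{"region": "us-east-1"}
	dup, err := copystructure.Copy(orig)
	if err != nil {
		panic(err)
	}
	cfg := dup.(map[string]string) // assert back from interface{}
	cfg["region"] = "eu-west-1"
	fmt.Println(orig["region"], cfg["region"]) // us-east-1 eu-west-1
}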
- path = []string{"root"} - } - - // Outputs needs upgrading to use the new structure - outputs := make(map[string]*outputStateV2) - for key, output := range old.Outputs { - outputs[key] = &outputStateV2{ - Type: "string", - Value: output, - Sensitive: false, - } - } - - resources := make(map[string]*resourceStateV2) - for key, oldResource := range old.Resources { - upgraded, err := oldResource.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - resources[key] = upgraded - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - - return &moduleStateV2{ - Path: path, - Outputs: outputs, - Resources: resources, - Dependencies: dependencies.([]string), - }, nil -} - -func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) { - if old == nil { - return nil, nil - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - primary, err := old.Primary.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - deposed := make([]*instanceStateV2, len(old.Deposed)) - for i, v := range old.Deposed { - upgraded, err := v.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - deposed[i] = upgraded - } - if len(deposed) == 0 { - deposed = nil - } - - return &resourceStateV2{ - Type: old.Type, - Dependencies: dependencies.([]string), - Primary: primary, - Deposed: deposed, - Provider: old.Provider, - }, nil -} - -func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) { - if old == nil { - return nil, nil - } - - attributes, err := copystructure.Copy(old.Attributes) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - meta, err := copystructure.Copy(old.Meta) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - newMeta := make(map[string]interface{}) - for k, v := range meta.(map[string]string) { - newMeta[k] = v - } - - return &instanceStateV2{ - ID: old.ID, - Attributes: attributes.(map[string]string), - Meta: newMeta, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go deleted file mode 100644 index 6d10166b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go +++ /dev/null @@ -1,204 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func readStateV2(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV2 := &stateV2{} - err := json.Unmarshal(src, sV2) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV2(sV2) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV3, err := upgradeStateV2ToV3(sV2) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - upgradeFailed, - fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err), - )) - return nil, diags - } - - file, prepDiags := 
prepareStateV3(sV3) - diags = diags.Append(prepDiags) - return file, diags -} - -// stateV2 is a representation of the legacy JSON state format version 2. -// -// It is only used to read version 2 JSON files prior to upgrading them to -// the current format. -type stateV2 struct { - // Version is the state file protocol version. - Version int `json:"version"` - - // TFVersion is the version of Terraform that wrote this state. - TFVersion string `json:"terraform_version,omitempty"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Lineage is set when a new, blank state is created and then - // never updated. This allows us to determine whether the serials - // of two states can be meaningfully compared. - // Apart from the guarantee that collisions between two lineages - // are very unlikely, this value is opaque and external callers - // should only compare lineage strings byte-for-byte for equality. - Lineage string `json:"lineage"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV2 `json:"remote,omitempty"` - - // Backend tracks the configuration for the backend in use with - // this state. This is used to track any changes in the backend - // configuration. - Backend *backendStateV2 `json:"backend,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV2 `json:"modules"` -} - -type remoteStateV2 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type outputStateV2 struct { - // Sensitive describes whether the output is considered sensitive, - // which may lead to masking the value on screen in some cases. - Sensitive bool `json:"sensitive"` - // Type describes the structure of Value. Valid values are "string", - // "map" and "list" - Type string `json:"type"` - // Value contains the value of the output, in the structure described - // by the Type field. - Value interface{} `json:"value"` -} - -type moduleStateV2 struct { - // Path is the import path from the root module. Modules imports are - // always disjoint, so the path represents amodule tree - Path []string `json:"path"` - - // Locals are kept only transiently in-memory, because we can always - // re-compute them. - Locals map[string]interface{} `json:"-"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]*outputStateV2 `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV2 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: an module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. 
- // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` -} - -type resourceStateV2 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instances on which providers will act. - Primary *instanceStateV2 `json:"primary"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. - // - // If there were problems creating the replacement Primary, the Deposed - // instance and the (now tainted) replacement Primary will be swapped so the - // tainted replacement will be cleaned up instead. - // - // An instance will remain in the Deposed list until it is successfully - // destroyed and purged. - Deposed []*instanceStateV2 `json:"deposed"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider"` -} - -type instanceStateV2 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. The value here must only contain Go primitives - // and collections. - Meta map[string]interface{} `json:"meta"` - - // Tainted is used to mark a resource for recreation. 
- Tainted bool `json:"tainted"` -} - -type backendStateV2 struct { - Type string `json:"type"` // Backend type - ConfigRaw json.RawMessage `json:"config"` // Backend raw config - Hash uint64 `json:"hash"` // Hash of portion of configuration from config files -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go deleted file mode 100644 index 2d03c07c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go +++ /dev/null @@ -1,145 +0,0 @@ -package statefile - -import ( - "fmt" - "log" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/mitchellh/copystructure" -) - -func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) { - if old == nil { - return (*stateV3)(nil), nil - } - - var new *stateV3 - { - copy, err := copystructure.Config{Lock: true}.Copy(old) - if err != nil { - panic(err) - } - newWrongType := copy.(*stateV2) - newRightType := (stateV3)(*newWrongType) - new = &newRightType - } - - // Set the new version number - new.Version = 3 - - // Change the counts for things which look like maps to use the % - // syntax. Remove counts for empty collections - they will be added - // back in later. - for _, module := range new.Modules { - for _, resource := range module.Resources { - // Upgrade Primary - if resource.Primary != nil { - upgradeAttributesV2ToV3(resource.Primary) - } - - // Upgrade Deposed - for _, deposed := range resource.Deposed { - upgradeAttributesV2ToV3(deposed) - } - } - } - - return new, nil -} - -func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error { - collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) - collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) - - // Identify the key prefix of anything which is a collection - var collectionKeyPrefixes []string - for key := range instanceState.Attributes { - if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) - } - } - sort.Strings(collectionKeyPrefixes) - - log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) - - // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not - // run very often. - for _, prefix := range collectionKeyPrefixes { - // First get the actual keys that belong to this prefix - var potentialKeysMatching []string - for key := range instanceState.Attributes { - if strings.HasPrefix(key, prefix) { - potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) - } - } - sort.Strings(potentialKeysMatching) - - var actualKeysMatching []string - for _, key := range potentialKeysMatching { - if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - actualKeysMatching = append(actualKeysMatching, submatches[0][1]) - } else { - if key != "#" { - actualKeysMatching = append(actualKeysMatching, key) - } - } - } - actualKeysMatching = uniqueSortedStrings(actualKeysMatching) - - // Now inspect the keys in order to determine whether this is most likely to be - // a map, list or set. There is room for error here, so we log in each case. If - // there is no method of telling, we remove the key from the InstanceState in - // order that it will be recreated. 
Again, this could be rolled into fewer loops - // but we prefer clarity. - - oldCountKey := fmt.Sprintf("%s#", prefix) - - // First, detect "obvious" maps - which have non-numeric keys (mostly). - hasNonNumericKeys := false - for _, key := range actualKeysMatching { - if _, err := strconv.Atoi(key); err != nil { - hasNonNumericKeys = true - } - } - if hasNonNumericKeys { - newCountKey := fmt.Sprintf("%s%%", prefix) - - instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", - strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) - } - - // Now detect empty collections and remove them from state. - if len(actualKeysMatching) == 0 { - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", - strings.TrimSuffix(prefix, ".")) - } - } - - return nil -} - -// uniqueSortedStrings removes duplicates from a slice of strings and returns -// a sorted slice of the unique strings. -func uniqueSortedStrings(input []string) []string { - uniquemap := make(map[string]struct{}) - for _, str := range input { - uniquemap[str] = struct{}{} - } - - output := make([]string, len(uniquemap)) - - i := 0 - for key := range uniquemap { - output[i] = key - i = i + 1 - } - - sort.Strings(output) - return output -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go deleted file mode 100644 index 1c81e716..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go +++ /dev/null @@ -1,50 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func readStateV3(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV3 := &stateV3{} - err := json.Unmarshal(src, sV3) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV3(sV3) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV4, err := upgradeStateV3ToV4(sV3) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - upgradeFailed, - fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err), - )) - return nil, diags - } - - file, prepDiags := prepareStateV4(sV4) - diags = diags.Append(prepDiags) - return file, diags -} - -// stateV2 is a representation of the legacy JSON state format version 3. -// -// It is only used to read version 3 JSON files prior to upgrading them to -// the current format. -// -// The differences between version 2 and version 3 are only in the data and -// not in the structure, so stateV3 actually shares the same structs as -// stateV2. Type stateV3 represents that the data within is formatted as -// expected by the V3 format, rather than the V2 format. 
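// Illustrative sketch, not vendored code: the v2-to-v3 count-key rewrite that
// upgradeAttributesV2ToV3 above performs, reduced to one hypothetical "tags"
// collection. A non-numeric subkey marks the collection as a map, so its
// "tags.#" count moves to "tags.%".
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	attrs := map[string]string{
		"tags.#":    "2",
		"tags.Name": "web",
		"tags.Env":  "prod",
	}
	isMap := false
	for k := range attrs {
		sub := strings.TrimPrefix(k, "tags.")
		if sub == k || sub == "#" {
			continue // not part of this collection, or the count key itself
		}
		if _, err := strconv.Atoi(sub); err != nil {
			isMap = true // non-numeric subkey => map, not list
		}
	}
	if isMap {
		attrs["tags.%"] = attrs["tags.#"]
		delete(attrs, "tags.#")
	}
	fmt.Println(attrs["tags.%"], attrs["tags.#"]) // 2 (empty)
}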
-type stateV3 stateV2 diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go deleted file mode 100644 index f08a62b2..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go +++ /dev/null @@ -1,444 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) { - - if old.Serial < 0 { - // The new format is using uint64 here, which should be fine for any - // real state (we only used positive integers in practice) but we'll - // catch this explicitly here to avoid weird behavior if a state file - // has been tampered with in some way. - return nil, fmt.Errorf("state has serial less than zero, which is invalid") - } - - new := &stateV4{ - TerraformVersion: old.TFVersion, - Serial: uint64(old.Serial), - Lineage: old.Lineage, - RootOutputs: map[string]outputStateV4{}, - Resources: []resourceStateV4{}, - } - - if new.TerraformVersion == "" { - // Older formats considered this to be optional, but now it's required - // and so we'll stub it out with something that's definitely older - // than the version that really created this state. - new.TerraformVersion = "0.0.0" - } - - for _, msOld := range old.Modules { - if len(msOld.Path) < 1 || msOld.Path[0] != "root" { - return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path) - } - - // Convert legacy-style module address into our newer address type. - // Since these old formats are only generated by versions of Terraform - // that don't support count and for_each on modules, we can just assume - // all of the modules are unkeyed. - moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1) - for i, name := range msOld.Path[1:] { - moduleAddr[i] = addrs.ModuleInstanceStep{ - Name: name, - InstanceKey: addrs.NoKey, - } - } - - // In a v3 state file, a "resource state" is actually an instance - // state, so we need to fill in a missing level of heirarchy here - // by lazily creating resource states as we encounter them. - // We'll track them in here, keyed on the string representation of - // the resource address. - resourceStates := map[string]*resourceStateV4{} - - for legacyAddr, rsOld := range msOld.Resources { - instAddr, err := parseLegacyResourceAddress(legacyAddr) - if err != nil { - return nil, err - } - - resAddr := instAddr.Resource - rs, exists := resourceStates[resAddr.String()] - if !exists { - var modeStr string - switch resAddr.Mode { - case addrs.ManagedResourceMode: - modeStr = "managed" - case addrs.DataResourceMode: - modeStr = "data" - default: - return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode %#v", resAddr, resAddr.Mode) - } - - // In state versions prior to 4 we allowed each instance of a - // resource to have its own provider configuration address, - // which makes no real sense in practice because providers - // are associated with resources in the configuration. 
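// A sketch of the module-path conversion above, assuming a legacy path of
// []string{"root", "network"}: the leading "root" element is dropped and each
// remaining name becomes an unkeyed step.
//
//	moduleAddr := addrs.ModuleInstance{
//		{Name: "network", InstanceKey: addrs.NoKey},
//	}
//	_ = moduleAddr.String() // renders as "module.network"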
We - // elevate that to the resource level during this upgrade, - // implicitly taking the provider address of the first instance - // we encounter for each resource. While this is lossy in - // theory, in practice there is no reason for these values to - // differ between instances. - var providerAddr addrs.AbsProviderConfig - oldProviderAddr := rsOld.Provider - if strings.Contains(oldProviderAddr, "provider.") { - // Smells like a new-style provider address, but we'll test it. - var diags tfdiags.Diagnostics - providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr) - if diags.HasErrors() { - return nil, fmt.Errorf("invalid provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) - } - } else { - // Smells like an old-style module-local provider address, - // which we'll need to migrate. We'll assume it's referring - // to the same module the resource is in, which might be - // incorrect but it'll get fixed up next time any updates - // are made to an instance. - if oldProviderAddr != "" { - localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr) - if diags.HasErrors() { - return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) - } - providerAddr = localAddr.Absolute(moduleAddr) - } else { - providerAddr = resAddr.DefaultProviderConfig().Absolute(moduleAddr) - } - } - - rs = &resourceStateV4{ - Module: moduleAddr.String(), - Mode: modeStr, - Type: resAddr.Type, - Name: resAddr.Name, - Instances: []instanceObjectStateV4{}, - ProviderConfig: providerAddr.String(), - } - resourceStates[resAddr.String()] = rs - } - - // Now we'll deal with the instance itself, which may either be - // the first instance in a resource we just created or an additional - // instance for a resource added on a prior loop. - instKey := instAddr.Key - if isOld := rsOld.Primary; isOld != nil { - isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed) - if err != nil { - return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err) - } - rs.Instances = append(rs.Instances, *isNew) - } - for i, isOld := range rsOld.Deposed { - // When we migrate old instances we'll use sequential deposed - // keys just so that the upgrade result is deterministic. New - // deposed keys allocated moving forward will be pseudorandomly - // selected, but we check for collisions and so these - // non-random ones won't hurt. - deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1)) - isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey) - if err != nil { - return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err) - } - rs.Instances = append(rs.Instances, *isNew) - } - - if instKey != addrs.NoKey && rs.EachMode == "" { - rs.EachMode = "list" - } - } - - for _, rs := range resourceStates { - new.Resources = append(new.Resources, *rs) - } - - if len(msOld.Path) == 1 && msOld.Path[0] == "root" { - // We'll migrate the outputs for this module too, then. - for name, oldOS := range msOld.Outputs { - newOS := outputStateV4{ - Sensitive: oldOS.Sensitive, - } - - valRaw := oldOS.Value - valSrc, err := json.Marshal(valRaw) - if err != nil { - // Should never happen, because this value came from JSON - // in the first place and so we're just round-tripping here. 
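// A sketch of the sequential deposed keys generated above for migrated
// instances: the first deposed object of an instance gets a fixed
// eight-digit hex key, the second the next one, and so on.
//
//	states.DeposedKey(fmt.Sprintf("%08x", 1)) // "00000001"
//	states.DeposedKey(fmt.Sprintf("%08x", 2)) // "00000002"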
- return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err) - } - - // The "type" field in state V2 wasn't really that useful - // since it was only able to capture string vs. list vs. map. - // For this reason, during upgrade we'll just discard it - // altogether and use cty's idea of the implied type of - // turning our old value into JSON. - ty, err := ctyjson.ImpliedType(valSrc) - if err != nil { - // REALLY should never happen, because we literally just - // encoded this as JSON above! - return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err) - } - - // ImpliedType tends to produce structural types, but since older - // version of Terraform didn't support those a collection type - // is probably what was intended, so we'll see if we can - // interpret our value as one. - ty = simplifyImpliedValueType(ty) - - tySrc, err := ctyjson.MarshalType(ty) - if err != nil { - return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err) - } - - newOS.ValueRaw = json.RawMessage(valSrc) - newOS.ValueTypeRaw = json.RawMessage(tySrc) - - new.RootOutputs[name] = newOS - } - } - } - - new.normalize() - - return new, nil -} - -func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) { - - // Schema versions were, in prior formats, a private concern of the provider - // SDK, and not a first-class concept in the state format. Here we're - // sniffing for the pre-0.12 SDK's way of representing schema versions - // and promoting it to our first-class field if we find it. We'll ignore - // it if it doesn't look like what the SDK would've written. If this - // sniffing fails then we'll assume schema version 0. - var schemaVersion uint64 - migratedSchemaVersion := false - if raw, exists := isOld.Meta["schema_version"]; exists { - switch tv := raw.(type) { - case string: - v, err := strconv.ParseUint(tv, 10, 64) - if err == nil { - schemaVersion = v - migratedSchemaVersion = true - } - case int: - schemaVersion = uint64(tv) - migratedSchemaVersion = true - case float64: - schemaVersion = uint64(tv) - migratedSchemaVersion = true - } - } - - private := map[string]interface{}{} - for k, v := range isOld.Meta { - if k == "schema_version" && migratedSchemaVersion { - // We're gonna promote this into our first-class schema version field - continue - } - private[k] = v - } - var privateJSON []byte - if len(private) != 0 { - var err error - privateJSON, err = json.Marshal(private) - if err != nil { - // This shouldn't happen, because the Meta values all came from JSON - // originally anyway. 
- return nil, fmt.Errorf("cannot serialize private instance object data: %s", err) - } - } - - var status string - if isOld.Tainted { - status = "tainted" - } - - var instKeyRaw interface{} - switch tk := instKey.(type) { - case addrs.IntKey: - instKeyRaw = int(tk) - case addrs.StringKey: - instKeyRaw = string(tk) - default: - if instKeyRaw != nil { - return nil, fmt.Errorf("unsupported instance key: %#v", instKey) - } - } - - var attributes map[string]string - if isOld.Attributes != nil { - attributes = make(map[string]string, len(isOld.Attributes)) - for k, v := range isOld.Attributes { - attributes[k] = v - } - } - if isOld.ID != "" { - // As a special case, if we don't already have an "id" attribute and - // yet there's a non-empty first-class ID on the old object then we'll - // create a synthetic id attribute to avoid losing that first-class id. - // In practice this generally arises only in tests where state literals - // are hand-written in a non-standard way; real code prior to 0.12 - // would always force the first-class ID to be copied into the - // id attribute before storing. - if attributes == nil { - attributes = make(map[string]string, len(isOld.Attributes)) - } - if idVal := attributes["id"]; idVal == "" { - attributes["id"] = isOld.ID - } - } - - dependencies := make([]string, len(rsOld.Dependencies)) - for i, v := range rsOld.Dependencies { - depStr, err := parseLegacyDependency(v) - if err != nil { - return nil, fmt.Errorf("invalid dependency reference %q: %s", v, err) - } - dependencies[i] = depStr - } - - return &instanceObjectStateV4{ - IndexKey: instKeyRaw, - Status: status, - Deposed: string(deposedKey), - AttributesFlat: attributes, - Dependencies: dependencies, - SchemaVersion: schemaVersion, - PrivateRaw: privateJSON, - }, nil -} - -// parseLegacyResourceAddress parses the different identifier format used -// state formats before version 4, like "instance.name.0". -func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) { - var ret addrs.ResourceInstance - - // Split based on ".". Every resource address should have at least two - // elements (type and name). - parts := strings.Split(s, ".") - if len(parts) < 2 || len(parts) > 4 { - return ret, fmt.Errorf("invalid internal resource address format: %s", s) - } - - // Data resource if we have at least 3 parts and the first one is data - ret.Resource.Mode = addrs.ManagedResourceMode - if len(parts) > 2 && parts[0] == "data" { - ret.Resource.Mode = addrs.DataResourceMode - parts = parts[1:] - } - - // If we're not a data resource and we have more than 3, then it is an error - if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode { - return ret, fmt.Errorf("invalid internal resource address format: %s", s) - } - - // Build the parts of the resource address that are guaranteed to exist - ret.Resource.Type = parts[0] - ret.Resource.Name = parts[1] - ret.Key = addrs.NoKey - - // If we have more parts, then we have an index. Parse that. - if len(parts) > 2 { - idx, err := strconv.ParseInt(parts[2], 0, 0) - if err != nil { - return ret, fmt.Errorf("error parsing resource address %q: %s", s, err) - } - - ret.Key = addrs.IntKey(idx) - } - - return ret, nil -} - -// simplifyImpliedValueType attempts to heuristically simplify a value type -// derived from a legacy stored output value into something simpler that -// is closer to what would've fitted into the pre-v0.12 value type system. 
-func simplifyImpliedValueType(ty cty.Type) cty.Type { - switch { - case ty.IsTupleType(): - // If all of the element types are the same then we'll make this - // a list instead. This is very likely to be true, since prior versions - // of Terraform did not officially support mixed-type collections. - - if ty.Equals(cty.EmptyTuple) { - // Don't know what the element type would be, then. - return ty - } - - etys := ty.TupleElementTypes() - ety := etys[0] - for _, other := range etys[1:] { - if !other.Equals(ety) { - // inconsistent types - return ty - } - } - ety = simplifyImpliedValueType(ety) - return cty.List(ety) - - case ty.IsObjectType(): - // If all of the attribute types are the same then we'll make this - // a map instead. This is very likely to be true, since prior versions - // of Terraform did not officially support mixed-type collections. - - if ty.Equals(cty.EmptyObject) { - // Don't know what the element type would be, then. - return ty - } - - atys := ty.AttributeTypes() - var ety cty.Type - for _, other := range atys { - if ety == cty.NilType { - ety = other - continue - } - if !other.Equals(ety) { - // inconsistent types - return ty - } - } - ety = simplifyImpliedValueType(ety) - return cty.Map(ety) - - default: - // No other normalizations are possible - return ty - } -} - -func parseLegacyDependency(s string) (string, error) { - parts := strings.Split(s, ".") - ret := parts[0] - for _, part := range parts[1:] { - if part == "*" { - break - } - if i, err := strconv.Atoi(part); err == nil { - ret = ret + fmt.Sprintf("[%d]", i) - break - } - ret = ret + "." + part - } - - // The result must parse as a reference, or else we'll create an invalid - // state file. - var diags tfdiags.Diagnostics - _, diags = addrs.ParseRefStr(ret) - if diags.HasErrors() { - return "", diags.Err() - } - - return ret, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go deleted file mode 100644 index 164b57f8..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go +++ /dev/null @@ -1,604 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - "io" - "sort" - - version "github.com/hashicorp/go-version" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func readStateV4(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV4 := &stateV4{} - err := json.Unmarshal(src, sV4) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV4(sV4) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - var tfVersion *version.Version - if sV4.TerraformVersion != "" { - var err error - tfVersion, err = version.NewVersion(sV4.TerraformVersion) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid Terraform version string", - fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion), - )) - } - } - - file := &File{ - TerraformVersion: tfVersion, - Serial: sV4.Serial, - Lineage: 
sV4.Lineage, - } - - state := states.NewState() - - for _, rsV4 := range sV4.Resources { - rAddr := addrs.Resource{ - Type: rsV4.Type, - Name: rsV4.Name, - } - switch rsV4.Mode { - case "managed": - rAddr.Mode = addrs.ManagedResourceMode - case "data": - rAddr.Mode = addrs.DataResourceMode - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource mode in state", - fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name), - )) - continue - } - - moduleAddr := addrs.RootModuleInstance - if rsV4.Module != "" { - var addrDiags tfdiags.Diagnostics - moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module) - diags = diags.Append(addrDiags) - if addrDiags.HasErrors() { - continue - } - } - - providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig) - diags.Append(addrDiags) - if addrDiags.HasErrors() { - continue - } - - var eachMode states.EachMode - switch rsV4.EachMode { - case "": - eachMode = states.NoEach - case "list": - eachMode = states.EachList - case "map": - eachMode = states.EachMap - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource metadata in state", - fmt.Sprintf("Resource %s has invalid \"each\" value %q in state.", rAddr.Absolute(moduleAddr), eachMode), - )) - continue - } - - ms := state.EnsureModule(moduleAddr) - - // Ensure the resource container object is present in the state. - ms.SetResourceMeta(rAddr, eachMode, providerAddr) - - for _, isV4 := range rsV4.Instances { - keyRaw := isV4.IndexKey - var key addrs.InstanceKey - switch tk := keyRaw.(type) { - case int: - key = addrs.IntKey(tk) - case float64: - // Since JSON only has one number type, reading from encoding/json - // gives us a float64 here even if the number is whole. - // float64 has a smaller integer range than int, but in practice - // we rarely have more than a few tens of instances and so - // it's unlikely that we'll exhaust the 52 bits in a float64. - key = addrs.IntKey(int(tk)) - case string: - key = addrs.StringKey(tk) - default: - if keyRaw != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw), - )) - continue - } - key = addrs.NoKey - } - - instAddr := rAddr.Instance(key) - - obj := &states.ResourceInstanceObjectSrc{ - SchemaVersion: isV4.SchemaVersion, - } - - { - // Instance attributes - switch { - case isV4.AttributesRaw != nil: - obj.AttrsJSON = isV4.AttributesRaw - case isV4.AttributesFlat != nil: - obj.AttrsFlat = isV4.AttributesFlat - default: - // This is odd, but we'll accept it and just treat the - // object has being empty. In practice this should arise - // only from the contrived sort of state objects we tend - // to hand-write inline in tests. 
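// A sketch of the float64 case above: an "index_key": 0 in the state JSON
// arrives from encoding/json as float64(0) and is converted back to an
// integer instance key.
//
//	var keyRaw interface{} = float64(0) // as decoded by json.Unmarshal
//	key := addrs.IntKey(int(keyRaw.(float64)))
//	_ = key // renders as [0] in instance addresses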
- obj.AttrsJSON = []byte{'{', '}'} - } - } - - { - // Status - raw := isV4.Status - switch raw { - case "": - obj.Status = states.ObjectReady - case "tainted": - obj.Status = states.ObjectTainted - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw), - )) - continue - } - } - - if raw := isV4.PrivateRaw; len(raw) > 0 { - obj.Private = raw - } - - { - depsRaw := isV4.Dependencies - deps := make([]addrs.Referenceable, 0, len(depsRaw)) - for _, depRaw := range depsRaw { - ref, refDiags := addrs.ParseRefStr(depRaw) - diags = diags.Append(refDiags) - if refDiags.HasErrors() { - continue - } - if len(ref.Remaining) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s declares dependency on %q, which is not a reference to a dependable object.", instAddr.Absolute(moduleAddr), depRaw), - )) - } - if ref.Subject == nil { - // Should never happen - panic(fmt.Sprintf("parsing dependency %q for instance %s returned a nil address", depRaw, instAddr.Absolute(moduleAddr))) - } - deps = append(deps, ref.Subject) - } - obj.Dependencies = deps - } - - switch { - case isV4.Deposed != "": - dk := states.DeposedKey(isV4.Deposed) - if len(dk) != 8 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed), - )) - continue - } - is := ms.ResourceInstance(instAddr) - if is.HasDeposed(dk) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Duplicate resource instance in state", - fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk), - )) - continue - } - - ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr) - default: - is := ms.ResourceInstance(instAddr) - if is.HasCurrent() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Duplicate resource instance in state", - fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)), - )) - continue - } - - ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr) - } - } - - // We repeat this after creating the instances because - // SetResourceInstanceCurrent automatically resets this metadata based - // on the incoming objects. That behavior is useful when we're making - // piecemeal updates to the state during an apply, but when we're - // reading the state file we want to reflect its contents exactly. - ms.SetResourceMeta(rAddr, eachMode, providerAddr) - } - - // The root module is special in that we persist its attributes and thus - // need to reload them now. (For descendent modules we just re-calculate - // them based on the latest configuration on each run.) 
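// A sketch of the status mapping read here (and written back by writeStateV4
// below): the empty string is the zero value in JSON, so only tainted
// objects carry an explicit status.
//
//	""        <-> states.ObjectReady
//	"tainted" <-> states.ObjectTainted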
- { - rootModule := state.RootModule() - for name, fos := range sV4.RootOutputs { - os := &states.OutputValue{} - os.Sensitive = fos.Sensitive - - ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw)) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid output value type in state", - fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err), - )) - continue - } - - val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid output value saved in state", - fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err), - )) - continue - } - - os.Value = val - rootModule.OutputValues[name] = os - } - } - - file.State = state - return file, diags -} - -func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics { - // Here we'll convert back from the "File" representation to our - // stateV4 struct representation and write that. - // - // While we support legacy state formats for reading, we only support the - // latest for writing and so if a V5 is added in future then this function - // should be deleted and replaced with a writeStateV5, even though the - // read/prepare V4 functions above would stick around. - - var diags tfdiags.Diagnostics - if file == nil || file.State == nil { - panic("attempt to write nil state to file") - } - - var terraformVersion string - if file.TerraformVersion != nil { - terraformVersion = file.TerraformVersion.String() - } - - sV4 := &stateV4{ - TerraformVersion: terraformVersion, - Serial: file.Serial, - Lineage: file.Lineage, - RootOutputs: map[string]outputStateV4{}, - Resources: []resourceStateV4{}, - } - - for name, os := range file.State.RootModule().OutputValues { - src, err := ctyjson.Marshal(os.Value, os.Value.Type()) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize output value in state", - fmt.Sprintf("An error occured while serializing output value %q: %s.", name, err), - )) - continue - } - - typeSrc, err := ctyjson.MarshalType(os.Value.Type()) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize output value in state", - fmt.Sprintf("An error occured while serializing the type of output value %q: %s.", name, err), - )) - continue - } - - sV4.RootOutputs[name] = outputStateV4{ - Sensitive: os.Sensitive, - ValueRaw: json.RawMessage(src), - ValueTypeRaw: json.RawMessage(typeSrc), - } - } - - for _, ms := range file.State.Modules { - moduleAddr := ms.Addr - for _, rs := range ms.Resources { - resourceAddr := rs.Addr - - var mode string - switch resourceAddr.Mode { - case addrs.ManagedResourceMode: - mode = "managed" - case addrs.DataResourceMode: - mode = "data" - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource in state", - fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode), - )) - continue - } - - var eachMode string - switch rs.EachMode { - case states.NoEach: - eachMode = "" - case states.EachList: - eachMode = "list" - case states.EachMap: - eachMode = "map" - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource in state", - fmt.Sprintf("Resource %s has \"each\" mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), rs.EachMode), - )) - continue - } - - sV4.Resources 
= append(sV4.Resources, resourceStateV4{ - Module: moduleAddr.String(), - Mode: mode, - Type: resourceAddr.Type, - Name: resourceAddr.Name, - EachMode: eachMode, - ProviderConfig: rs.ProviderConfig.String(), - Instances: []instanceObjectStateV4{}, - }) - rsV4 := &(sV4.Resources[len(sV4.Resources)-1]) - - for key, is := range rs.Instances { - if is.HasCurrent() { - var objDiags tfdiags.Diagnostics - rsV4.Instances, objDiags = appendInstanceObjectStateV4( - rs, is, key, is.Current, states.NotDeposed, - rsV4.Instances, - ) - diags = diags.Append(objDiags) - } - for dk, obj := range is.Deposed { - var objDiags tfdiags.Diagnostics - rsV4.Instances, objDiags = appendInstanceObjectStateV4( - rs, is, key, obj, dk, - rsV4.Instances, - ) - diags = diags.Append(objDiags) - } - } - } - } - - sV4.normalize() - - src, err := json.MarshalIndent(sV4, "", " ") - if err != nil { - // Shouldn't happen if we do our conversion to *stateV4 correctly above. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize state", - fmt.Sprintf("An error occured while serializing the state to save it. This is a bug in Terraform and should be reported: %s.", err), - )) - return diags - } - src = append(src, '\n') - - _, err = w.Write(src) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to write state", - fmt.Sprintf("An error occured while writing the serialized state: %s.", err), - )) - return diags - } - - return diags -} - -func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - var status string - switch obj.Status { - case states.ObjectReady: - status = "" - case states.ObjectTainted: - status = "tainted" - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource instance in state", - fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status), - )) - } - - var privateRaw []byte - if len(obj.Private) > 0 { - privateRaw = obj.Private - } - - deps := make([]string, len(obj.Dependencies)) - for i, depAddr := range obj.Dependencies { - deps[i] = depAddr.String() - } - - var rawKey interface{} - switch tk := key.(type) { - case addrs.IntKey: - rawKey = int(tk) - case addrs.StringKey: - rawKey = string(tk) - default: - if key != addrs.NoKey { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource instance in state", - fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key), - )) - } - } - - return append(isV4s, instanceObjectStateV4{ - IndexKey: rawKey, - Deposed: string(deposed), - Status: status, - SchemaVersion: obj.SchemaVersion, - AttributesFlat: obj.AttrsFlat, - AttributesRaw: obj.AttrsJSON, - PrivateRaw: privateRaw, - Dependencies: deps, - }), diags -} - -type stateV4 struct { - Version stateVersionV4 `json:"version"` - TerraformVersion string `json:"terraform_version"` - Serial uint64 `json:"serial"` - Lineage string `json:"lineage"` - RootOutputs map[string]outputStateV4 `json:"outputs"` - Resources []resourceStateV4 `json:"resources"` -} - -// normalize makes some in-place changes to normalize the way items are -// stored to ensure that two functionally-equivalent states will be stored -// identically. 
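// A minimal sketch of the document the stateV4 struct above serializes to,
// with hypothetical version and lineage values:
//
//	{
//	  "version": 4,
//	  "terraform_version": "0.12.0",
//	  "serial": 1,
//	  "lineage": "example-lineage",
//	  "outputs": {},
//	  "resources": []
//	}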
-func (s *stateV4) normalize() { - sort.Stable(sortResourcesV4(s.Resources)) - for _, rs := range s.Resources { - sort.Stable(sortInstancesV4(rs.Instances)) - } -} - -type outputStateV4 struct { - ValueRaw json.RawMessage `json:"value"` - ValueTypeRaw json.RawMessage `json:"type"` - Sensitive bool `json:"sensitive,omitempty"` -} - -type resourceStateV4 struct { - Module string `json:"module,omitempty"` - Mode string `json:"mode"` - Type string `json:"type"` - Name string `json:"name"` - EachMode string `json:"each,omitempty"` - ProviderConfig string `json:"provider"` - Instances []instanceObjectStateV4 `json:"instances"` -} - -type instanceObjectStateV4 struct { - IndexKey interface{} `json:"index_key,omitempty"` - Status string `json:"status,omitempty"` - Deposed string `json:"deposed,omitempty"` - - SchemaVersion uint64 `json:"schema_version"` - AttributesRaw json.RawMessage `json:"attributes,omitempty"` - AttributesFlat map[string]string `json:"attributes_flat,omitempty"` - - PrivateRaw []byte `json:"private,omitempty"` - - Dependencies []string `json:"depends_on,omitempty"` -} - -// stateVersionV4 is a weird special type we use to produce our hard-coded -// "version": 4 in the JSON serialization. -type stateVersionV4 struct{} - -func (sv stateVersionV4) MarshalJSON() ([]byte, error) { - return []byte{'4'}, nil -} - -func (sv stateVersionV4) UnmarshalJSON([]byte) error { - // Nothing to do: we already know we're version 4 - return nil -} - -type sortResourcesV4 []resourceStateV4 - -func (sr sortResourcesV4) Len() int { return len(sr) } -func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] } -func (sr sortResourcesV4) Less(i, j int) bool { - switch { - case sr[i].Mode != sr[j].Mode: - return sr[i].Mode < sr[j].Mode - case sr[i].Type != sr[j].Type: - return sr[i].Type < sr[j].Type - case sr[i].Name != sr[j].Name: - return sr[i].Name < sr[j].Name - default: - return false - } -} - -type sortInstancesV4 []instanceObjectStateV4 - -func (si sortInstancesV4) Len() int { return len(si) } -func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] } -func (si sortInstancesV4) Less(i, j int) bool { - ki := si[i].IndexKey - kj := si[j].IndexKey - if ki != kj { - if (ki == nil) != (kj == nil) { - return ki == nil - } - if kii, isInt := ki.(int); isInt { - if kji, isInt := kj.(int); isInt { - return kii < kji - } - return true - } - if kis, isStr := ki.(string); isStr { - if kjs, isStr := kj.(string); isStr { - return kis < kjs - } - return true - } - } - if si[i].Deposed != si[j].Deposed { - return si[i].Deposed < si[j].Deposed - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go deleted file mode 100644 index 8fdca458..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go +++ /dev/null @@ -1,17 +0,0 @@ -package statefile - -import ( - "io" - - tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" -) - -// Write writes the given state to the given writer in the current state -// serialization format. -func Write(s *File, w io.Writer) error { - // Always record the current terraform version in the state. 
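// The instance ordering enforced by sortInstancesV4 above places nil index
// keys first, then integer keys in numeric order, then string keys in
// lexical order; for example:
//
//	nil, 0, 1, 2, "a", "b"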
- s.TerraformVersion = tfversion.SemVer - - diags := writeStateV4(s, w) - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go deleted file mode 100644 index 6d236125..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go +++ /dev/null @@ -1,484 +0,0 @@ -package states - -import ( - "log" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// SyncState is a wrapper around State that provides concurrency-safe access to -// various common operations that occur during a Terraform graph walk, or other -// similar concurrent contexts. -// -// When a SyncState wrapper is in use, no concurrent direct access to the -// underlying objects is permitted unless the caller first acquires an explicit -// lock, using the Lock and Unlock methods. Most callers should _not_ -// explicitly lock, and should instead use the other methods of this type that -// handle locking automatically. -// -// Since SyncState is able to safely consolidate multiple updates into a single -// atomic operation, many of its methods are at a higher level than those -// of the underlying types, and operate on the state as a whole rather than -// on individual sub-structures of the state. -// -// SyncState can only protect against races within its own methods. It cannot -// provide any guarantees about the order in which concurrent operations will -// be processed, so callers may still need to employ higher-level techniques -// for ensuring correct operation sequencing, such as building and walking -// a dependency graph. -type SyncState struct { - state *State - lock sync.RWMutex -} - -// Module returns a snapshot of the state of the module instance with the given -// address, or nil if no such module is tracked. -// -// The return value is a pointer to a copy of the module state, which the -// caller may then freely access and mutate. However, since the module state -// tends to be a large data structure with many child objects, where possible -// callers should prefer to use a more granular accessor to access a child -// module directly, and thus reduce the amount of copying required. -func (s *SyncState) Module(addr addrs.ModuleInstance) *Module { - s.lock.RLock() - ret := s.state.Module(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// OutputValue returns a snapshot of the state of the output value with the -// given address, or nil if no such output value is tracked. -// -// The return value is a pointer to a copy of the output value state, which the -// caller may then freely access and mutate. -func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue { - s.lock.RLock() - ret := s.state.OutputValue(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// SetOutputValue writes a given output value into the state, overwriting -// any existing value of the same name. -// -// If the module containing the output is not yet tracked in state then it -// be added as a side-effect. -func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetOutputValue(addr.OutputValue.Name, value, sensitive) -} - -// RemoveOutputValue removes the stored value for the output value with the -// given address. 
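// A sketch of the two access styles, assuming a populated *SyncState named
// ss: most callers use the copying accessors, while explicit locking remains
// available as an escape hatch.
//
//	mod := ss.Module(addrs.RootModuleInstance) // concurrency-safe deep copy
//	_ = mod
//
//	st := ss.Lock() // direct access; caller must Unlock
//	defer ss.Unlock()
//	_ = st.RootModule()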
-// -// If this results in its containing module being empty, the module will be -// pruned from the state as a side-effect. -func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return - } - ms.RemoveOutputValue(addr.OutputValue.Name) - s.maybePruneModule(addr.Module) -} - -// LocalValue returns the current value associated with the given local value -// address. -func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value { - s.lock.RLock() - // cty.Value is immutable, so we don't need any extra copying here. - ret := s.state.LocalValue(addr) - s.lock.RUnlock() - return ret -} - -// SetLocalValue writes a given output value into the state, overwriting -// any existing value of the same name. -// -// If the module containing the local value is not yet tracked in state then it -// will be added as a side-effect. -func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetLocalValue(addr.LocalValue.Name, value) -} - -// RemoveLocalValue removes the stored value for the local value with the -// given address. -// -// If this results in its containing module being empty, the module will be -// pruned from the state as a side-effect. -func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return - } - ms.RemoveLocalValue(addr.LocalValue.Name) - s.maybePruneModule(addr.Module) -} - -// Resource returns a snapshot of the state of the resource with the given -// address, or nil if no such resource is tracked. -// -// The return value is a pointer to a copy of the resource state, which the -// caller may then freely access and mutate. -func (s *SyncState) Resource(addr addrs.AbsResource) *Resource { - s.lock.RLock() - ret := s.state.Resource(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// ResourceInstance returns a snapshot of the state the resource instance with -// the given address, or nil if no such instance is tracked. -// -// The return value is a pointer to a copy of the instance state, which the -// caller may then freely access and mutate. -func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { - s.lock.RLock() - ret := s.state.ResourceInstance(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// ResourceInstanceObject returns a snapshot of the current instance object -// of the given generation belonging to the instance with the given address, -// or nil if no such object is tracked.. -// -// The return value is a pointer to a copy of the object, which the caller may -// then freely access and mutate. -func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc { - s.lock.RLock() - defer s.lock.RUnlock() - - inst := s.state.ResourceInstance(addr) - if inst == nil { - return nil - } - return inst.GetGeneration(gen).DeepCopy() -} - -// SetResourceMeta updates the resource-level metadata for the resource at -// the given address, creating the containing module state and resource state -// as a side-effect if not already present. 
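// Because these accessors return deep copies, mutating a snapshot never
// changes the tracked state; writes must go through an explicit mutator. A
// sketch, assuming addr and providerAddr are in scope:
//
//	rs := ss.Resource(addr)       // detached copy
//	rs.EachMode = states.EachList // affects only the copy
//	ss.SetResourceMeta(addr, states.EachList, providerAddr) // persists it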
-func (s *SyncState) SetResourceMeta(addr addrs.AbsResource, eachMode EachMode, provider addrs.AbsProviderConfig) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetResourceMeta(addr.Resource, eachMode, provider) -} - -// RemoveResourceIfEmpty is similar to RemoveResource but first checks to -// make sure there are no instances or objects left in the resource. -// -// Returns true if the resource was removed, or false if remaining child -// objects prevented its removal. Returns true also if the resource was -// already absent, and thus no action needed to be taken. -func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return true // nothing to do - } - rs := ms.Resource(addr.Resource) - if rs == nil { - return true // nothing to do - } - if len(rs.Instances) != 0 { - // We don't check here for the possibility of instances that exist - // but don't have any objects because it's the responsibility of the - // instance-mutation methods to prune those away automatically. - return false - } - ms.RemoveResource(addr.Resource) - s.maybePruneModule(addr.Module) - return true -} - -// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a -// resource has changed from having "count" set to not set, or vice-versa, and -// so we need to rename the zeroth instance key to no key at all, or vice-versa. -// -// Set countEnabled to true if the resource has count set in its new -// configuration, or false if it does not. -// -// The state is modified in-place if necessary, moving a resource instance -// between the two addresses. The return value is true if a change was made, -// and false otherwise. -func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.AbsResource, countEnabled bool) bool { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return false - } - - relAddr := addr.Resource - rs := ms.Resource(relAddr) - if rs == nil { - return false - } - huntKey := addrs.NoKey - replaceKey := addrs.InstanceKey(addrs.IntKey(0)) - if !countEnabled { - huntKey, replaceKey = replaceKey, huntKey - } - - is, exists := rs.Instances[huntKey] - if !exists { - return false - } - - if _, exists := rs.Instances[replaceKey]; exists { - // If the replacement key also exists then we'll do nothing and keep both. - return false - } - - // If we get here then we need to "rename" from hunt to replace - rs.Instances[replaceKey] = is - delete(rs.Instances, huntKey) - return true -} - -// SetResourceInstanceCurrent saves the given instance object as the current -// generation of the resource instance with the given address, simultaneously -// updating the recorded provider configuration address, dependencies, and -// resource EachMode. -// -// Any existing current instance object for the given resource is overwritten. -// Set obj to nil to remove the primary generation object altogether. If there -// are no deposed objects then the instance as a whole will be removed, which -// may in turn also remove the containing module if it becomes empty. -// -// The caller must ensure that the given ResourceInstanceObject is not -// concurrently mutated during this call, but may be freely used again once -// this function returns. 
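// A sketch of the fix-up performed by MaybeFixUpResourceInstanceAddressForCount
// above: toggling count on a resource moves its single instance between the
// unkeyed address and index zero.
//
//	ss.MaybeFixUpResourceInstanceAddressForCount(addr, true)
//	// aws_instance.web    -> aws_instance.web[0]
//	ss.MaybeFixUpResourceInstanceAddressForCount(addr, false)
//	// aws_instance.web[0] -> aws_instance.web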
-// -// The provider address and "each mode" are resource-wide settings and so they -// are updated for all other instances of the same resource as a side-effect of -// this call. -// -// If the containing module for this resource or the resource itself are not -// already tracked in state then they will be added as a side-effect. -func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider) - s.maybePruneModule(addr.Module) -} - -// SetResourceInstanceDeposed saves the given instance object as a deposed -// generation of the resource instance with the given address and deposed key. -// -// Call this method only for pre-existing deposed objects that already have -// a known DeposedKey. For example, this method is useful if reloading objects -// that were persisted to a state file. To mark the current object as deposed, -// use DeposeResourceInstanceObject instead. -// -// The caller must ensure that the given ResourceInstanceObject is not -// concurrently mutated during this call, but may be freely used again once -// this function returns. -// -// The resource that contains the given instance must already exist in the -// state, or this method will panic. Use Resource to check first if its -// presence is not already guaranteed. -// -// Any existing current instance object for the given resource and deposed key -// is overwritten. Set obj to nil to remove the deposed object altogether. If -// the instance is left with no objects after this operation then it will -// be removed from its containing resource altogether. -// -// If the containing module for this resource or the resource itself are not -// already tracked in state then they will be added as a side-effect. -func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider) - s.maybePruneModule(addr.Module) -} - -// DeposeResourceInstanceObject moves the current instance object for the -// given resource instance address into the deposed set, leaving the instance -// without a current object. -// -// The return value is the newly-allocated deposed key, or NotDeposed if the -// given instance is already lacking a current object. -// -// If the containing module for this resource or the resource itself are not -// already tracked in state then there cannot be a current object for the -// given instance, and so NotDeposed will be returned without modifying the -// state at all. -func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return NotDeposed - } - - return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed) -} - -// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject -// but uses a pre-allocated key. It's the caller's responsibility to ensure -// that there aren't any races to use a particular key; this method will panic -// if the given key is already in use. 
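// A sketch of the create-before-destroy lifecycle these methods support,
// assuming instAddr currently has a current object:
//
//	dk := ss.DeposeResourceInstanceObject(instAddr) // current -> deposed
//	// ... attempt to create the replacement; on failure, roll back:
//	restored := ss.MaybeRestoreResourceInstanceDeposed(instAddr, dk)
//	_ = restored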
-func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) { - s.lock.Lock() - defer s.lock.Unlock() - - if forcedKey == NotDeposed { - // Usage error: should use DeposeResourceInstanceObject in this case - panic("DeposeResourceInstanceObjectForceKey called without forced key") - } - - ms := s.state.Module(addr.Module) - if ms == nil { - return // Nothing to do, since there can't be any current object either. - } - - ms.deposeResourceInstanceObject(addr.Resource, forcedKey) -} - -// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the -// given key on the specified resource as the current object for that instance -// if and only if that would not cause us to forget an existing current -// object for that instance. -// -// Returns true if the object was restored to current, or false if no change -// was made at all. -func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool { - s.lock.Lock() - defer s.lock.Unlock() - - if key == NotDeposed { - panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey") - } - - ms := s.state.Module(addr.Module) - if ms == nil { - // Nothing to do, since the specified deposed object cannot exist. - return false - } - - return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key) -} - -// RemovePlannedResourceInstanceObjects removes from the state any resource -// instance objects that have the status ObjectPlanned, indiciating that they -// are just transient placeholders created during planning. -// -// Note that this does not restore any "ready" or "tainted" object that might -// have been present before the planned object was written. The only real use -// for this method is in preparing the state created during a refresh walk, -// where we run the planning step for certain instances just to create enough -// information to allow correct expression evaluation within provider and -// data resource blocks. Discarding planned instances in that case is okay -// because the refresh phase only creates planned objects to stand in for -// objects that don't exist yet, and thus the planned object must have been -// absent before by definition. -func (s *SyncState) RemovePlannedResourceInstanceObjects() { - // TODO: Merge together the refresh and plan phases into a single walk, - // so we can remove the need to create this "partial plan" during refresh - // that we then need to clean up before proceeding. - - s.lock.Lock() - defer s.lock.Unlock() - - for _, ms := range s.state.Modules { - moduleAddr := ms.Addr - - for _, rs := range ms.Resources { - resAddr := rs.Addr - - for ik, is := range rs.Instances { - instAddr := resAddr.Instance(ik) - - if is.Current != nil && is.Current.Status == ObjectPlanned { - // Setting the current instance to nil removes it from the - // state altogether if there are not also deposed instances. - ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig) - } - - for dk, obj := range is.Deposed { - // Deposed objects should never be "planned", but we'll - // do this anyway for the sake of completeness. - if obj.Status == ObjectPlanned { - ms.ForgetResourceInstanceDeposed(instAddr, dk) - } - } - } - } - - // We may have deleted some objects, which means that we may have - // left a module empty, and so we must prune to preserve the invariant - // that only the root module is allowed to be empty. 
- s.maybePruneModule(moduleAddr) - } -} - -// Lock acquires an explicit lock on the state, allowing direct read and write -// access to the returned state object. The caller must call Unlock once -// access is no longer needed, and then immediately discard the state pointer -// pointer. -// -// Most callers should not use this. Instead, use the concurrency-safe -// accessors and mutators provided directly on SyncState. -func (s *SyncState) Lock() *State { - s.lock.Lock() - return s.state -} - -// Unlock releases a lock previously acquired by Lock, at which point the -// caller must cease all use of the state pointer that was returned. -// -// Do not call this method except to end an explicit lock acquired by -// Lock. If a caller calls Unlock without first holding the lock, behavior -// is undefined. -func (s *SyncState) Unlock() { - s.lock.Unlock() -} - -// maybePruneModule will remove a module from the state altogether if it is -// empty, unless it's the root module which must always be present. -// -// This helper method is not concurrency-safe on its own, so must only be -// called while the caller is already holding the lock for writing. -func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) { - if addr.IsRoot() { - // We never prune the root. - return - } - - ms := s.state.Module(addr) - if ms == nil { - return - } - - if ms.empty() { - log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr) - s.state.RemoveModule(addr) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go deleted file mode 100644 index 59c06b70..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go +++ /dev/null @@ -1,372 +0,0 @@ -package tfdiags - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// The "contextual" family of diagnostics are designed to allow separating -// the detection of a problem from placing that problem in context. For -// example, some code that is validating an object extracted from configuration -// may not have access to the configuration that generated it, but can still -// report problems within that object which the caller can then place in -// context by calling IsConfigBody on the returned diagnostics. -// -// When contextual diagnostics are used, the documentation for a method must -// be very explicit about what context is implied for any diagnostics returned, -// to help ensure the expected result. - -// contextualFromConfig is an interface type implemented by diagnostic types -// that can elaborate themselves when given information about the configuration -// body they are embedded in. -// -// Usually this entails extracting source location information in order to -// populate the "Subject" range. -type contextualFromConfigBody interface { - ElaborateFromConfigBody(hcl.Body) Diagnostic -} - -// InConfigBody returns a copy of the receiver with any config-contextual -// diagnostics elaborated in the context of the given body. 
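// A sketch of the two-phase pattern, assuming a validation step that has no
// access to the configuration source:
//
//	var diags tfdiags.Diagnostics
//	diags = diags.Append(tfdiags.AttributeValue(
//		tfdiags.Error,
//		"Invalid value",
//		`The "name" attribute must not be empty.`,
//		cty.GetAttrPath("name"),
//	))
//	// Later, where the config body is available:
//	diags = diags.InConfigBody(body)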
-func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics { - if len(d) == 0 { - return nil - } - - ret := make(Diagnostics, len(d)) - for i, srcDiag := range d { - if cd, isCD := srcDiag.(contextualFromConfigBody); isCD { - ret[i] = cd.ElaborateFromConfigBody(body) - } else { - ret[i] = srcDiag - } - } - - return ret -} - -// AttributeValue returns a diagnostic about an attribute value in an implied current -// configuration context. This should be returned only from functions whose -// interface specifies a clear configuration context that this will be -// resolved in. -// -// The given path is relative to the implied configuration context. To describe -// a top-level attribute, it should be a single-element cty.Path with a -// cty.GetAttrStep. It's assumed that the path is returning into a structure -// that would be produced by our conventions in the configschema package; it -// may return unexpected results for structures that can't be represented by -// configschema. -// -// Since mapping attribute paths back onto configuration is an imprecise -// operation (e.g. dynamic block generation may cause the same block to be -// evaluated multiple times) the diagnostic detail should include the attribute -// name and other context required to help the user understand what is being -// referenced in case the identified source range is not unique. -// -// The returned attribute will not have source location information until -// context is applied to the containing diagnostics using diags.InConfigBody. -// After context is applied, the source location is the value assigned to the -// named attribute, or the containing body's "missing item range" if no -// value is present. -func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { - return &attributeDiagnostic{ - diagnosticBase: diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - }, - attrPath: attrPath, - } -} - -// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains -// one. Normally this is not accessed directly, and instead the config body is -// added to the Diagnostic to create a more complete message for the user. In -// some cases however, we may want to know just the name of the attribute that -// generated the Diagnostic message. -// This returns a nil cty.Path if it does not exist in the Diagnostic. -func GetAttribute(d Diagnostic) cty.Path { - if d, ok := d.(*attributeDiagnostic); ok { - return d.attrPath - } - return nil -} - -type attributeDiagnostic struct { - diagnosticBase - attrPath cty.Path - subject *SourceRange // populated only after ElaborateFromConfigBody -} - -// ElaborateFromConfigBody finds the most accurate possible source location -// for a diagnostic's attribute path within the given body. -// -// Backing out from a path back to a source location is not always entirely -// possible because we lose some information in the decoding process, so -// if an exact position cannot be found then the returned diagnostic will -// refer to a position somewhere within the containing body, which is assumed -// to be better than no location at all. -// -// If possible it is generally better to report an error at a layer where -// source location information is still available, for more accuracy. This -// is not always possible due to system architecture, so this serves as a -// "best effort" fallback behavior for such situations. 
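// A sketch of pulling the bare path back out when only the attribute name is
// needed, continuing the example above:
//
//	for _, diag := range diags {
//		if path := tfdiags.GetAttribute(diag); path != nil {
//			_ = path // e.g. cty.GetAttrPath("name")
//		}
//	}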
-func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { - if len(d.attrPath) < 1 { - // Should never happen, but we'll allow it rather than crashing. - return d - } - - if d.subject != nil { - // Don't modify an already-elaborated diagnostic. - return d - } - - ret := *d - - // This function will often end up re-decoding values that were already - // decoded by an earlier step. This is non-ideal but is architecturally - // more convenient than arranging for source location information to be - // propagated to every place in Terraform, and this happens only in the - // presence of errors where performance isn't a concern. - - traverse := d.attrPath[:] - final := d.attrPath[len(d.attrPath)-1] - - // Index should never be the first step - // as indexing of top blocks (such as resources & data sources) - // is handled elsewhere - if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep { - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - return &ret - } - - // Process index separately - idxStep, hasIdx := final.(cty.IndexStep) - if hasIdx { - final = d.attrPath[len(d.attrPath)-2] - traverse = d.attrPath[:len(d.attrPath)-1] - } - - // If we have more than one step after removing index - // then we'll first try to traverse to a child body - // corresponding to the requested path. - if len(traverse) > 1 { - body = traversePathSteps(traverse, body) - } - - // Default is to indicate a missing item in the deepest body we reached - // while traversing. - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - - // Once we get here, "final" should be a GetAttr step that maps to an - // attribute in our current body. - finalStep, isAttr := final.(cty.GetAttrStep) - if !isAttr { - return &ret - } - - content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: finalStep.Name, - Required: true, - }, - }, - }) - if contentDiags.HasErrors() { - return &ret - } - - if attr, ok := content.Attributes[finalStep.Name]; ok { - hclRange := attr.Expr.Range() - if hasIdx { - // Try to be more precise by finding index range - hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr) - } - subject = SourceRangeFromHCL(hclRange) - ret.subject = &subject - } - - return &ret -} - -func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body { - for i := 0; i < len(traverse); i++ { - step := traverse[i] - - switch tStep := step.(type) { - case cty.GetAttrStep: - - var next cty.PathStep - if i < (len(traverse) - 1) { - next = traverse[i+1] - } - - // Will be indexing into our result here? - var indexType cty.Type - var indexVal cty.Value - if nextIndex, ok := next.(cty.IndexStep); ok { - indexVal = nextIndex.Key - indexType = indexVal.Type() - i++ // skip over the index on subsequent iterations - } - - var blockLabelNames []string - if indexType == cty.String { - // Map traversal means we expect one label for the key. - blockLabelNames = []string{"key"} - } - - // For intermediate steps we expect to be referring to a child - // block, so we'll attempt decoding under that assumption. 
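// For example, a nested attribute path such as the following is resolved by
// walking one level of "network_interface" blocks (using the index step to
// pick a block) and then locating its "name" attribute (a sketch with a
// hypothetical schema):
//
//	path := cty.GetAttrPath("network_interface").
//		Index(cty.NumberIntVal(0)).
//		GetAttr("name")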
- content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: tStep.Name, - LabelNames: blockLabelNames, - }, - }, - }) - if contentDiags.HasErrors() { - return body - } - filtered := make([]*hcl.Block, 0, len(content.Blocks)) - for _, block := range content.Blocks { - if block.Type == tStep.Name { - filtered = append(filtered, block) - } - } - if len(filtered) == 0 { - // Step doesn't refer to a block - continue - } - - switch indexType { - case cty.NilType: // no index at all - if len(filtered) != 1 { - return body - } - body = filtered[0].Body - case cty.Number: - var idx int - err := gocty.FromCtyValue(indexVal, &idx) - if err != nil || idx >= len(filtered) { - return body - } - body = filtered[idx].Body - case cty.String: - key := indexVal.AsString() - var block *hcl.Block - for _, candidate := range filtered { - if candidate.Labels[0] == key { - block = candidate - break - } - } - if block == nil { - // No block with this key, so we'll just indicate a - // missing item in the containing block. - return body - } - body = block.Body - default: - // Should never happen, because only string and numeric indices - // are supported by cty collections. - return body - } - - default: - // For any other kind of step, we'll just return our current body - // as the subject and accept that this is a little inaccurate. - return body - } - } - return body -} - -func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range { - switch idxStep.Key.Type() { - case cty.Number: - var idx int - err := gocty.FromCtyValue(idxStep.Key, &idx) - items, diags := hcl.ExprList(attr.Expr) - if diags.HasErrors() { - return attr.Expr.Range() - } - if err != nil || idx >= len(items) { - return attr.NameRange - } - return items[idx].Range() - case cty.String: - pairs, diags := hcl.ExprMap(attr.Expr) - if diags.HasErrors() { - return attr.Expr.Range() - } - stepKey := idxStep.Key.AsString() - for _, kvPair := range pairs { - key, err := kvPair.Key.Value(nil) - if err != nil { - return attr.Expr.Range() - } - if key.AsString() == stepKey { - startRng := kvPair.Value.StartRange() - return startRng - } - } - return attr.NameRange - } - return attr.Expr.Range() -} - -func (d *attributeDiagnostic) Source() Source { - return Source{ - Subject: d.subject, - } -} - -// WholeContainingBody returns a diagnostic about the body that is an implied -// current configuration context. This should be returned only from -// functions whose interface specifies a clear configuration context that this -// will be resolved in. -// -// The returned attribute will not have source location information until -// context is applied to the containing diagnostics using diags.InConfigBody. -// After context is applied, the source location is currently the missing item -// range of the body. In future, this may change to some other suitable -// part of the containing body. -func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { - return &wholeBodyDiagnostic{ - diagnosticBase: diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - }, - } -} - -type wholeBodyDiagnostic struct { - diagnosticBase - subject *SourceRange // populated only after ElaborateFromConfigBody -} - -func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { - if d.subject != nil { - // Don't modify an already-elaborated diagnostic. 
- return d - } - - ret := *d - rng := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &rng - return &ret -} - -func (d *wholeBodyDiagnostic) Source() Source { - return Source{ - Subject: d.subject, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go deleted file mode 100644 index a7699cf0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go +++ /dev/null @@ -1,40 +0,0 @@ -package tfdiags - -import ( - "github.com/hashicorp/hcl/v2" -) - -type Diagnostic interface { - Severity() Severity - Description() Description - Source() Source - - // FromExpr returns the expression-related context for the diagnostic, if - // available. Returns nil if the diagnostic is not related to an - // expression evaluation. - FromExpr() *FromExpr -} - -type Severity rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity - -const ( - Error Severity = 'E' - Warning Severity = 'W' -) - -type Description struct { - Summary string - Detail string -} - -type Source struct { - Subject *SourceRange - Context *SourceRange -} - -type FromExpr struct { - Expression hcl.Expression - EvalContext *hcl.EvalContext -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go deleted file mode 100644 index 8c781611..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go +++ /dev/null @@ -1,87 +0,0 @@ -package tfdiags - -import ( - "github.com/hashicorp/hcl/v2" -) - -// hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic -type hclDiagnostic struct { - diag *hcl.Diagnostic -} - -var _ Diagnostic = hclDiagnostic{} - -func (d hclDiagnostic) Severity() Severity { - switch d.diag.Severity { - case hcl.DiagWarning: - return Warning - default: - return Error - } -} - -func (d hclDiagnostic) Description() Description { - return Description{ - Summary: d.diag.Summary, - Detail: d.diag.Detail, - } -} - -func (d hclDiagnostic) Source() Source { - var ret Source - if d.diag.Subject != nil { - rng := SourceRangeFromHCL(*d.diag.Subject) - ret.Subject = &rng - } - if d.diag.Context != nil { - rng := SourceRangeFromHCL(*d.diag.Context) - ret.Context = &rng - } - return ret -} - -func (d hclDiagnostic) FromExpr() *FromExpr { - if d.diag.Expression == nil || d.diag.EvalContext == nil { - return nil - } - return &FromExpr{ - Expression: d.diag.Expression, - EvalContext: d.diag.EvalContext, - } -} - -// SourceRangeFromHCL constructs a SourceRange from the corresponding range -// type within the HCL package. -func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { - return SourceRange{ - Filename: hclRange.Filename, - Start: SourcePos{ - Line: hclRange.Start.Line, - Column: hclRange.Start.Column, - Byte: hclRange.Start.Byte, - }, - End: SourcePos{ - Line: hclRange.End.Line, - Column: hclRange.End.Column, - Byte: hclRange.End.Byte, - }, - } -} - -// ToHCL constructs a HCL Range from the receiving SourceRange. This is the -// opposite of SourceRangeFromHCL. 
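A quick illustration of the round trip, as seen from inside the tfdiags package (positions chosen arbitrarily; StartString is defined just below):

hclRng := hcl.Range{
    Filename: "main.tf",
    Start:    hcl.Pos{Line: 3, Column: 1, Byte: 40},
    End:      hcl.Pos{Line: 3, Column: 8, Byte: 47},
}
rng := SourceRangeFromHCL(hclRng)
fmt.Println(rng.StartString())     // "main.tf:3,1"
fmt.Println(rng.ToHCL() == hclRng) // true; the conversion is lossless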
-func (r SourceRange) ToHCL() hcl.Range { - return hcl.Range{ - Filename: r.Filename, - Start: hcl.Pos{ - Line: r.Start.Line, - Column: r.Start.Column, - Byte: r.Start.Byte, - }, - End: hcl.Pos{ - Line: r.End.Line, - Column: r.End.Column, - Byte: r.End.Byte, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go deleted file mode 100644 index 3031168d..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go +++ /dev/null @@ -1,35 +0,0 @@ -package tfdiags - -import ( - "fmt" - "os" - "path/filepath" -) - -type SourceRange struct { - Filename string - Start, End SourcePos -} - -type SourcePos struct { - Line, Column, Byte int -} - -// StartString returns a string representation of the start of the range, -// including the filename and the line and column numbers. -func (r SourceRange) StartString() string { - filename := r.Filename - - // We'll try to relative-ize our filename here so it's less verbose - // in the common case of being in the current working directory. If not, - // we'll just show the full path. - wd, err := os.Getwd() - if err == nil { - relFn, err := filepath.Rel(wd, filename) - if err == nil { - filename = relFn - } - } - - return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go deleted file mode 100644 index eaa27373..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go +++ /dev/null @@ -1,13 +0,0 @@ -package tfdiags - -// Sourceless creates and returns a diagnostic with no source location -// information. This is generally used for operational-type errors that are -// caused by or relate to the environment where Terraform is running rather -// than to the provided configuration. -func Sourceless(severity Severity, summary, detail string) Diagnostic { - return diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go deleted file mode 100644 index 2d56dab6..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go +++ /dev/null @@ -1,36 +0,0 @@ -// The version package provides a location to set the release versions for all -// packages to consume, without creating import cycles. -// -// This package should not import any other terraform packages. -package version - -import ( - "fmt" - - version "github.com/hashicorp/go-version" -) - -// The main version number that is being run at the moment. -var Version = "0.12.7" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -var Prerelease = "sdk" - -// SemVer is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. 
-var SemVer *version.Version - -func init() { - SemVer = version.Must(version.NewVersion(Version)) -} - -// String returns the complete version string, including prerelease -func String() string { - if Prerelease != "" { - return fmt.Sprintf("%s-%s", Version, Prerelease) - } - return Version -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go deleted file mode 100644 index 0363c2d9..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go +++ /dev/null @@ -1,36 +0,0 @@ -// The meta package provides a location to set the release version -// and any other relevant metadata for the SDK. -// -// This package should not import any other SDK packages. -package meta - -import ( - "fmt" - - version "github.com/hashicorp/go-version" -) - -// The main version number that is being run at the moment. -var SDKVersion = "1.9.0" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -var SDKPrerelease = "" - -// SemVer is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. -var SemVer *version.Version - -func init() { - SemVer = version.Must(version.NewVersion(SDKVersion)) -} - -// SDKVersionString returns the complete version string, including prerelease -func SDKVersionString() string { - if SDKPrerelease != "" { - return fmt.Sprintf("%s-%s", SDKVersion, SDKPrerelease) - } - return SDKVersion -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go deleted file mode 100644 index 5a99e900..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go +++ /dev/null @@ -1,35 +0,0 @@ -package plugin - -import ( - "os" - "os/exec" - - hclog "github.com/hashicorp/go-hclog" - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" -) - -// ClientConfig returns a configuration object that can be used to instantiate -// a client for the plugin described by the given metadata. -func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig { - logger := hclog.New(&hclog.LoggerOptions{ - Name: "plugin", - Level: hclog.Trace, - Output: os.Stderr, - }) - - return &plugin.ClientConfig{ - Cmd: exec.Command(m.Path), - HandshakeConfig: Handshake, - VersionedPlugins: VersionedPlugins, - Managed: true, - Logger: logger, - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - AutoMTLS: true, - } -} - -// Client returns a plugin client for the plugin described by the given metadata.
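From core's side, the ClientConfig helper above and the Client helper below were used roughly as follows (a sketch; the plugin name and binary path are hypothetical, and in practice the discovery package populates PluginMeta):

meta := discovery.PluginMeta{
    Name: "pingaccess",                                  // hypothetical name
    Path: "/plugins/terraform-provider-pingaccess_v0.1", // hypothetical path
}
pluginClient := Client(meta) // shorthand for plugin.NewClient(ClientConfig(meta))
defer pluginClient.Kill()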
-func Client(m discovery.PluginMeta) *plugin.Client { - return plugin.NewClient(ClientConfig(m)) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go deleted file mode 100644 index e4520975..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go +++ /dev/null @@ -1,563 +0,0 @@ -package plugin - -import ( - "context" - "errors" - "log" - "sync" - - "github.com/zclconf/go-cty/cty" - - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/zclconf/go-cty/cty/msgpack" - "google.golang.org/grpc" -) - -// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package. -type GRPCProviderPlugin struct { - plugin.Plugin - GRPCProvider func() proto.ProviderServer -} - -func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCProvider{ - client: proto.NewProviderClient(c), - ctx: ctx, - }, nil -} - -func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - proto.RegisterProviderServer(s, p.GRPCProvider()) - return nil -} - -// GRPCProvider handles the client, or core side of the plugin rpc connection. -// The GRPCProvider methods are mostly a translation layer between the -// terraform provioders types and the grpc proto types, directly converting -// between the two. -type GRPCProvider struct { - // PluginClient provides a reference to the plugin.Client which controls the plugin process. - // This allows the GRPCProvider a way to shutdown the plugin process. - PluginClient *plugin.Client - - // TestServer contains a grpc.Server to close when the GRPCProvider is being - // used in an end to end test of a provider. - TestServer *grpc.Server - - // Proto client use to make the grpc service calls. - client proto.ProviderClient - - // this context is created by the plugin package, and is canceled when the - // plugin process ends. - ctx context.Context - - // schema stores the schema for this provider. This is used to properly - // serialize the state for requests. - mu sync.Mutex - schemas providers.GetSchemaResponse -} - -// getSchema is used internally to get the saved provider schema. The schema -// should have already been fetched from the provider, but we have to -// synchronize access to avoid being called concurrently with GetSchema. -func (p *GRPCProvider) getSchema() providers.GetSchemaResponse { - p.mu.Lock() - // unlock inline in case GetSchema needs to be called - if p.schemas.Provider.Block != nil { - p.mu.Unlock() - return p.schemas - } - p.mu.Unlock() - - // the schema should have been fetched already, but give it another shot - // just in case things are being called out of order. This may happen for - // tests. - schemas := p.GetSchema() - if schemas.Diagnostics.HasErrors() { - panic(schemas.Diagnostics.Err()) - } - - return schemas -} - -// getResourceSchema is a helper to extract the schema for a resource, and -// panics if the schema is not available. 
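Continuing that sketch, core would dispense the gRPC client over the established connection and fetch schemas through the methods that follow; the "provider" name matches the VersionedPlugins set later in this patch, and log.Fatal stands in for real error handling:

rpcClient, err := pluginClient.Client()
if err != nil {
    log.Fatal(err)
}
raw, err := rpcClient.Dispense("provider")
if err != nil {
    log.Fatal(err)
}
p := raw.(*GRPCProvider)
schemaResp := p.GetSchema()
if schemaResp.Diagnostics.HasErrors() {
    log.Fatal(schemaResp.Diagnostics.Err())
}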
-func (p *GRPCProvider) getResourceSchema(name string) providers.Schema { - schema := p.getSchema() - resSchema, ok := schema.ResourceTypes[name] - if !ok { - panic("unknown resource type " + name) - } - return resSchema -} - -// gettDatasourceSchema is a helper to extract the schema for a datasource, and -// panics if that schema is not available. -func (p *GRPCProvider) getDatasourceSchema(name string) providers.Schema { - schema := p.getSchema() - dataSchema, ok := schema.DataSources[name] - if !ok { - panic("unknown data source " + name) - } - return dataSchema -} - -func (p *GRPCProvider) GetSchema() (resp providers.GetSchemaResponse) { - log.Printf("[TRACE] GRPCProvider: GetSchema") - p.mu.Lock() - defer p.mu.Unlock() - - if p.schemas.Provider.Block != nil { - return p.schemas - } - - resp.ResourceTypes = make(map[string]providers.Schema) - resp.DataSources = make(map[string]providers.Schema) - - // Some providers may generate quite large schemas, and the internal default - // grpc response size limit is 4MB. 64MB should cover most any use case, and - // if we get providers nearing that we may want to consider a finer-grained - // API to fetch individual resource schemas. - // Note: this option is marked as EXPERIMENTAL in the grpc API. - const maxRecvSize = 64 << 20 - protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize}) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - if protoResp.Provider == nil { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema")) - return resp - } - - resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider) - - for name, res := range protoResp.ResourceSchemas { - resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res) - } - - for name, data := range protoResp.DataSourceSchemas { - resp.DataSources[name] = convert.ProtoToProviderSchema(data) - } - - p.schemas = resp - - return resp -} - -func (p *GRPCProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) { - log.Printf("[TRACE] GRPCProvider: PrepareProviderConfig") - - schema := p.getSchema() - ty := schema.Provider.Block.ImpliedType() - - mp, err := msgpack.Marshal(r.Config, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.PrepareProviderConfig_Request{ - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - config := cty.NullVal(ty) - if protoResp.PreparedConfig != nil { - config, err = msgpack.Unmarshal(protoResp.PreparedConfig.Msgpack, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.PreparedConfig = config - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) { - log.Printf("[TRACE] GRPCProvider: ValidateResourceTypeConfig") - resourceSchema := p.getResourceSchema(r.TypeName) - - mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = 
resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateResourceTypeConfig_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) { - log.Printf("[TRACE] GRPCProvider: ValidateDataSourceConfig") - - dataSchema := p.getDatasourceSchema(r.TypeName) - - mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateDataSourceConfig_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - log.Printf("[TRACE] GRPCProvider: UpgradeResourceState") - - resSchema := p.getResourceSchema(r.TypeName) - - protoReq := &proto.UpgradeResourceState_Request{ - TypeName: r.TypeName, - Version: int64(r.Version), - RawState: &proto.RawState{ - Json: r.RawStateJSON, - Flatmap: r.RawStateFlatmap, - }, - } - - protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.UpgradedState != nil { - state, err = msgpack.Unmarshal(protoResp.UpgradedState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - - resp.UpgradedState = state - return resp -} - -func (p *GRPCProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) { - log.Printf("[TRACE] GRPCProvider: Configure") - - schema := p.getSchema() - - var mp []byte - - // we don't have anything to marshal if there's no config - mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.Configure_Request{ - TerraformVersion: r.TerraformVersion, - Config: &proto.DynamicValue{ - Msgpack: mp, - }, - } - - protoResp, err := p.client.Configure(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) Stop() error { - log.Printf("[TRACE] GRPCProvider: Stop") - - resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request)) - if err != nil { - return err - } - - if resp.Error != "" { - return errors.New(resp.Error) - } - return nil -} - -func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - log.Printf("[TRACE] GRPCProvider: ReadResource") - - resSchema := 
p.getResourceSchema(r.TypeName) - - mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ReadResource_Request{ - TypeName: r.TypeName, - CurrentState: &proto.DynamicValue{Msgpack: mp}, - Private: r.Private, - } - - protoResp, err := p.client.ReadResource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.NewState != nil { - state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.NewState = state - resp.Private = protoResp.Private - - return resp -} - -func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - log.Printf("[TRACE] GRPCProvider: PlanResourceChange") - - resSchema := p.getResourceSchema(r.TypeName) - - priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.PlanResourceChange_Request{ - TypeName: r.TypeName, - PriorState: &proto.DynamicValue{Msgpack: priorMP}, - Config: &proto.DynamicValue{Msgpack: configMP}, - ProposedNewState: &proto.DynamicValue{Msgpack: propMP}, - PriorPrivate: r.PriorPrivate, - } - - protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.PlannedState != nil { - state, err = msgpack.Unmarshal(protoResp.PlannedState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.PlannedState = state - - for _, p := range protoResp.RequiresReplace { - resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) - } - - resp.PlannedPrivate = protoResp.PlannedPrivate - - resp.LegacyTypeSystem = protoResp.LegacyTypeSystem - - return resp -} - -func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - log.Printf("[TRACE] GRPCProvider: ApplyResourceChange") - - resSchema := p.getResourceSchema(r.TypeName) - - priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ApplyResourceChange_Request{ - 
TypeName: r.TypeName, - PriorState: &proto.DynamicValue{Msgpack: priorMP}, - PlannedState: &proto.DynamicValue{Msgpack: plannedMP}, - Config: &proto.DynamicValue{Msgpack: configMP}, - PlannedPrivate: r.PlannedPrivate, - } - - protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - resp.Private = protoResp.Private - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.NewState != nil { - state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.NewState = state - - resp.LegacyTypeSystem = protoResp.LegacyTypeSystem - - return resp -} - -func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { - log.Printf("[TRACE] GRPCProvider: ImportResourceState") - - protoReq := &proto.ImportResourceState_Request{ - TypeName: r.TypeName, - Id: r.ID, - } - - protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - for _, imported := range protoResp.ImportedResources { - resource := providers.ImportedResource{ - TypeName: imported.TypeName, - Private: imported.Private, - } - - resSchema := p.getResourceSchema(resource.TypeName) - state := cty.NullVal(resSchema.Block.ImpliedType()) - if imported.State != nil { - state, err = msgpack.Unmarshal(imported.State.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resource.State = state - resp.ImportedResources = append(resp.ImportedResources, resource) - } - - return resp -} - -func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - log.Printf("[TRACE] GRPCProvider: ReadDataSource") - - dataSchema := p.getDatasourceSchema(r.TypeName) - - config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ReadDataSource_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{ - Msgpack: config, - }, - } - - protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(dataSchema.Block.ImpliedType()) - if protoResp.State != nil { - state, err = msgpack.Unmarshal(protoResp.State.Msgpack, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.State = state - - return resp -} - -// closing the grpc connection is final, and terraform will call it at the end of every phase. -func (p *GRPCProvider) Close() error { - log.Printf("[TRACE] GRPCProvider: Close") - - // Make sure to stop the server if we're not running within go-plugin. - if p.TestServer != nil { - p.TestServer.Stop() - } - - // Check this since it's not automatically inserted during plugin creation. 
- // It's currently only inserted by the command package, because that is - // where the factory is built and is the only point with access to the - // plugin.Client. - if p.PluginClient == nil { - log.Println("[DEBUG] provider has no plugin.Client") - return nil - } - - p.PluginClient.Kill() - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go deleted file mode 100644 index c0e6f549..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go +++ /dev/null @@ -1,178 +0,0 @@ -package plugin - -import ( - "context" - "errors" - "io" - "log" - "sync" - - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/msgpack" - "google.golang.org/grpc" -) - -// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation. -type GRPCProvisionerPlugin struct { - plugin.Plugin - GRPCProvisioner func() proto.ProvisionerServer -} - -func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCProvisioner{ - client: proto.NewProvisionerClient(c), - ctx: ctx, - }, nil -} - -func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - proto.RegisterProvisionerServer(s, p.GRPCProvisioner()) - return nil -} - -// provisioners.Interface grpc implementation -type GRPCProvisioner struct { - // PluginClient provides a reference to the plugin.Client which controls the plugin process. - // This allows the GRPCProvider a way to shutdown the plugin process. - PluginClient *plugin.Client - - client proto.ProvisionerClient - ctx context.Context - - // Cache the schema since we need it for serialization in each method call. 
- mu sync.Mutex - schema *configschema.Block -} - -func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) { - p.mu.Lock() - defer p.mu.Unlock() - - if p.schema != nil { - return provisioners.GetSchemaResponse{ - Provisioner: p.schema, - } - } - - protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request)) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - if protoResp.Provisioner == nil { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema")) - return resp - } - - resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block) - - p.schema = resp.Provisioner - - return resp -} - -func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { - schema := p.GetSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) - return resp - } - - mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateProvisionerConfig_Request{ - Config: &proto.DynamicValue{Msgpack: mp}, - } - protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - schema := p.GetSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) - return resp - } - - mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - // connection is always assumed to be a simple string map - connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String)) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ProvisionResource_Request{ - Config: &proto.DynamicValue{Msgpack: mp}, - Connection: &proto.DynamicValue{Msgpack: connMP}, - } - - outputClient, err := p.client.ProvisionResource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - for { - rcv, err := outputClient.Recv() - if rcv != nil { - r.UIOutput.Output(rcv.Output) - } - if err != nil { - if err != io.EOF { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - break - } - - if len(rcv.Diagnostics) > 0 { - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics)) - break - } - } - - return resp -} - -func (p *GRPCProvisioner) Stop() error { - protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{}) - if err != nil { - return err - } - if protoResp.Error != "" { - return errors.New(protoResp.Error) - } - return nil -} - -func (p *GRPCProvisioner) Close() error { - // check this since it's not automatically inserted during plugin creation - if p.PluginClient == nil { - log.Println("[DEBUG] provider has no plugin.Client") - return nil - } - - p.PluginClient.Kill() - return nil -} diff --git 
a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go deleted file mode 100644 index e4fb5776..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go +++ /dev/null @@ -1,14 +0,0 @@ -package plugin - -import ( - "github.com/hashicorp/go-plugin" -) - -// See serve.go for serving plugins - -var VersionedPlugins = map[int]plugin.PluginSet{ - 5: { - "provider": &GRPCProviderPlugin{}, - "provisioner": &GRPCProvisionerPlugin{}, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go deleted file mode 100644 index bfd62e2e..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go +++ /dev/null @@ -1,620 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// ResourceProviderPlugin is the plugin.Plugin implementation. -type ResourceProviderPlugin struct { - ResourceProvider func() terraform.ResourceProvider -} - -func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProviderServer{ - Broker: b, - Provider: p.ResourceProvider(), - }, nil -} - -func (p *ResourceProviderPlugin) Client( - b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { - return &ResourceProvider{Broker: b, Client: c}, nil -} - -// ResourceProvider is an implementation of terraform.ResourceProvider -// that communicates over RPC. -type ResourceProvider struct { - Broker *plugin.MuxBroker - Client *rpc.Client -} - -func (p *ResourceProvider) Stop() error { - var resp ResourceProviderStopResponse - err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { - var result ResourceProviderGetSchemaResponse - args := &ResourceProviderGetSchemaArgs{ - Req: req, - } - - err := p.Client.Call("Plugin.GetSchema", args, &result) - if err != nil { - return nil, err - } - - if result.Error != nil { - err = result.Error - } - - return result.Schema, err -} - -func (p *ResourceProvider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - id := p.Broker.NextId() - go p.Broker.AcceptAndServe(id, &UIInputServer{ - UIInput: input, - }) - - var resp ResourceProviderInputResponse - args := ResourceProviderInputArgs{ - InputId: id, - Config: c, - } - - err := p.Client.Call("Plugin.Input", &args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - return nil, err - } - - return resp.Config, nil -} - -func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResponse - args := ResourceProviderValidateArgs{ - Config: c, - } - - err := p.Client.Call("Plugin.Validate", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - var resp 
ResourceProviderValidateResourceResponse - args := ResourceProviderValidateResourceArgs{ - Config: c, - Type: t, - } - - err := p.Client.Call("Plugin.ValidateResource", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { - var resp ResourceProviderConfigureResponse - err := p.Client.Call("Plugin.Configure", c, &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - var resp ResourceProviderApplyResponse - args := &ResourceProviderApplyArgs{ - Info: info, - State: s, - Diff: d, - } - - err := p.Client.Call("Plugin.Apply", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - var resp ResourceProviderDiffResponse - args := &ResourceProviderDiffArgs{ - Info: info, - State: s, - Config: c, - } - err := p.Client.Call("Plugin.Diff", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.Diff, err -} - -func (p *ResourceProvider) ValidateDataSource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResourceResponse - args := ResourceProviderValidateResourceArgs{ - Config: c, - Type: t, - } - - err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - var resp ResourceProviderRefreshResponse - args := &ResourceProviderRefreshArgs{ - Info: info, - State: s, - } - - err := p.Client.Call("Plugin.Refresh", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) ImportState( - info *terraform.InstanceInfo, - id string) ([]*terraform.InstanceState, error) { - var resp ResourceProviderImportStateResponse - args := &ResourceProviderImportStateArgs{ - Info: info, - Id: id, - } - - err := p.Client.Call("Plugin.ImportState", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) Resources() []terraform.ResourceType { - var result []terraform.ResourceType - - err := p.Client.Call("Plugin.Resources", new(interface{}), &result) - if err != nil { - // TODO: panic, log, what? 
- return nil - } - - return result -} - -func (p *ResourceProvider) ReadDataDiff( - info *terraform.InstanceInfo, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - var resp ResourceProviderReadDataDiffResponse - args := &ResourceProviderReadDataDiffArgs{ - Info: info, - Config: c, - } - - err := p.Client.Call("Plugin.ReadDataDiff", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.Diff, err -} - -func (p *ResourceProvider) ReadDataApply( - info *terraform.InstanceInfo, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - var resp ResourceProviderReadDataApplyResponse - args := &ResourceProviderReadDataApplyArgs{ - Info: info, - Diff: d, - } - - err := p.Client.Call("Plugin.ReadDataApply", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) DataSources() []terraform.DataSource { - var result []terraform.DataSource - - err := p.Client.Call("Plugin.DataSources", new(interface{}), &result) - if err != nil { - // TODO: panic, log, what? - return nil - } - - return result -} - -func (p *ResourceProvider) Close() error { - return p.Client.Close() -} - -// ResourceProviderServer is a net/rpc compatible structure for serving -// a ResourceProvider. This should not be used directly. -type ResourceProviderServer struct { - Broker *plugin.MuxBroker - Provider terraform.ResourceProvider -} - -type ResourceProviderStopResponse struct { - Error *plugin.BasicError -} - -type ResourceProviderGetSchemaArgs struct { - Req *terraform.ProviderSchemaRequest -} - -type ResourceProviderGetSchemaResponse struct { - Schema *terraform.ProviderSchema - Error *plugin.BasicError -} - -type ResourceProviderConfigureResponse struct { - Error *plugin.BasicError -} - -type ResourceProviderInputArgs struct { - InputId uint32 - Config *terraform.ResourceConfig -} - -type ResourceProviderInputResponse struct { - Config *terraform.ResourceConfig - Error *plugin.BasicError -} - -type ResourceProviderApplyArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState - Diff *terraform.InstanceDiff -} - -type ResourceProviderApplyResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderDiffArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState - Config *terraform.ResourceConfig -} - -type ResourceProviderDiffResponse struct { - Diff *terraform.InstanceDiff - Error *plugin.BasicError -} - -type ResourceProviderRefreshArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState -} - -type ResourceProviderRefreshResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderImportStateArgs struct { - Info *terraform.InstanceInfo - Id string -} - -type ResourceProviderImportStateResponse struct { - State []*terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderReadDataApplyArgs struct { - Info *terraform.InstanceInfo - Diff *terraform.InstanceDiff -} - -type ResourceProviderReadDataApplyResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderReadDataDiffArgs struct { - Info *terraform.InstanceInfo - Config *terraform.ResourceConfig -} - -type ResourceProviderReadDataDiffResponse struct { - Diff *terraform.InstanceDiff - Error *plugin.BasicError -} - -type ResourceProviderValidateArgs struct { - Config 
*terraform.ResourceConfig -} - -type ResourceProviderValidateResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -type ResourceProviderValidateResourceArgs struct { - Config *terraform.ResourceConfig - Type string -} - -type ResourceProviderValidateResourceResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -func (s *ResourceProviderServer) Stop( - _ interface{}, - reply *ResourceProviderStopResponse) error { - err := s.Provider.Stop() - *reply = ResourceProviderStopResponse{ - Error: plugin.NewBasicError(err), - } - - return nil -} - -func (s *ResourceProviderServer) GetSchema( - args *ResourceProviderGetSchemaArgs, - result *ResourceProviderGetSchemaResponse, -) error { - schema, err := s.Provider.GetSchema(args.Req) - result.Schema = schema - if err != nil { - result.Error = plugin.NewBasicError(err) - } - return nil -} - -func (s *ResourceProviderServer) Input( - args *ResourceProviderInputArgs, - reply *ResourceProviderInputResponse) error { - conn, err := s.Broker.Dial(args.InputId) - if err != nil { - *reply = ResourceProviderInputResponse{ - Error: plugin.NewBasicError(err), - } - return nil - } - client := rpc.NewClient(conn) - defer client.Close() - - input := &UIInput{Client: client} - - config, err := s.Provider.Input(input, args.Config) - *reply = ResourceProviderInputResponse{ - Config: config, - Error: plugin.NewBasicError(err), - } - - return nil -} - -func (s *ResourceProviderServer) Validate( - args *ResourceProviderValidateArgs, - reply *ResourceProviderValidateResponse) error { - warns, errs := s.Provider.Validate(args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) ValidateResource( - args *ResourceProviderValidateResourceArgs, - reply *ResourceProviderValidateResourceResponse) error { - warns, errs := s.Provider.ValidateResource(args.Type, args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResourceResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) Configure( - config *terraform.ResourceConfig, - reply *ResourceProviderConfigureResponse) error { - err := s.Provider.Configure(config) - *reply = ResourceProviderConfigureResponse{ - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Apply( - args *ResourceProviderApplyArgs, - result *ResourceProviderApplyResponse) error { - state, err := s.Provider.Apply(args.Info, args.State, args.Diff) - *result = ResourceProviderApplyResponse{ - State: state, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Diff( - args *ResourceProviderDiffArgs, - result *ResourceProviderDiffResponse) error { - diff, err := s.Provider.Diff(args.Info, args.State, args.Config) - *result = ResourceProviderDiffResponse{ - Diff: diff, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Refresh( - args *ResourceProviderRefreshArgs, - result *ResourceProviderRefreshResponse) error { - newState, err := s.Provider.Refresh(args.Info, args.State) - *result = ResourceProviderRefreshResponse{ - State: newState, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) ImportState( - args 
*ResourceProviderImportStateArgs, - result *ResourceProviderImportStateResponse) error { - states, err := s.Provider.ImportState(args.Info, args.Id) - *result = ResourceProviderImportStateResponse{ - State: states, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Resources( - nothing interface{}, - result *[]terraform.ResourceType) error { - *result = s.Provider.Resources() - return nil -} - -func (s *ResourceProviderServer) ValidateDataSource( - args *ResourceProviderValidateResourceArgs, - reply *ResourceProviderValidateResourceResponse) error { - warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResourceResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) ReadDataDiff( - args *ResourceProviderReadDataDiffArgs, - result *ResourceProviderReadDataDiffResponse) error { - diff, err := s.Provider.ReadDataDiff(args.Info, args.Config) - *result = ResourceProviderReadDataDiffResponse{ - Diff: diff, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) ReadDataApply( - args *ResourceProviderReadDataApplyArgs, - result *ResourceProviderReadDataApplyResponse) error { - newState, err := s.Provider.ReadDataApply(args.Info, args.Diff) - *result = ResourceProviderReadDataApplyResponse{ - State: newState, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) DataSources( - nothing interface{}, - result *[]terraform.DataSource) error { - *result = s.Provider.DataSources() - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go deleted file mode 100644 index cbe9fc63..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go +++ /dev/null @@ -1,100 +0,0 @@ -package plugin - -import ( - "github.com/hashicorp/go-plugin" - grpcplugin "github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -const ( - // The constants below are the names of the plugins that can be dispensed - // from the plugin server. - ProviderPluginName = "provider" - - // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify - // a particular version during their handshake. This is the version used when Terraform 0.10 - // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must - // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and - // 0.11. - DefaultProtocolVersion = 4 -) - -// Handshake is the HandshakeConfig used to configure clients and servers. -var Handshake = plugin.HandshakeConfig{ - // The ProtocolVersion is the version that must match between TF core - // and TF plugins. This should be bumped whenever a change happens in - // one or the other that makes it so that they can't safely communicate. - // This could be adding a new interface value, it could be how - // helper/schema computes diffs, etc. - ProtocolVersion: DefaultProtocolVersion, - - // The magic cookie values should NEVER be changed. 
- MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", - MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", -} - -type ProviderFunc func() terraform.ResourceProvider -type GRPCProviderFunc func() proto.ProviderServer - -// ServeOpts are the configurations to serve a plugin. -type ServeOpts struct { - ProviderFunc ProviderFunc - - // Wrapped versions of the above plugins will automatically shimmed and - // added to the GRPC functions when possible. - GRPCProviderFunc GRPCProviderFunc -} - -// Serve serves a plugin. This function never returns and should be the final -// function called in the main function of the plugin. -func Serve(opts *ServeOpts) { - // since the plugins may not yet be aware of the new protocol, we - // automatically wrap the plugins in the grpc shims. - if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { - provider := grpcplugin.NewGRPCProviderServerShim(opts.ProviderFunc()) - // this is almost always going to be a *schema.Provider, but check that - // we got back a valid provider just in case. - if provider != nil { - opts.GRPCProviderFunc = func() proto.ProviderServer { - return provider - } - } - } - - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: Handshake, - VersionedPlugins: pluginSet(opts), - GRPCServer: plugin.DefaultGRPCServer, - }) -} - -// pluginMap returns the legacy map[string]plugin.Plugin to use for configuring -// a plugin server or client. -func legacyPluginMap(opts *ServeOpts) map[string]plugin.Plugin { - return map[string]plugin.Plugin{ - "provider": &ResourceProviderPlugin{ - ResourceProvider: opts.ProviderFunc, - }, - } -} - -func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { - // Set the legacy netrpc plugins at version 4. - // The oldest version is returned in when executed by a legacy go-plugin - // client. - plugins := map[int]plugin.PluginSet{ - 4: legacyPluginMap(opts), - } - - // add the new protocol versions if they're configured - if opts.GRPCProviderFunc != nil { - plugins[5] = plugin.PluginSet{} - if opts.GRPCProviderFunc != nil { - plugins[5]["provider"] = &GRPCProviderPlugin{ - GRPCProvider: opts.GRPCProviderFunc, - } - } - } - return plugins -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go deleted file mode 100644 index b24b03eb..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go +++ /dev/null @@ -1,52 +0,0 @@ -package plugin - -import ( - "context" - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// UIInput is an implementation of terraform.UIInput that communicates -// over RPC. -type UIInput struct { - Client *rpc.Client -} - -func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { - var resp UIInputInputResponse - err := i.Client.Call("Plugin.Input", opts, &resp) - if err != nil { - return "", err - } - if resp.Error != nil { - err = resp.Error - return "", err - } - - return resp.Value, nil -} - -type UIInputInputResponse struct { - Value string - Error *plugin.BasicError -} - -// UIInputServer is a net/rpc compatible structure for serving -// a UIInputServer. This should not be used directly. 
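With the v1 SDK, the Serve function above was the provider's entire entry point; this repository's main.go amounted to something like the following sketch (the provider import path is assumed from the repository layout, and Provider() is assumed to be the constructor exposed by the pingaccess package):

package main

import (
    "github.com/hashicorp/terraform-plugin-sdk/plugin"
    "github.com/hashicorp/terraform-plugin-sdk/terraform"

    "github.com/iwarapter/terraform-provider-pingaccess/pingaccess"
)

func main() {
    plugin.Serve(&plugin.ServeOpts{
        ProviderFunc: func() terraform.ResourceProvider {
            // The provider's *schema.Provider satisfies this interface,
            // which is what the grpc shim inside Serve relies on.
            return pingaccess.Provider()
        },
    })
}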
-type UIInputServer struct { - UIInput terraform.UIInput -} - -func (s *UIInputServer) Input( - opts *terraform.InputOpts, - reply *UIInputInputResponse) error { - value, err := s.UIInput.Input(context.Background(), opts) - *reply = UIInputInputResponse{ - Value: value, - Error: plugin.NewBasicError(err), - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go deleted file mode 100644 index 07c13d03..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go +++ /dev/null @@ -1,29 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// UIOutput is an implementatin of terraform.UIOutput that communicates -// over RPC. -type UIOutput struct { - Client *rpc.Client -} - -func (o *UIOutput) Output(v string) { - o.Client.Call("Plugin.Output", v, new(interface{})) -} - -// UIOutputServer is the RPC server for serving UIOutput. -type UIOutputServer struct { - UIOutput terraform.UIOutput -} - -func (s *UIOutputServer) Output( - v string, - reply *interface{}) error { - s.UIOutput.Output(v) - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go deleted file mode 100644 index eb05c68a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go +++ /dev/null @@ -1,882 +0,0 @@ -package terraform - -import ( - "bytes" - "context" - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// InputMode defines what sort of input will be asked for when Input -// is called on Context. -type InputMode byte - -const ( - // InputModeVar asks for all variables - InputModeVar InputMode = 1 << iota - - // InputModeVarUnset asks for variables which are not set yet. - // InputModeVar must be set for this to have an effect. - InputModeVarUnset - - // InputModeProvider asks for provider variables - InputModeProvider - - // InputModeStd is the standard operating mode and asks for both variables - // and providers. - InputModeStd = InputModeVar | InputModeProvider -) - -// ContextOpts are the user-configurable options to create a context with -// NewContext. -type ContextOpts struct { - Config *configs.Config - Changes *plans.Changes - State *states.State - Targets []addrs.Targetable - Variables InputValues - Meta *ContextMeta - Destroy bool - - Hooks []Hook - Parallelism int - ProviderResolver providers.Resolver - Provisioners map[string]ProvisionerFactory - - // If non-nil, will apply as additional constraints on the provider - // plugins that will be requested from the provider resolver. 
- ProviderSHA256s map[string][]byte - SkipProviderVerify bool - - UIInput UIInput -} - -// ContextMeta is metadata about the running context. This is information -// that this package or structure cannot determine on its own but exposes -// into Terraform in various ways. This must be provided by the Context -// initializer. -type ContextMeta struct { - Env string // Env is the state environment -} - -// Context represents all the context that Terraform needs in order to -// perform operations on infrastructure. This structure is built using -// NewContext. -type Context struct { - config *configs.Config - changes *plans.Changes - state *states.State - targets []addrs.Targetable - variables InputValues - meta *ContextMeta - destroy bool - - hooks []Hook - components contextComponentFactory - schemas *Schemas - sh *stopHook - uiInput UIInput - - l sync.Mutex // Lock acquired during any task - parallelSem Semaphore - providerInputConfig map[string]map[string]cty.Value - providerSHA256s map[string][]byte - runCond *sync.Cond - runContext context.Context - runContextCancel context.CancelFunc - shadowErr error -} - -// (additional methods on Context can be found in context_*.go files.) - -// NewContext creates a new Context structure. -// -// Once a Context is created, the caller must not access or mutate any of -// the objects referenced (directly or indirectly) by the ContextOpts fields. -// -// If the returned diagnostics contains errors then the resulting context is -// invalid and must not be used. -func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { - log.Printf("[TRACE] terraform.NewContext: starting") - diags := CheckCoreVersionRequirements(opts.Config) - // If version constraints are not met then we'll bail early since otherwise - // we're likely to just see a bunch of other errors related to - // incompatibilities, which could be overwhelming for the user. - if diags.HasErrors() { - return nil, diags - } - - // Copy all the hooks and add our stop hook. We don't append directly - // to the Config so that we're not modifying that in-place. - sh := new(stopHook) - hooks := make([]Hook, len(opts.Hooks)+1) - copy(hooks, opts.Hooks) - hooks[len(opts.Hooks)] = sh - - state := opts.State - if state == nil { - state = states.NewState() - } - - // Determine parallelism, default to 10. We do this both to limit - // CPU pressure but also to have an extra guard against rate throttling - // from providers. - par := opts.Parallelism - if par == 0 { - par = 10 - } - - // Set up the variables in the following sequence: - // 0 - Take default values from the configuration - // 1 - Take values from TF_VAR_x environment variables - // 2 - Take values specified in -var flags, overriding values - // set by environment variables if necessary. This includes - // values taken from -var-file in addition. - var variables InputValues - if opts.Config != nil { - // Default variables from the configuration seed our map. - variables = DefaultVariableValues(opts.Config.Module.Variables) - } - // Variables provided by the caller (from CLI, environment, etc) can - // override the defaults. 
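// For example (illustrative values): with variable "region" defaulting to
// "us-east-1" in configuration, a caller that merged TF_VAR_region=eu-west-1
// and then -var 'region=ap-south-1' into opts.Variables will see the flag
// value win, because the caller-supplied set overrides the defaults here: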
- variables = variables.Override(opts.Variables) - - // Bind available provider plugins to the constraints in config - var providerFactories map[string]providers.Factory - if opts.ProviderResolver != nil { - deps := ConfigTreeDependencies(opts.Config, state) - reqd := deps.AllPluginRequirements() - if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { - reqd.LockExecutables(opts.ProviderSHA256s) - } - log.Printf("[TRACE] terraform.NewContext: resolving provider version selections") - - var providerDiags tfdiags.Diagnostics - providerFactories, providerDiags = resourceProviderFactories(opts.ProviderResolver, reqd) - diags = diags.Append(providerDiags) - - if diags.HasErrors() { - return nil, diags - } - } else { - providerFactories = make(map[string]providers.Factory) - } - - components := &basicComponentFactory{ - providers: providerFactories, - provisioners: opts.Provisioners, - } - - log.Printf("[TRACE] terraform.NewContext: loading provider schemas") - schemas, err := LoadSchemas(opts.Config, opts.State, components) - if err != nil { - diags = diags.Append(err) - return nil, diags - } - - changes := opts.Changes - if changes == nil { - changes = plans.NewChanges() - } - - config := opts.Config - if config == nil { - config = configs.NewEmptyConfig() - } - - log.Printf("[TRACE] terraform.NewContext: complete") - - return &Context{ - components: components, - schemas: schemas, - destroy: opts.Destroy, - changes: changes, - hooks: hooks, - meta: opts.Meta, - config: config, - state: state, - targets: opts.Targets, - uiInput: opts.UIInput, - variables: variables, - - parallelSem: NewSemaphore(par), - providerInputConfig: make(map[string]map[string]cty.Value), - providerSHA256s: opts.ProviderSHA256s, - sh: sh, - }, nil -} - -func (c *Context) Schemas() *Schemas { - return c.schemas -} - -type ContextGraphOpts struct { - // If true, validates the graph structure (checks for cycles). - Validate bool - - // Legacy graphs only: won't prune the graph - Verbose bool -} - -// Graph returns the graph used for the given operation type. -// -// The most extensive or complex graph type is GraphTypePlan. 
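//
// A minimal usage sketch (tfCtx is a hypothetical *Context):
//
//	g, diags := tfCtx.Graph(GraphTypePlan, &ContextGraphOpts{Validate: true})
//	if diags.HasErrors() {
//		return diags.Err()
//	}
//	_ = g // walk or inspect the graph from here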
-func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) { - if opts == nil { - opts = &ContextGraphOpts{Validate: true} - } - - log.Printf("[INFO] terraform: building graph: %s", typ) - switch typ { - case GraphTypeApply: - return (&ApplyGraphBuilder{ - Config: c.config, - Changes: c.changes, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Destroy: c.destroy, - Validate: opts.Validate, - }).Build(addrs.RootModuleInstance) - - case GraphTypeValidate: - // The validate graph is just a slightly modified plan graph - fallthrough - case GraphTypePlan: - // Create the plan graph builder - p := &PlanGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Validate: opts.Validate, - } - - // Some special cases for other graph types shared with plan currently - var b GraphBuilder = p - switch typ { - case GraphTypeValidate: - b = ValidateGraphBuilder(p) - } - - return b.Build(addrs.RootModuleInstance) - - case GraphTypePlanDestroy: - return (&DestroyPlanGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Validate: opts.Validate, - }).Build(addrs.RootModuleInstance) - - case GraphTypeRefresh: - return (&RefreshGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Validate: opts.Validate, - }).Build(addrs.RootModuleInstance) - - case GraphTypeEval: - return (&EvalGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - }).Build(addrs.RootModuleInstance) - - default: - // Should never happen, because the above is exhaustive for all graph types. - panic(fmt.Errorf("unsupported graph type %s", typ)) - } -} - -// ShadowError returns any errors caught during a shadow operation. -// -// A shadow operation is an operation run in parallel to a real operation -// that performs the same tasks using new logic on copied state. The results -// are compared to ensure that the new logic works the same as the old logic. -// The shadow never affects the real operation or return values. -// -// The result of the shadow operation are only available through this function -// call after a real operation is complete. -// -// For API consumers of Context, you can safely ignore this function -// completely if you have no interest in helping report experimental feature -// errors to Terraform maintainers. Otherwise, please call this function -// after every operation and report this to the user. -// -// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect -// the real state or result of a real operation. They are purely informational -// to assist in future Terraform versions being more stable. Please message -// this effectively to the end user. -// -// This must be called only when no other operation is running (refresh, -// plan, etc.). The result can be used in parallel to any other operation -// running. -func (c *Context) ShadowError() error { - return c.shadowErr -} - -// State returns a copy of the current state associated with this context. -// -// This cannot safely be called in parallel with any other Context function. -func (c *Context) State() *states.State { - return c.state.DeepCopy() -} - -// Eval produces a scope in which expressions can be evaluated for -// the given module path. 
-// -// This method must first evaluate any ephemeral values (input variables, local -// values, and output values) in the configuration. These ephemeral values are -// not included in the persisted state, so they must be re-computed using other -// values in the state before they can be properly evaluated. The updated -// values are retained in the main state associated with the receiving context. -// -// This function takes no action against remote APIs but it does need access -// to all provider and provisioner instances in order to obtain their schemas -// for type checking. -// -// The result is an evaluation scope that can be used to resolve references -// against the root module. If the returned diagnostics contains errors then -// the returned scope may be nil. If it is not nil then it may still be used -// to attempt expression evaluation or other analysis, but some expressions -// may not behave as expected. -func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) { - // This is intended for external callers such as the "terraform console" - // command. Internally, we create an evaluator in c.walk before walking - // the graph, and create scopes in ContextGraphWalker. - - var diags tfdiags.Diagnostics - defer c.acquireRun("eval")() - - // Start with a copy of state so that we don't affect any instances - // that other methods may have already returned. - c.state = c.state.DeepCopy() - var walker *ContextGraphWalker - - graph, graphDiags := c.Graph(GraphTypeEval, nil) - diags = diags.Append(graphDiags) - if !diags.HasErrors() { - var walkDiags tfdiags.Diagnostics - walker, walkDiags = c.walk(graph, walkEval) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - } - - if walker == nil { - // If we skipped walking the graph (due to errors) then we'll just - // use a placeholder graph walker here, which'll refer to the - // unmodified state. - walker = c.graphWalker(walkEval) - } - - // This is a bit weird since we don't normally evaluate outside of - // the context of a walk, but we'll "re-enter" our desired path here - // just to get hold of an EvalContext for it. GraphContextBuiltin - // caches its contexts, so we should get hold of the context that was - // previously used for evaluation here, unless we skipped walking. - evalCtx := walker.EnterPath(path) - return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags -} - -// Apply applies the changes represented by this context and returns -// the resulting state. -// -// Even in the case an error is returned, the state may be returned and will -// potentially be partially updated. In addition to returning the resulting -// state, this context is updated with the latest state. -// -// If the state is required after an error, the caller should call -// Context.State, rather than rely on the return value. -// -// TODO: Apply and Refresh should either always return a state, or rely on the -// State() method. Currently the helper/resource testing framework relies -// on the absence of a returned state to determine if Destroy can be -// called, so that will need to be refactored before this can be changed. -func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) { - defer c.acquireRun("apply")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Build the graph. 
- graph, diags := c.Graph(GraphTypeApply, nil) - if diags.HasErrors() { - return nil, diags - } - - // Determine the operation - operation := walkApply - if c.destroy { - operation = walkDestroy - } - - // Walk the graph - walker, walkDiags := c.walk(graph, operation) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - - if c.destroy && !diags.HasErrors() { - // If we know we were trying to destroy objects anyway, and we - // completed without any errors, then we'll also prune out any - // leftover empty resource husks (left after all of the instances - // of a resource with "count" or "for_each" are destroyed) to - // help ensure we end up with an _actually_ empty state, assuming - // we weren't destroying with -target here. - // - // (This doesn't actually take into account -target, but that should - // be okay because it doesn't throw away anything we can't recompute - // on a subsequent "terraform plan" run, if the resources are still - // present in the configuration. However, this _will_ cause "count = 0" - // resources to read as unknown during the next refresh walk, which - // may cause some additional churn if used in a data resource or - // provider block, until we remove refreshing as a separate walk and - // just do it as part of the plan walk.) - c.state.PruneResourceHusks() - } - - if len(c.targets) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Applied changes may be incomplete", - `The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending: - terraform plan - -Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - )) - } - - return c.state, diags -} - -// Plan generates an execution plan for the given context. -// -// The execution plan encapsulates the context and can be stored -// in order to reinstantiate a context later for Apply. -// -// Plan also updates the diff of this context to be the diff generated -// by the plan, so Apply can be called after. -func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) { - defer c.acquireRun("plan")() - c.changes = plans.NewChanges() - - var diags tfdiags.Diagnostics - - if len(c.targets) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Resource targeting is in effect", - `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. - -The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - )) - } - - varVals := make(map[string]plans.DynamicValue, len(c.variables)) - for k, iv := range c.variables { - // We use cty.DynamicPseudoType here so that we'll save both the - // value _and_ its dynamic type in the plan, so we can recover - // exactly the same value later. 
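// For instance, cty.StringVal("10") is restored as a string and
// cty.NumberIntVal(10) as a number; serializing against a concrete type
// instead of cty.DynamicPseudoType would lose that distinction.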
- dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to prepare variable value for plan", - fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err), - )) - continue - } - varVals[k] = dv - } - - p := &plans.Plan{ - VariableValues: varVals, - TargetAddrs: c.targets, - ProviderSHA256s: c.providerSHA256s, - } - - var operation walkOperation - if c.destroy { - operation = walkPlanDestroy - } else { - // Set our state to be something temporary. We do this so that - // the plan can update a fake state so that variables work, then - // we replace it back with our old state. - old := c.state - if old == nil { - c.state = states.NewState() - } else { - c.state = old.DeepCopy() - } - defer func() { - c.state = old - }() - - operation = walkPlan - } - - // Build the graph. - graphType := GraphTypePlan - if c.destroy { - graphType = GraphTypePlanDestroy - } - graph, graphDiags := c.Graph(graphType, nil) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return nil, diags - } - - // Do the walk - walker, walkDiags := c.walk(graph, operation) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return nil, diags - } - p.Changes = c.changes - - return p, diags -} - -// Refresh goes through all the resources in the state and refreshes them -// to their latest state. This will update the state that this context -// works with, along with returning it. -// -// Even in the case an error is returned, the state may be returned and -// will potentially be partially updated. -func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) { - defer c.acquireRun("refresh")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Refresh builds a partial changeset as part of its work because it must - // create placeholder stubs for any resource instances that'll be created - // in subsequent plan so that provider configurations and data resources - // can interpolate from them. This plan is always thrown away after - // the operation completes, restoring any existing changeset. - oldChanges := c.changes - defer func() { c.changes = oldChanges }() - c.changes = plans.NewChanges() - - // Build the graph. - graph, diags := c.Graph(GraphTypeRefresh, nil) - if diags.HasErrors() { - return nil, diags - } - - // Do the walk - _, walkDiags := c.walk(graph, walkRefresh) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return nil, diags - } - - // During our walk we will have created planned object placeholders in - // state for resource instances that are in configuration but not yet - // created. These were created only to allow expression evaluation to - // work properly in provider and data blocks during the walk and must - // now be discarded, since a subsequent plan walk is responsible for - // creating these "for real". - // TODO: Consolidate refresh and plan into a single walk, so that the - // refresh walk doesn't need to emulate various aspects of the plan - // walk in order to properly evaluate provider and data blocks. - c.state.SyncWrapper().RemovePlannedResourceInstanceObjects() - - return c.state, diags -} - -// Stop stops the running task. -// -// Stop will block until the task completes. 
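//
// A sketch of the intended pattern (interruptCh and tfCtx are hypothetical):
//
//	go func() {
//		<-interruptCh // e.g. closed by a SIGINT handler
//		tfCtx.Stop()  // blocks until the in-flight walk winds down
//	}()
//	state, diags := tfCtx.Apply()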
-func (c *Context) Stop() { - log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence") - - c.l.Lock() - defer c.l.Unlock() - - // If we're running, then stop - if c.runContextCancel != nil { - log.Printf("[WARN] terraform: run context exists, stopping") - - // Tell the hook we want to stop - c.sh.Stop() - - // Stop the context - c.runContextCancel() - c.runContextCancel = nil - } - - // Grab the condition var before we exit - if cond := c.runCond; cond != nil { - log.Printf("[INFO] terraform: waiting for graceful stop to complete") - cond.Wait() - } - - log.Printf("[WARN] terraform: stop complete") -} - -// Validate performs semantic validation of the configuration, and returning -// any warnings or errors. -// -// Syntax and structural checks are performed by the configuration loader, -// and so are not repeated here. -func (c *Context) Validate() tfdiags.Diagnostics { - defer c.acquireRun("validate")() - - var diags tfdiags.Diagnostics - - // Validate input variables. We do this only for the values supplied - // by the root module, since child module calls are validated when we - // visit their graph nodes. - if c.config != nil { - varDiags := checkInputVariables(c.config.Module.Variables, c.variables) - diags = diags.Append(varDiags) - } - - // If we have errors at this point then we probably won't be able to - // construct a graph without producing redundant errors, so we'll halt early. - if diags.HasErrors() { - return diags - } - - // Build the graph so we can walk it and run Validate on nodes. - // We also validate the graph generated here, but this graph doesn't - // necessarily match the graph that Plan will generate, so we'll validate the - // graph again later after Planning. - graph, graphDiags := c.Graph(GraphTypeValidate, nil) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return diags - } - - // Walk - walker, walkDiags := c.walk(graph, walkValidate) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return diags - } - - return diags -} - -// Config returns the configuration tree associated with this context. -func (c *Context) Config() *configs.Config { - return c.config -} - -// Variables will return the mapping of variables that were defined -// for this Context. If Input was called, this mapping may be different -// than what was given. -func (c *Context) Variables() InputValues { - return c.variables -} - -// SetVariable sets a variable after a context has already been built. -func (c *Context) SetVariable(k string, v cty.Value) { - c.variables[k] = &InputValue{ - Value: v, - SourceType: ValueFromCaller, - } -} - -func (c *Context) acquireRun(phase string) func() { - // With the run lock held, grab the context lock to make changes - // to the run context. - c.l.Lock() - defer c.l.Unlock() - - // Wait until we're no longer running - for c.runCond != nil { - c.runCond.Wait() - } - - // Build our lock - c.runCond = sync.NewCond(&c.l) - - // Create a new run context - c.runContext, c.runContextCancel = context.WithCancel(context.Background()) - - // Reset the stop hook so we're not stopped - c.sh.Reset() - - // Reset the shadow errors - c.shadowErr = nil - - return c.releaseRun -} - -func (c *Context) releaseRun() { - // Grab the context lock so that we can make modifications to fields - c.l.Lock() - defer c.l.Unlock() - - // End our run. 
We check if runContext is non-nil because it can be - // set to nil if it was cancelled via Stop() - if c.runContextCancel != nil { - c.runContextCancel() - } - - // Unlock all waiting our condition - cond := c.runCond - c.runCond = nil - cond.Broadcast() - - // Unset the context - c.runContext = nil -} - -func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) { - log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) - - walker := c.graphWalker(operation) - - // Watch for a stop so we can call the provider Stop() API. - watchStop, watchWait := c.watchStop(walker) - - // Walk the real graph, this will block until it completes - diags := graph.Walk(walker) - - // Close the channel so the watcher stops, and wait for it to return. - close(watchStop) - <-watchWait - - return walker, diags -} - -func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker { - return &ContextGraphWalker{ - Context: c, - State: c.state.SyncWrapper(), - Changes: c.changes.SyncWrapper(), - Operation: operation, - StopContext: c.runContext, - RootVariableValues: c.variables, - } -} - -// watchStop immediately returns a `stop` and a `wait` chan after dispatching -// the watchStop goroutine. This will watch the runContext for cancellation and -// stop the providers accordingly. When the watch is no longer needed, the -// `stop` chan should be closed before waiting on the `wait` chan. -// The `wait` chan is important, because without synchronizing with the end of -// the watchStop goroutine, the runContext may also be closed during the select -// incorrectly causing providers to be stopped. Even if the graph walk is done -// at that point, stopping a provider permanently cancels its StopContext which -// can cause later actions to fail. -func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) { - stop := make(chan struct{}) - wait := make(chan struct{}) - - // get the runContext cancellation channel now, because releaseRun will - // write to the runContext field. - done := c.runContext.Done() - - go func() { - defer close(wait) - // Wait for a stop or completion - select { - case <-done: - // done means the context was canceled, so we need to try and stop - // providers. - case <-stop: - // our own stop channel was closed. - return - } - - // If we're here, we're stopped, trigger the call. - log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop") - - { - // Copy the providers so that a misbehaved blocking Stop doesn't - // completely hang Terraform. - walker.providerLock.Lock() - ps := make([]providers.Interface, 0, len(walker.providerCache)) - for _, p := range walker.providerCache { - ps = append(ps, p) - } - defer walker.providerLock.Unlock() - - for _, p := range ps { - // We ignore the error for now since there isn't any reasonable - // action to take if there is an error here, since the stop is still - // advisory: Terraform will exit once the graph node completes. - p.Stop() - } - } - - { - // Call stop on all the provisioners - walker.provisionerLock.Lock() - ps := make([]provisioners.Interface, 0, len(walker.provisionerCache)) - for _, p := range walker.provisionerCache { - ps = append(ps, p) - } - defer walker.provisionerLock.Unlock() - - for _, p := range ps { - // We ignore the error for now since there isn't any reasonable - // action to take if there is an error here, since the stop is still - // advisory: Terraform will exit once the graph node completes. 
- p.Stop() - } - } - }() - - return stop, wait -} - -// ShimLegacyState is a helper that takes the legacy state type and -// converts it to the new state type. -// -// This is implemented as a state file upgrade, so it will not preserve -// parts of the state structure that are not included in a serialized state, -// such as the resolved results of any local values, outputs in non-root -// modules, etc. -func ShimLegacyState(legacy *State) (*states.State, error) { - if legacy == nil { - return nil, nil - } - var buf bytes.Buffer - err := WriteState(legacy, &buf) - if err != nil { - return nil, err - } - f, err := statefile.Read(&buf) - if err != nil { - return nil, err - } - return f.State, err -} - -// MustShimLegacyState is a wrapper around ShimLegacyState that panics if -// the conversion does not succeed. This is primarily intended for tests where -// the given legacy state is an object constructed within the test. -func MustShimLegacyState(legacy *State) *states.State { - ret, err := ShimLegacyState(legacy) - if err != nil { - panic(err) - } - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go deleted file mode 100644 index a627996e..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go +++ /dev/null @@ -1,68 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" -) - -// contextComponentFactory is the interface that Context uses -// to initialize various components such as providers and provisioners. -// This factory gets more information than the raw maps using to initialize -// a Context. This information is used for debugging. -type contextComponentFactory interface { - // ResourceProvider creates a new ResourceProvider with the given - // type. The "uid" is a unique identifier for this provider being - // initialized that can be used for internal tracking. - ResourceProvider(typ, uid string) (providers.Interface, error) - ResourceProviders() []string - - // ResourceProvisioner creates a new ResourceProvisioner with the - // given type. The "uid" is a unique identifier for this provisioner - // being initialized that can be used for internal tracking. - ResourceProvisioner(typ, uid string) (provisioners.Interface, error) - ResourceProvisioners() []string -} - -// basicComponentFactory just calls a factory from a map directly. 
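//
// For example (sketch; the factory value is hypothetical):
//
//	components := &basicComponentFactory{
//		providers: map[string]providers.Factory{
//			"null": nullFactory,
//		},
//	}
//	p, err := components.ResourceProvider("null", "null_1")
//	// err is non-nil only for a type missing from the map; the uid
//	// argument is accepted for tracking but unused by this implementation.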
-type basicComponentFactory struct { - providers map[string]providers.Factory - provisioners map[string]ProvisionerFactory -} - -func (c *basicComponentFactory) ResourceProviders() []string { - result := make([]string, len(c.providers)) - for k := range c.providers { - result = append(result, k) - } - - return result -} - -func (c *basicComponentFactory) ResourceProvisioners() []string { - result := make([]string, len(c.provisioners)) - for k := range c.provisioners { - result = append(result, k) - } - - return result -} - -func (c *basicComponentFactory) ResourceProvider(typ, uid string) (providers.Interface, error) { - f, ok := c.providers[typ] - if !ok { - return nil, fmt.Errorf("unknown provider %q", typ) - } - - return f() -} - -func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (provisioners.Interface, error) { - f, ok := c.provisioners[typ] - if !ok { - return nil, fmt.Errorf("unknown provisioner %q", typ) - } - - return f() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go deleted file mode 100644 index 4448d870..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go +++ /dev/null @@ -1,32 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=GraphType context_graph_type.go - -// GraphType is an enum of the type of graph to create with a Context. -// The values of the constants may change so they shouldn't be depended on; -// always use the constant name. -type GraphType byte - -const ( - GraphTypeInvalid GraphType = 0 - GraphTypeLegacy GraphType = iota - GraphTypeRefresh - GraphTypePlan - GraphTypePlanDestroy - GraphTypeApply - GraphTypeValidate - GraphTypeEval // only visits in-memory elements such as variables, locals, and outputs. -) - -// GraphTypeMap is a mapping of human-readable string to GraphType. This -// is useful to use as the mechanism for human input for configurable -// graph types. -var GraphTypeMap = map[string]GraphType{ - "apply": GraphTypeApply, - "plan": GraphTypePlan, - "plan-destroy": GraphTypePlanDestroy, - "refresh": GraphTypeRefresh, - "legacy": GraphTypeLegacy, - "validate": GraphTypeValidate, - "eval": GraphTypeEval, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go deleted file mode 100644 index 9a9cd962..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go +++ /dev/null @@ -1,83 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ImportOpts are used as the configuration for Import. -type ImportOpts struct { - // Targets are the targets to import - Targets []*ImportTarget - - // Config is optional, and specifies a config tree that will be loaded - // into the graph and evaluated. This is the source for provider - // configurations. - Config *configs.Config -} - -// ImportTarget is a single resource to import. -type ImportTarget struct { - // Addr is the address for the resource instance that the new object should - // be imported into. 
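// (For orientation, a caller-side sketch of an import using this struct;
// the address value and ID are hypothetical:
//
//	state, diags := tfCtx.Import(&ImportOpts{
//		Targets: []*ImportTarget{{
//			Addr: addr,           // parsed form of, e.g., aws_instance.web
//			ID:   "i-0123456789", // provider-specific import ID
//		}},
//	})
//
// Import itself is defined below.)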
- Addr addrs.AbsResourceInstance - - // ID is the ID of the resource to import. This is resource-specific. - ID string - - // ProviderAddr is the address of the provider that should handle the import. - ProviderAddr addrs.AbsProviderConfig -} - -// Import takes already-created external resources and brings them -// under Terraform management. Import requires the exact type, name, and ID -// of the resources to import. -// -// This operation is idempotent. If the requested resource is already -// imported, no changes are made to the state. -// -// Further, this operation also gracefully handles partial state. If during -// an import there is a failure, all previously imported resources remain -// imported. -func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Hold a lock since we can modify our own state here - defer c.acquireRun("import")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // If no module is given, default to the module configured with - // the Context. - config := opts.Config - if config == nil { - config = c.config - } - - // Initialize our graph builder - builder := &ImportGraphBuilder{ - ImportTargets: opts.Targets, - Config: config, - Components: c.components, - Schemas: c.schemas, - } - - // Build the graph! - graph, graphDiags := builder.Build(addrs.RootModuleInstance) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return c.state, diags - } - - // Walk it - _, walkDiags := c.walk(graph, walkImport) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return c.state, diags - } - - return c.state, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go deleted file mode 100644 index b99f1afa..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go +++ /dev/null @@ -1,251 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Input asks for input to fill variables and provider configurations. -// This modifies the configuration in-place, so asking for Input twice -// may result in different UI output showing different current values. -func (c *Context) Input(mode InputMode) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - defer c.acquireRun("input")() - - if c.uiInput == nil { - log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping") - return diags - } - - ctx := context.Background() - - if mode&InputModeVar != 0 { - log.Printf("[TRACE] Context.Input: Prompting for variables") - - // Walk the variables first for the root module. We walk them in - // alphabetical order for UX reasons. - configs := c.config.Module.Variables - names := make([]string, 0, len(configs)) - for name := range configs { - names = append(names, name) - } - sort.Strings(names) - Variables: - for _, n := range names { - v := configs[n] - - // If we only care about unset variables, then we should set any - // variable that is already set. 
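// (Callers select this behavior through the mode bits defined in
// context.go: InputModeStd enables both this variable walk and the
// provider walk below, while InputModeVar|InputModeVarUnset asks only
// for variables that have no value yet, e.g.
//
//	diags := tfCtx.Input(InputModeVar | InputModeVarUnset)
//
// with tfCtx a hypothetical *Context.)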
- if mode&InputModeVarUnset != 0 { - if _, isSet := c.variables[n]; isSet { - continue - } - } - - // this should only happen during tests - if c.uiInput == nil { - log.Println("[WARN] Context.uiInput is nil during input walk") - continue - } - - // Ask the user for a value for this variable - var rawValue string - retry := 0 - for { - var err error - rawValue, err = c.uiInput.Input(ctx, &InputOpts{ - Id: fmt.Sprintf("var.%s", n), - Query: fmt.Sprintf("var.%s", n), - Description: v.Description, - }) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to request interactive input", - fmt.Sprintf("Terraform attempted to request a value for var.%s interactively, but encountered an error: %s.", n, err), - )) - return diags - } - - if rawValue == "" && v.Default == cty.NilVal { - // Redo if it is required, but abort if we keep getting - // blank entries - if retry > 2 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Required variable not assigned", - fmt.Sprintf("The variable %q is required, so Terraform cannot proceed without a defined value for it.", n), - )) - continue Variables - } - retry++ - continue - } - - break - } - - val, valDiags := v.ParsingMode.Parse(n, rawValue) - diags = diags.Append(valDiags) - if diags.HasErrors() { - continue - } - - c.variables[n] = &InputValue{ - Value: val, - SourceType: ValueFromInput, - } - } - } - - if mode&InputModeProvider != 0 { - log.Printf("[TRACE] Context.Input: Prompting for provider arguments") - - // We prompt for input only for provider configurations defined in - // the root module. At the time of writing that is an arbitrary - // restriction, but we have future plans to support "count" and - // "for_each" on modules that will then prevent us from supporting - // input for child module configurations anyway (since we'd need to - // dynamic-expand first), and provider configurations in child modules - // are not recommended since v0.11 anyway, so this restriction allows - // us to keep this relatively simple without significant hardship. - - pcs := make(map[string]*configs.Provider) - pas := make(map[string]addrs.ProviderConfig) - for _, pc := range c.config.Module.ProviderConfigs { - addr := pc.Addr() - pcs[addr.String()] = pc - pas[addr.String()] = addr - log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange) - } - // We also need to detect _implied_ provider configs from resources. - // These won't have *configs.Provider objects, but they will still - // exist in the map and we'll just treat them as empty below. 
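// Illustration: a module containing only
//
//	resource "aws_instance" "web" {
//		ami = "ami-123456"
//	}
//
// and no provider "aws" block still implies an empty aws provider
// configuration, which the two loops below discover from the managed
// and data resources respectively.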
- for _, rc := range c.config.Module.ManagedResources { - pa := rc.ProviderConfigAddr() - if pa.Alias != "" { - continue // alias configurations cannot be implied - } - if _, exists := pcs[pa.String()]; !exists { - pcs[pa.String()] = nil - pas[pa.String()] = pa - log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange) - } - } - for _, rc := range c.config.Module.DataResources { - pa := rc.ProviderConfigAddr() - if pa.Alias != "" { - continue // alias configurations cannot be implied - } - if _, exists := pcs[pa.String()]; !exists { - pcs[pa.String()] = nil - pas[pa.String()] = pa - log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange) - } - } - - for pk, pa := range pas { - pc := pcs[pk] // will be nil if this is an implied config - - // Wrap the input into a namespace - input := &PrefixUIInput{ - IdPrefix: pk, - QueryPrefix: pk + ".", - UIInput: c.uiInput, - } - - schema := c.schemas.ProviderConfig(pa.Type) - if schema == nil { - // Could either be an incorrect config or just an incomplete - // mock in tests. We'll let a later pass decide, and just - // ignore this for the purposes of gathering input. - log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.Type) - continue - } - - // For our purposes here we just want to detect if attrbutes are - // set in config at all, so rather than doing a full decode - // (which would require us to prepare an evalcontext, etc) we'll - // use the low-level HCL API to process only the top-level - // structure. - var attrExprs hcl.Attributes // nil if there is no config - if pc != nil && pc.Config != nil { - lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec())) - content, _, diags := pc.Config.PartialContent(lowLevelSchema) - if diags.HasErrors() { - log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error()) - continue - } - attrExprs = content.Attributes - } - - keys := make([]string, 0, len(schema.Attributes)) - for key := range schema.Attributes { - keys = append(keys, key) - } - sort.Strings(keys) - - vals := map[string]cty.Value{} - for _, key := range keys { - attrS := schema.Attributes[key] - if attrS.Optional { - continue - } - if attrExprs != nil { - if _, exists := attrExprs[key]; exists { - continue - } - } - if !attrS.Type.Equals(cty.String) { - continue - } - - log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key) - rawVal, err := input.Input(ctx, &InputOpts{ - Id: key, - Query: key, - Description: attrS.Description, - }) - if err != nil { - log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err) - continue - } - - vals[key] = cty.StringVal(rawVal) - } - - c.providerInputConfig[pk] = vals - log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals) - } - } - - return diags -} - -// schemaForInputSniffing returns a transformed version of a given schema -// that marks all attributes as optional, which the Context.Input method can -// use to detect whether a required argument is set without missing arguments -// themselves generating errors. 
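//
// For example, an attribute declared as Required in the provider schema
// comes back as Optional here, so the PartialContent call above can treat
// a missing required argument as a simple absence to prompt for, rather
// than as a hard decode error.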
-func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), - Blocks: schema.Blocks, - } - - for i, attrS := range schema.Attributes { - ret.Attributes[i] = attrS - ret.Attributes[i].Required = false - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go deleted file mode 100644 index 17464bc0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go +++ /dev/null @@ -1,17 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// DestroyEdge is an edge that represents a standard "destroy" relationship: -// Target depends on Source because Source is destroying. -type DestroyEdge struct { - S, T dag.Vertex -} - -func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) } -func (e *DestroyEdge) Source() dag.Vertex { return e.S } -func (e *DestroyEdge) Target() dag.Vertex { return e.T } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go deleted file mode 100644 index c490c3bc..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalNode is the interface that must be implemented by graph nodes to -// evaluate/execute. -type EvalNode interface { - // Eval evaluates this node with the given context. The second parameter - // are the argument values. These will match in order and 1-1 with the - // results of the Args() return value. - Eval(EvalContext) (interface{}, error) -} - -// GraphNodeEvalable is the interface that graph nodes must implement -// to enable valuation. -type GraphNodeEvalable interface { - EvalTree() EvalNode -} - -// EvalEarlyExitError is a special error return value that can be returned -// by eval nodes that does an early exit. -type EvalEarlyExitError struct{} - -func (EvalEarlyExitError) Error() string { return "early exit" } - -// Eval evaluates the given EvalNode with the given context, properly -// evaluating all args in the correct order. -func Eval(n EvalNode, ctx EvalContext) (interface{}, error) { - // Call the lower level eval which doesn't understand early exit, - // and if we early exit, it isn't an error. - result, err := EvalRaw(n, ctx) - if err != nil { - if _, ok := err.(EvalEarlyExitError); ok { - return nil, nil - } - } - - return result, err -} - -// EvalRaw is like Eval except that it returns all errors, even if they -// signal something normal such as EvalEarlyExitError. 
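//
// A sketch of the contract between the two (the node type is hypothetical):
//
//	type earlyExitNode struct{}
//
//	func (earlyExitNode) Eval(ctx EvalContext) (interface{}, error) {
//		return nil, EvalEarlyExitError{}
//	}
//
//	v, err := Eval(earlyExitNode{}, ctx)   // nil, nil: early exit is swallowed
//	v, err = EvalRaw(earlyExitNode{}, ctx) // nil, EvalEarlyExitError{}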
-func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) { - path := "unknown" - if ctx != nil { - path = ctx.Path().String() - } - if path == "" { - path = "" - } - - log.Printf("[TRACE] %s: eval: %T", path, n) - output, err := n.Eval(ctx) - if err != nil { - switch err.(type) { - case EvalEarlyExitError: - log.Printf("[TRACE] %s: eval: %T, early exit err: %s", path, n, err) - case tfdiags.NonFatalError: - log.Printf("[WARN] %s: eval: %T, non-fatal err: %s", path, n, err) - default: - log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err) - } - } - - return output, err -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go deleted file mode 100644 index 6beeaea9..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go +++ /dev/null @@ -1,656 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalApply is an EvalNode implementation that writes the diff to -// the full diff. -type EvalApply struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Dependencies []addrs.Referenceable - State **states.ResourceInstanceObject - Change **plans.ResourceInstanceChange - ProviderAddr addrs.AbsProviderConfig - Provider *providers.Interface - ProviderSchema **ProviderSchema - Output **states.ResourceInstanceObject - CreateNew *bool - Error *error -} - -// TODO: test -func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - change := *n.Change - provider := *n.Provider - state := *n.State - absAddr := n.Addr.Absolute(ctx.Path()) - - if state == nil { - state = &states.ResourceInstanceObject{} - } - - schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - if n.CreateNew != nil { - *n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace()) - } - - configVal := cty.NullVal(cty.DynamicPseudoType) - if n.Config != nil { - var configDiags tfdiags.Diagnostics - forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - } - - if !configVal.IsWhollyKnown() { - return nil, fmt.Errorf( - "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", - absAddr, - ) - } - - log.Printf("[DEBUG] %s: applying the planned %s change", 
n.Addr.Absolute(ctx.Path()), change.Action) - resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - PriorState: change.Before, - Config: configVal, - PlannedState: change.After, - PlannedPrivate: change.Private, - }) - applyDiags := resp.Diagnostics - if n.Config != nil { - applyDiags = applyDiags.InConfigBody(n.Config.Config) - } - diags = diags.Append(applyDiags) - - // Even if there are errors in the returned diagnostics, the provider may - // have returned a _partial_ state for an object that already exists but - // failed to fully configure, and so the remaining code must always run - // to completion but must be defensive against the new value being - // incomplete. - newVal := resp.NewState - - if newVal == cty.NilVal { - // Providers are supposed to return a partial new value even when errors - // occur, but sometimes they don't and so in that case we'll patch that up - // by just using the prior state, so we'll at least keep track of the - // object for the user to retry. - newVal = change.Before - - // As a special case, we'll set the new value to null if it looks like - // we were trying to execute a delete, because the provider in this case - // probably left the newVal unset intending it to be interpreted as "null". - if change.After.IsNull() { - newVal = cty.NullVal(schema.ImpliedType()) - } - - // Ideally we'd produce an error or warning here if newVal is nil and - // there are no errors in diags, because that indicates a buggy - // provider not properly reporting its result, but unfortunately many - // of our historical test mocks behave in this way and so producing - // a diagnostic here fails hundreds of tests. Instead, we must just - // silently retain the old value for now. Returning a nil value with - // no errors is still always considered a bug in the provider though, - // and should be fixed for any "real" providers that do it. - } - - var conformDiags tfdiags.Diagnostics - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - conformDiags = conformDiags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value after apply for %s. The result cannot not be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - diags = diags.Append(conformDiags) - if conformDiags.HasErrors() { - // Bail early in this particular case, because an object that doesn't - // conform to the schema can't be saved in the state anyway -- the - // serializer will reject it. - return nil, diags.Err() - } - - // After this point we have a type-conforming result object and so we - // must always run to completion to ensure it can be saved. If n.Error - // is set then we must not return a non-nil error, in order to allow - // evaluation to continue to a later point where our state object will - // be saved. - - // By this point there must not be any unknown values remaining in our - // object, because we've applied the change and we can't save unknowns - // in our persistent state. If any are present then we will indicate an - // error (which is always a bug in the provider) but we will also replace - // them with nulls so that we can successfully save the portions of the - // returned value that are known. 
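// Illustration of the replacement performed below:
//
//	v := cty.ObjectVal(map[string]cty.Value{
//		"id":  cty.UnknownVal(cty.String),
//		"arn": cty.StringVal("arn:aws:..."),
//	})
//	v = cty.UnknownAsNull(v) // "id" becomes null, "arn" is preserved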
- if !newVal.IsWhollyKnown() { - // To generate better error messages, we'll go for a walk through the - // value and make a separate diagnostic for each unknown value we - // find. - cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) { - if !val.IsKnown() { - pathStr := tfdiags.FormatCtyPath(path) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.", - n.Addr.Absolute(ctx.Path()), pathStr, - ), - )) - } - return true, nil - }) - - // NOTE: This operation can potentially be lossy if there are multiple - // elements in a set that differ only by unknown values: after - // replacing with null these will be merged together into a single set - // element. Since we can only get here in the presence of a provider - // bug, we accept this because storing a result here is always a - // best-effort sort of thing. - newVal = cty.UnknownAsNull(newVal) - } - - if change.Action != plans.Delete && !diags.HasErrors() { - // Only values that were marked as unknown in the planned value are allowed - // to change during the apply operation. (We do this after the unknown-ness - // check above so that we also catch anything that became unknown after - // being known during plan.) - // - // If we are returning other errors anyway then we'll give this - // a pass since the other errors are usually the explanation for - // this one and so it's more helpful to let the user focus on the - // root cause rather than distract with this extra problem. - if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { - if resp.LegacyTypeSystem { - // The shimming of the old type system in the legacy SDK is not precise - // enough to pass this consistency check, so we'll give it a pass here, - // but we will generate a warning about it so that we are more likely - // to notice in the logs if an inconsistency beyond the type system - // leads to a downstream provider failure. - var buf strings.Builder - fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - - // The sort of inconsistency we won't catch here is if a known value - // in the plan is changed during apply. That can cause downstream - // problems because a dependent resource would make its own plan based - // on the planned value, and thus get a different result during the - // apply phase. This will usually lead to a "Provider produced invalid plan" - // error that incorrectly blames the downstream resource for the change. 
- - } else { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent result after apply", - fmt.Sprintf( - "When applying changes to %s, provider %q produced an unexpected new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), - ), - )) - } - } - } - } - - // If a provider returns a null or non-null object at the wrong time then - // we still want to save that but it often causes some confusing behaviors - // where it seems like Terraform is failing to take any action at all, - // so we'll generate some errors to draw attention to it. - if !diags.HasErrors() { - if change.Action == plans.Delete && !newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.", - change.Action, n.Addr.Absolute(ctx.Path()), - ), - )) - } - if change.Action != plans.Delete && newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.", - change.Action, n.Addr.Absolute(ctx.Path()), - ), - )) - } - } - - // Sometimes providers return a null value when an operation fails for some - // reason, but we'd rather keep the prior state so that the error can be - // corrected on a subsequent run. We must only do this for null new value - // though, or else we may discard partial updates the provider was able to - // complete. - if diags.HasErrors() && newVal.IsNull() { - // Otherwise, we'll continue but using the prior state as the new value, - // making this effectively a no-op. If the item really _has_ been - // deleted then our next refresh will detect that and fix it up. - // If change.Action is Create then change.Before will also be null, - // which is fine. - newVal = change.Before - } - - var newState *states.ResourceInstanceObject - if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case - newState = &states.ResourceInstanceObject{ - Status: states.ObjectReady, - Value: newVal, - Private: resp.Private, - Dependencies: n.Dependencies, // Should be populated by the caller from the StateDependencies method on the resource instance node - } - } - - // Write the final state - if n.Output != nil { - *n.Output = newState - } - - if diags.HasErrors() { - // If the caller provided an error pointer then they are expected to - // handle the error some other way and we treat our own result as - // success. 
- if n.Error != nil { - err := diags.Err() - *n.Error = err - log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err) - return nil, nil - } - } - - return nil, diags.ErrWithWarnings() -} - -// EvalApplyPre is an EvalNode implementation that does the pre-Apply work -type EvalApplyPre struct { - Addr addrs.ResourceInstance - Gen states.Generation - State **states.ResourceInstanceObject - Change **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { - change := *n.Change - absAddr := n.Addr.Absolute(ctx.Path()) - - if change == nil { - panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr)) - } - - if resourceHasUserVisibleApply(n.Addr) { - priorState := change.Before - plannedNewState := change.After - - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalApplyPost is an EvalNode implementation that does the post-Apply work -type EvalApplyPost struct { - Addr addrs.ResourceInstance - Gen states.Generation - State **states.ResourceInstanceObject - Error *error -} - -// TODO: test -func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - - if resourceHasUserVisibleApply(n.Addr) { - absAddr := n.Addr.Absolute(ctx.Path()) - var newState cty.Value - if state != nil { - newState = state.Value - } else { - newState = cty.NullVal(cty.DynamicPseudoType) - } - var err error - if n.Error != nil { - err = *n.Error - } - - hookErr := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(absAddr, n.Gen, newState, err) - }) - if hookErr != nil { - return nil, hookErr - } - } - - return nil, *n.Error -} - -// EvalMaybeTainted is an EvalNode that takes the planned change, new value, -// and possible error from an apply operation and produces a new instance -// object marked as tainted if it appears that a create operation has failed. -// -// This EvalNode never returns an error, to ensure that a subsequent EvalNode -// can still record the possibly-tainted object in the state. -type EvalMaybeTainted struct { - Addr addrs.ResourceInstance - Gen states.Generation - Change **plans.ResourceInstanceChange - State **states.ResourceInstanceObject - Error *error - - // If StateOutput is not nil, its referent will be assigned either the same - // pointer as State or a new object with its status set as Tainted, - // depending on whether an error is given and if this was a create action. - StateOutput **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - change := *n.Change - err := *n.Error - - if state != nil && state.Status == states.ObjectTainted { - log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path())) - return nil, nil - } - - if n.StateOutput != nil { - if err != nil && change.Action == plans.Create { - // If there are errors during a _create_ then the object is - // in an undefined state, and so we'll mark it as tainted so - // we can try again on the next run. - // - // We don't do this for other change actions because errors - // during updates will often not change the remote object at all. 
-			// If there _were_ changes prior to the error, it's the provider's
-			// responsibility to record the effect of those changes in the
-			// object value it returned.
-			log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path()))
-			*n.StateOutput = state.AsTainted()
-		} else {
-			*n.StateOutput = state
-		}
-	}
-
-	return nil, nil
-}
-
-// resourceHasUserVisibleApply returns true if the given resource is one where
-// apply actions should be exposed to the user.
-//
-// Certain resources do apply actions only as an implementation detail, so
-// these should not be advertised to code outside of this package.
-func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool {
-	// Only managed resources have user-visible apply actions.
-	// In particular, this excludes data resources since we "apply" these
-	// only as an implementation detail of removing them from state when
-	// they are destroyed. (When reading, they don't get here at all because
-	// we present them as "Refresh" actions.)
-	return addr.ContainingResource().Mode == addrs.ManagedResourceMode
-}
-
-// EvalApplyProvisioners is an EvalNode implementation that executes
-// the provisioners for a resource.
-//
-// TODO(mitchellh): This should probably be split up into a more fine-grained
-// ApplyProvisioner (single) that is looped over.
-type EvalApplyProvisioners struct {
-	Addr           addrs.ResourceInstance
-	State          **states.ResourceInstanceObject
-	ResourceConfig *configs.Resource
-	CreateNew      *bool
-	Error          *error
-
-	// When is the type of provisioner to run at this point
-	When configs.ProvisionerWhen
-}
-
-// TODO: test
-func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
-	absAddr := n.Addr.Absolute(ctx.Path())
-	state := *n.State
-	if state == nil {
-		log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr)
-		return nil, nil
-	}
-	if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew {
-		// If we're not creating a new resource, then don't run provisioners
-		log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr)
-		return nil, nil
-	}
-	if state.Status == states.ObjectTainted {
-		// No point in provisioning an object that is already tainted, since
-		// it's going to get recreated on the next apply anyway.
-		log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr)
-		return nil, nil
-	}
-
-	provs := n.filterProvisioners()
-	if len(provs) == 0 {
-		// We have no provisioners, so don't do anything
-		return nil, nil
-	}
-
-	if n.Error != nil && *n.Error != nil {
-		// A previous apply step already failed, so don't run provisioners now
-		return nil, nil
-	}
-
-	{
-		// Call pre hook
-		err := ctx.Hook(func(h Hook) (HookAction, error) {
-			return h.PreProvisionInstance(absAddr, state.Value)
-		})
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	// If provisioning fails, we append the error to the caller's error
-	// pointer when one was provided; otherwise we return it directly.
-	err := n.apply(ctx, provs)
-	if err != nil {
-		// Check the pointer itself before dereferencing it; if the caller
-		// gave us no error pointer, return the error directly.
-		if n.Error == nil {
-			return nil, err
-		}
-		*n.Error = multierror.Append(*n.Error, err)
-		log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr)
-		return nil, nil
-	}
-
-	{
-		// Call post hook
-		err := ctx.Hook(func(h Hook) (HookAction, error) {
-			return h.PostProvisionInstance(absAddr, state.Value)
-		})
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return nil, nil
-}
-
-// filterProvisioners filters the provisioners on the resource to only
-// the provisioners specified by the "when" option.
-func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner {
-	// Fast path the zero case
-	if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil {
-		return nil
-	}
-
-	if len(n.ResourceConfig.Managed.Provisioners) == 0 {
-		return nil
-	}
-
-	result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners))
-	for _, p := range n.ResourceConfig.Managed.Provisioners {
-		if p.When == n.When {
-			result = append(result, p)
-		}
-	}
-
-	return result
-}
-
-func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error {
-	var diags tfdiags.Diagnostics
-	instanceAddr := n.Addr
-	absAddr := instanceAddr.Absolute(ctx.Path())
-
-	// If there's a connection block defined directly inside the resource block
-	// then it'll serve as a base connection configuration for all of the
-	// provisioners.
-	var baseConn hcl.Body
-	if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil {
-		baseConn = n.ResourceConfig.Managed.Connection.Config
-	}
-
-	for _, prov := range provs {
-		log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type)
-
-		// Get the provisioner
-		provisioner := ctx.Provisioner(prov.Type)
-		schema := ctx.ProvisionerSchema(prov.Type)
-
-		forEach, forEachDiags := evaluateResourceForEachExpression(n.ResourceConfig.ForEach, ctx)
-		diags = diags.Append(forEachDiags)
-		keyData := EvalDataForInstanceKey(instanceAddr.Key, forEach)
-
-		// Evaluate the main provisioner configuration.
-		config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData)
-		diags = diags.Append(configDiags)
-
-		// If the provisioner block contains a connection block of its own then
-		// it can override the base connection configuration, if any.
-		var localConn hcl.Body
-		if prov.Connection != nil {
-			localConn = prov.Connection.Config
-		}
-
-		var connBody hcl.Body
-		switch {
-		case baseConn != nil && localConn != nil:
-			// Our standard merging logic applies here, similar to what we do
-			// with _override.tf configuration files: arguments from the
-			// base connection block will be masked by any arguments of the
-			// same name in the local connection block.
-			connBody = configs.MergeBodies(baseConn, localConn)
-		case baseConn != nil:
-			connBody = baseConn
-		case localConn != nil:
-			connBody = localConn
-		}
-
-		// start with an empty connInfo
-		connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType())
-
-		if connBody != nil {
-			var connInfoDiags tfdiags.Diagnostics
-			connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData)
-			diags = diags.Append(connInfoDiags)
-			if diags.HasErrors() {
-				// "on failure continue" setting only applies to failures of the
-				// provisioner itself, not to invalid configuration.
-				return diags.Err()
-			}
-		}
-
-		{
-			// Call pre hook
-			err := ctx.Hook(func(h Hook) (HookAction, error) {
-				return h.PreProvisionInstanceStep(absAddr, prov.Type)
-			})
-			if err != nil {
-				return err
-			}
-		}
-
-		// The output function
-		outputFn := func(msg string) {
-			ctx.Hook(func(h Hook) (HookAction, error) {
-				h.ProvisionOutput(absAddr, prov.Type, msg)
-				return HookActionContinue, nil
-			})
-		}
-
-		output := CallbackUIOutput{OutputFn: outputFn}
-		resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{
-			Config:     config,
-			Connection: connInfo,
-			UIOutput:   &output,
-		})
-		applyDiags := resp.Diagnostics.InConfigBody(prov.Config)
-
-		// Call post hook
-		hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
-			return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err())
-		})
-
-		switch prov.OnFailure {
-		case configs.ProvisionerOnFailureContinue:
-			if applyDiags.HasErrors() {
-				log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type)
-			} else {
-				// Maybe there are warnings that we still want to see
-				diags = diags.Append(applyDiags)
-			}
-		default:
-			diags = diags.Append(applyDiags)
-			if applyDiags.HasErrors() {
-				log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type)
-				return diags.Err()
-			}
-		}
-
-		// Deal with the hook
-		if hookErr != nil {
-			return hookErr
-		}
-	}
-
-	return diags.ErrWithWarnings()
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go
deleted file mode 100644
index d13a9652..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package terraform
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/plans"
-
-	"github.com/hashicorp/hcl/v2"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-// EvalCheckPreventDestroy is an EvalNode implementation that returns an
-// error if a resource has PreventDestroy configured and the diff
-// would destroy the resource.
-type EvalCheckPreventDestroy struct {
-	Addr   addrs.ResourceInstance
-	Config *configs.Resource
-	Change **plans.ResourceInstanceChange
-}
-
-func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
-	if n.Change == nil || *n.Change == nil || n.Config == nil || n.Config.Managed == nil {
-		return nil, nil
-	}
-
-	change := *n.Change
-	preventDestroy := n.Config.Managed.PreventDestroy
-
-	if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy {
-		var diags tfdiags.Diagnostics
-		diags = diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Instance cannot be destroyed",
-			Detail: fmt.Sprintf(
-				"Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed.
To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.", - n.Addr.Absolute(ctx.Path()).String(), - ), - Subject: &n.Config.DeclRange, - }) - return nil, diags.Err() - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go deleted file mode 100644 index 4fa011e2..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go +++ /dev/null @@ -1,133 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// EvalContext is the interface that is given to eval nodes to execute. -type EvalContext interface { - // Stopped returns a channel that is closed when evaluation is stopped - // via Terraform.Context.Stop() - Stopped() <-chan struct{} - - // Path is the current module path. - Path() addrs.ModuleInstance - - // Hook is used to call hook methods. The callback is called for each - // hook and should return the hook action to take and the error. - Hook(func(Hook) (HookAction, error)) error - - // Input is the UIInput object for interacting with the UI. - Input() UIInput - - // InitProvider initializes the provider with the given type and address, and - // returns the implementation of the resource provider or an error. - // - // It is an error to initialize the same provider more than once. - InitProvider(typ string, addr addrs.ProviderConfig) (providers.Interface, error) - - // Provider gets the provider instance with the given address (already - // initialized) or returns nil if the provider isn't initialized. - // - // This method expects an _absolute_ provider configuration address, since - // resources in one module are able to use providers from other modules. - // InitProvider must've been called on the EvalContext of the module - // that owns the given provider before calling this method. - Provider(addrs.AbsProviderConfig) providers.Interface - - // ProviderSchema retrieves the schema for a particular provider, which - // must have already been initialized with InitProvider. - // - // This method expects an _absolute_ provider configuration address, since - // resources in one module are able to use providers from other modules. - ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema - - // CloseProvider closes provider connections that aren't needed anymore. - CloseProvider(addrs.ProviderConfig) error - - // ConfigureProvider configures the provider with the given - // configuration. This is a separate context call because this call - // is used to store the provider configuration for inheritance lookups - // with ParentProviderConfig(). - ConfigureProvider(addrs.ProviderConfig, cty.Value) tfdiags.Diagnostics - - // ProviderInput and SetProviderInput are used to configure providers - // from user input. 
- ProviderInput(addrs.ProviderConfig) map[string]cty.Value - SetProviderInput(addrs.ProviderConfig, map[string]cty.Value) - - // InitProvisioner initializes the provisioner with the given name and - // returns the implementation of the resource provisioner or an error. - // - // It is an error to initialize the same provisioner more than once. - InitProvisioner(string) (provisioners.Interface, error) - - // Provisioner gets the provisioner instance with the given name (already - // initialized) or returns nil if the provisioner isn't initialized. - Provisioner(string) provisioners.Interface - - // ProvisionerSchema retrieves the main configuration schema for a - // particular provisioner, which must have already been initialized with - // InitProvisioner. - ProvisionerSchema(string) *configschema.Block - - // CloseProvisioner closes provisioner connections that aren't needed - // anymore. - CloseProvisioner(string) error - - // EvaluateBlock takes the given raw configuration block and associated - // schema and evaluates it to produce a value of an object type that - // conforms to the implied type of the schema. - // - // The "self" argument is optional. If given, it is the referenceable - // address that the name "self" should behave as an alias for when - // evaluating. Set this to nil if the "self" object should not be available. - // - // The "key" argument is also optional. If given, it is the instance key - // of the current object within the multi-instance container it belongs - // to. For example, on a resource block with "count" set this should be - // set to a different addrs.IntKey for each instance created from that - // block. Set this to addrs.NoKey if not appropriate. - // - // The returned body is an expanded version of the given body, with any - // "dynamic" blocks replaced with zero or more static blocks. This can be - // used to extract correct source location information about attributes of - // the returned object value. - EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) - - // EvaluateExpr takes the given HCL expression and evaluates it to produce - // a value. - // - // The "self" argument is optional. If given, it is the referenceable - // address that the name "self" should behave as an alias for when - // evaluating. Set this to nil if the "self" object should not be available. - EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) - - // EvaluationScope returns a scope that can be used to evaluate reference - // addresses in this context. - EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope - - // SetModuleCallArguments defines values for the variables of a particular - // child module call. - // - // Calling this function multiple times has merging behavior, keeping any - // previously-set keys that are not present in the new map. - SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value) - - // Changes returns the writer object that can be used to write new proposed - // changes into the global changes set. - Changes() *plans.ChangesSync - - // State returns a wrapper object that provides safe concurrent access to - // the global state. 
- State() *states.SyncState -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go deleted file mode 100644 index bd414a96..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go +++ /dev/null @@ -1,329 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/version" - - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// BuiltinEvalContext is an EvalContext implementation that is used by -// Terraform by default. -type BuiltinEvalContext struct { - // StopContext is the context used to track whether we're complete - StopContext context.Context - - // PathValue is the Path that this context is operating within. - PathValue addrs.ModuleInstance - - // Evaluator is used for evaluating expressions within the scope of this - // eval context. - Evaluator *Evaluator - - // Schemas is a repository of all of the schemas we should need to - // decode configuration blocks and expressions. This must be constructed by - // the caller to include schemas for all of the providers, resource types, - // data sources and provisioners used by the given configuration and - // state. - // - // This must not be mutated during evaluation. - Schemas *Schemas - - // VariableValues contains the variable values across all modules. This - // structure is shared across the entire containing context, and so it - // may be accessed only when holding VariableValuesLock. - // The keys of the first level of VariableValues are the string - // representations of addrs.ModuleInstance values. The second-level keys - // are variable names within each module instance. - VariableValues map[string]map[string]cty.Value - VariableValuesLock *sync.Mutex - - Components contextComponentFactory - Hooks []Hook - InputValue UIInput - ProviderCache map[string]providers.Interface - ProviderInputConfig map[string]map[string]cty.Value - ProviderLock *sync.Mutex - ProvisionerCache map[string]provisioners.Interface - ProvisionerLock *sync.Mutex - ChangesValue *plans.ChangesSync - StateValue *states.SyncState - - once sync.Once -} - -// BuiltinEvalContext implements EvalContext -var _ EvalContext = (*BuiltinEvalContext)(nil) - -func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { - // This can happen during tests. During tests, we just block forever. 
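The nil-StopContext case below leans on a Go channel property: a receive from a nil channel blocks forever, so returning a nil channel from Stopped means evaluation is never considered stopped. A tiny illustrative sketch of that behavior:

    package main

    import "fmt"

    func main() {
        var stop <-chan struct{} // nil channel: a receive would block forever

        select {
        case <-stop:
            fmt.Println("stopped") // never reached with a nil channel
        default:
            fmt.Println("still running")
        }
    }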
- if ctx.StopContext == nil { - return nil - } - - return ctx.StopContext.Done() -} - -func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - for _, h := range ctx.Hooks { - action, err := fn(h) - if err != nil { - return err - } - - switch action { - case HookActionContinue: - continue - case HookActionHalt: - // Return an early exit error to trigger an early exit - log.Printf("[WARN] Early exit triggered by hook: %T", h) - return EvalEarlyExitError{} - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) Input() UIInput { - return ctx.InputValue -} - -func (ctx *BuiltinEvalContext) InitProvider(typeName string, addr addrs.ProviderConfig) (providers.Interface, error) { - ctx.once.Do(ctx.init) - absAddr := addr.Absolute(ctx.Path()) - - // If we already initialized, it is an error - if p := ctx.Provider(absAddr); p != nil { - return nil, fmt.Errorf("%s is already initialized", addr) - } - - // Warning: make sure to acquire these locks AFTER the call to Provider - // above, since it also acquires locks. - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - key := absAddr.String() - - p, err := ctx.Components.ResourceProvider(typeName, key) - if err != nil { - return nil, err - } - - log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", typeName, absAddr) - ctx.ProviderCache[key] = p - - return p, nil -} - -func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { - ctx.once.Do(ctx.init) - - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - return ctx.ProviderCache[addr.String()] -} - -func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { - ctx.once.Do(ctx.init) - - return ctx.Schemas.ProviderSchema(addr.ProviderConfig.Type) -} - -func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.ProviderConfig) error { - ctx.once.Do(ctx.init) - - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - key := addr.Absolute(ctx.Path()).String() - provider := ctx.ProviderCache[key] - if provider != nil { - delete(ctx.ProviderCache, key) - return provider.Close() - } - - return nil -} - -func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - absAddr := addr.Absolute(ctx.Path()) - p := ctx.Provider(absAddr) - if p == nil { - diags = diags.Append(fmt.Errorf("%s not initialized", addr)) - return diags - } - - providerSchema := ctx.ProviderSchema(absAddr) - if providerSchema == nil { - diags = diags.Append(fmt.Errorf("schema for %s is not available", absAddr)) - return diags - } - - req := providers.ConfigureRequest{ - TerraformVersion: version.String(), - Config: cfg, - } - - resp := p.Configure(req) - return resp.Diagnostics -} - -func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.ProviderConfig) map[string]cty.Value { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - if !ctx.Path().IsRoot() { - // Only root module provider configurations can have input. - return nil - } - - return ctx.ProviderInputConfig[pc.String()] -} - -func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.ProviderConfig, c map[string]cty.Value) { - absProvider := pc.Absolute(ctx.Path()) - - if !ctx.Path().IsRoot() { - // Only root module provider configurations can have input. 
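The Hook method above implements a small dispatch pattern: run a callback against each registered hook, stop on the first error, and convert HookActionHalt into a sentinel early-exit error. A toy re-implementation with stand-in types (the real Hook interface is much larger; all names here are illustrative, not the SDK's API):

    package main

    import (
        "errors"
        "fmt"
    )

    type HookAction int

    const (
        HookActionContinue HookAction = iota
        HookActionHalt
    )

    // Hook is a stand-in for the real, much larger hook interface.
    type Hook interface {
        PostApply(addr string) (HookAction, error)
    }

    type printHook struct{}

    func (printHook) PostApply(addr string) (HookAction, error) {
        fmt.Println("post-apply:", addr)
        return HookActionContinue, nil
    }

    var errEarlyExit = errors.New("early exit requested by hook")

    // dispatch mirrors the pattern above: invoke the callback against each
    // registered hook, stop on the first error, and translate HookActionHalt
    // into a sentinel error that the caller can recognize.
    func dispatch(hooks []Hook, fn func(Hook) (HookAction, error)) error {
        for _, h := range hooks {
            action, err := fn(h)
            if err != nil {
                return err
            }
            if action == HookActionHalt {
                return errEarlyExit
            }
        }
        return nil
    }

    func main() {
        err := dispatch([]Hook{printHook{}}, func(h Hook) (HookAction, error) {
            return h.PostApply("null_resource.example")
        })
        fmt.Println("dispatch error:", err)
    }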
- log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module") - return - } - - // Save the configuration - ctx.ProviderLock.Lock() - ctx.ProviderInputConfig[absProvider.String()] = c - ctx.ProviderLock.Unlock() -} - -func (ctx *BuiltinEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { - ctx.once.Do(ctx.init) - - // If we already initialized, it is an error - if p := ctx.Provisioner(n); p != nil { - return nil, fmt.Errorf("Provisioner '%s' already initialized", n) - } - - // Warning: make sure to acquire these locks AFTER the call to Provisioner - // above, since it also acquires locks. - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - key := PathObjectCacheKey(ctx.Path(), n) - - p, err := ctx.Components.ResourceProvisioner(n, key) - if err != nil { - return nil, err - } - - ctx.ProvisionerCache[key] = p - - return p, nil -} - -func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface { - ctx.once.Do(ctx.init) - - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - key := PathObjectCacheKey(ctx.Path(), n) - return ctx.ProvisionerCache[key] -} - -func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block { - ctx.once.Do(ctx.init) - - return ctx.Schemas.ProvisionerConfig(n) -} - -func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { - ctx.once.Do(ctx.init) - - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - key := PathObjectCacheKey(ctx.Path(), n) - - prov := ctx.ProvisionerCache[key] - if prov != nil { - return prov.Close() - } - - return nil -} - -func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - scope := ctx.EvaluationScope(self, keyData) - body, evalDiags := scope.ExpandBlock(body, schema) - diags = diags.Append(evalDiags) - val, evalDiags := scope.EvalBlock(body, schema) - diags = diags.Append(evalDiags) - return val, body, diags -} - -func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey) - return scope.EvalExpr(expr, wantType) -} - -func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { - data := &evaluationStateData{ - Evaluator: ctx.Evaluator, - ModulePath: ctx.PathValue, - InstanceKeyData: keyData, - Operation: ctx.Evaluator.Operation, - } - return ctx.Evaluator.Scope(data, self) -} - -func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { - return ctx.PathValue -} - -func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) { - ctx.VariableValuesLock.Lock() - defer ctx.VariableValuesLock.Unlock() - - childPath := n.ModuleInstance(ctx.PathValue) - key := childPath.String() - - args := ctx.VariableValues[key] - if args == nil { - args = make(map[string]cty.Value) - ctx.VariableValues[key] = vals - return - } - - for k, v := range vals { - args[k] = v - } -} - -func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { - return ctx.ChangesValue -} - -func (ctx *BuiltinEvalContext) State() *states.SyncState { - return ctx.StateValue -} - -func (ctx *BuiltinEvalContext) init() { -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go 
b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go deleted file mode 100644 index 786316fb..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go +++ /dev/null @@ -1,319 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// MockEvalContext is a mock version of EvalContext that can be used -// for tests. -type MockEvalContext struct { - StoppedCalled bool - StoppedValue <-chan struct{} - - HookCalled bool - HookHook Hook - HookError error - - InputCalled bool - InputInput UIInput - - InitProviderCalled bool - InitProviderType string - InitProviderAddr addrs.ProviderConfig - InitProviderProvider providers.Interface - InitProviderError error - - ProviderCalled bool - ProviderAddr addrs.AbsProviderConfig - ProviderProvider providers.Interface - - ProviderSchemaCalled bool - ProviderSchemaAddr addrs.AbsProviderConfig - ProviderSchemaSchema *ProviderSchema - - CloseProviderCalled bool - CloseProviderAddr addrs.ProviderConfig - CloseProviderProvider providers.Interface - - ProviderInputCalled bool - ProviderInputAddr addrs.ProviderConfig - ProviderInputValues map[string]cty.Value - - SetProviderInputCalled bool - SetProviderInputAddr addrs.ProviderConfig - SetProviderInputValues map[string]cty.Value - - ConfigureProviderCalled bool - ConfigureProviderAddr addrs.ProviderConfig - ConfigureProviderConfig cty.Value - ConfigureProviderDiags tfdiags.Diagnostics - - InitProvisionerCalled bool - InitProvisionerName string - InitProvisionerProvisioner provisioners.Interface - InitProvisionerError error - - ProvisionerCalled bool - ProvisionerName string - ProvisionerProvisioner provisioners.Interface - - ProvisionerSchemaCalled bool - ProvisionerSchemaName string - ProvisionerSchemaSchema *configschema.Block - - CloseProvisionerCalled bool - CloseProvisionerName string - CloseProvisionerProvisioner provisioners.Interface - - EvaluateBlockCalled bool - EvaluateBlockBody hcl.Body - EvaluateBlockSchema *configschema.Block - EvaluateBlockSelf addrs.Referenceable - EvaluateBlockKeyData InstanceKeyEvalData - EvaluateBlockResultFunc func( - body hcl.Body, - schema *configschema.Block, - self addrs.Referenceable, - keyData InstanceKeyEvalData, - ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set - EvaluateBlockResult cty.Value - EvaluateBlockExpandedBody hcl.Body - EvaluateBlockDiags tfdiags.Diagnostics - - EvaluateExprCalled bool - EvaluateExprExpr hcl.Expression - EvaluateExprWantType cty.Type - EvaluateExprSelf addrs.Referenceable - EvaluateExprResultFunc func( - expr hcl.Expression, - wantType cty.Type, - self addrs.Referenceable, - ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set - EvaluateExprResult cty.Value - EvaluateExprDiags tfdiags.Diagnostics - - 
EvaluationScopeCalled bool - EvaluationScopeSelf addrs.Referenceable - EvaluationScopeKeyData InstanceKeyEvalData - EvaluationScopeScope *lang.Scope - - PathCalled bool - PathPath addrs.ModuleInstance - - SetModuleCallArgumentsCalled bool - SetModuleCallArgumentsModule addrs.ModuleCallInstance - SetModuleCallArgumentsValues map[string]cty.Value - - ChangesCalled bool - ChangesChanges *plans.ChangesSync - - StateCalled bool - StateState *states.SyncState -} - -// MockEvalContext implements EvalContext -var _ EvalContext = (*MockEvalContext)(nil) - -func (c *MockEvalContext) Stopped() <-chan struct{} { - c.StoppedCalled = true - return c.StoppedValue -} - -func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - c.HookCalled = true - if c.HookHook != nil { - if _, err := fn(c.HookHook); err != nil { - return err - } - } - - return c.HookError -} - -func (c *MockEvalContext) Input() UIInput { - c.InputCalled = true - return c.InputInput -} - -func (c *MockEvalContext) InitProvider(t string, addr addrs.ProviderConfig) (providers.Interface, error) { - c.InitProviderCalled = true - c.InitProviderType = t - c.InitProviderAddr = addr - return c.InitProviderProvider, c.InitProviderError -} - -func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { - c.ProviderCalled = true - c.ProviderAddr = addr - return c.ProviderProvider -} - -func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { - c.ProviderSchemaCalled = true - c.ProviderSchemaAddr = addr - return c.ProviderSchemaSchema -} - -func (c *MockEvalContext) CloseProvider(addr addrs.ProviderConfig) error { - c.CloseProviderCalled = true - c.CloseProviderAddr = addr - return nil -} - -func (c *MockEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { - c.ConfigureProviderCalled = true - c.ConfigureProviderAddr = addr - c.ConfigureProviderConfig = cfg - return c.ConfigureProviderDiags -} - -func (c *MockEvalContext) ProviderInput(addr addrs.ProviderConfig) map[string]cty.Value { - c.ProviderInputCalled = true - c.ProviderInputAddr = addr - return c.ProviderInputValues -} - -func (c *MockEvalContext) SetProviderInput(addr addrs.ProviderConfig, vals map[string]cty.Value) { - c.SetProviderInputCalled = true - c.SetProviderInputAddr = addr - c.SetProviderInputValues = vals -} - -func (c *MockEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { - c.InitProvisionerCalled = true - c.InitProvisionerName = n - return c.InitProvisionerProvisioner, c.InitProvisionerError -} - -func (c *MockEvalContext) Provisioner(n string) provisioners.Interface { - c.ProvisionerCalled = true - c.ProvisionerName = n - return c.ProvisionerProvisioner -} - -func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block { - c.ProvisionerSchemaCalled = true - c.ProvisionerSchemaName = n - return c.ProvisionerSchemaSchema -} - -func (c *MockEvalContext) CloseProvisioner(n string) error { - c.CloseProvisionerCalled = true - c.CloseProvisionerName = n - return nil -} - -func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - c.EvaluateBlockCalled = true - c.EvaluateBlockBody = body - c.EvaluateBlockSchema = schema - c.EvaluateBlockSelf = self - c.EvaluateBlockKeyData = keyData - if c.EvaluateBlockResultFunc != nil { - return c.EvaluateBlockResultFunc(body, schema, self, keyData) - } - return 
c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags -} - -func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - c.EvaluateExprCalled = true - c.EvaluateExprExpr = expr - c.EvaluateExprWantType = wantType - c.EvaluateExprSelf = self - if c.EvaluateExprResultFunc != nil { - return c.EvaluateExprResultFunc(expr, wantType, self) - } - return c.EvaluateExprResult, c.EvaluateExprDiags -} - -// installSimpleEval is a helper to install a simple mock implementation of -// both EvaluateBlock and EvaluateExpr into the receiver. -// -// These default implementations will either evaluate the given input against -// the scope in field EvaluationScopeScope or, if it is nil, with no eval -// context at all so that only constant values may be used. -// -// This function overwrites any existing functions installed in fields -// EvaluateBlockResultFunc and EvaluateExprResultFunc. -func (c *MockEvalContext) installSimpleEval() { - c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - if scope := c.EvaluationScopeScope; scope != nil { - // Fully-functional codepath. - var diags tfdiags.Diagnostics - body, diags = scope.ExpandBlock(body, schema) - if diags.HasErrors() { - return cty.DynamicVal, body, diags - } - val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return cty.DynamicVal, body, diags - } - return val, body, diags - } - - // Fallback codepath supporting constant values only. - val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil) - return val, body, tfdiags.Diagnostics(nil).Append(hclDiags) - } - c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - if scope := c.EvaluationScopeScope; scope != nil { - // Fully-functional codepath. - return scope.EvalExpr(expr, wantType) - } - - // Fallback codepath supporting constant values only. 
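The constant-only fallback above works because hcldec.Decode (like hcl.Expression.Value) accepts a nil *hcl.EvalContext, in which case only constant expressions evaluate successfully and any variable or function reference produces diagnostics. A standalone sketch, assuming the public hclsyntax and hcldec packages (the file name and attribute are illustrative):

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/v2"
        "github.com/hashicorp/hcl/v2/hcldec"
        "github.com/hashicorp/hcl/v2/hclsyntax"
        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        src := `greeting = "hello"`
        f, diags := hclsyntax.ParseConfig([]byte(src), "example.hcl", hcl.Pos{Line: 1, Column: 1})
        if diags.HasErrors() {
            panic(diags.Error())
        }

        spec := &hcldec.AttrSpec{Name: "greeting", Type: cty.String, Required: true}

        // A nil EvalContext means only constant expressions can be evaluated;
        // a reference like greeting = var.name would produce diagnostics here.
        val, diags := hcldec.Decode(f.Body, spec, nil)
        if diags.HasErrors() {
            panic(diags.Error())
        }
        fmt.Println(val.AsString()) // hello
    }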
- var diags tfdiags.Diagnostics - val, hclDiags := expr.Value(nil) - diags = diags.Append(hclDiags) - if hclDiags.HasErrors() { - return cty.DynamicVal, diags - } - var err error - val, err = convert.Convert(val, wantType) - if err != nil { - diags = diags.Append(err) - return cty.DynamicVal, diags - } - return val, diags - } -} - -func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { - c.EvaluationScopeCalled = true - c.EvaluationScopeSelf = self - c.EvaluationScopeKeyData = keyData - return c.EvaluationScopeScope -} - -func (c *MockEvalContext) Path() addrs.ModuleInstance { - c.PathCalled = true - return c.PathPath -} - -func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) { - c.SetModuleCallArgumentsCalled = true - c.SetModuleCallArgumentsModule = n - c.SetModuleCallArgumentsValues = values -} - -func (c *MockEvalContext) Changes() *plans.ChangesSync { - c.ChangesCalled = true - return c.ChangesChanges -} - -func (c *MockEvalContext) State() *states.SyncState { - c.StateCalled = true - return c.StateState -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go deleted file mode 100644 index 7d6fa491..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go +++ /dev/null @@ -1,120 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// evaluateResourceCountExpression is our standard mechanism for interpreting an -// expression given for a "count" argument on a resource. This should be called -// from the DynamicExpand of a node representing a resource in order to -// determine the final count value. -// -// If the result is zero or positive and no error diagnostics are returned, then -// the result is the literal count value to use. -// -// If the result is -1, this indicates that the given expression is nil and so -// the "count" behavior should not be enabled for this resource at all. -// -// If error diagnostics are returned then the result is always the meaningless -// placeholder value -1. -func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) { - count, known, diags := evaluateResourceCountExpressionKnown(expr, ctx) - if !known { - // Currently this is a rather bad outcome from a UX standpoint, since we have - // no real mechanism to deal with this situation and all we can do is produce - // an error message. - // FIXME: In future, implement a built-in mechanism for deferring changes that - // can't yet be predicted, and use it to guide the user through several - // plan/apply steps until the desired configuration is eventually reached. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. 
To work around this, use the -target argument to first apply only the resources that the count depends on.`, - Subject: expr.Range().Ptr(), - }) - } - return count, diags -} - -// evaluateResourceCountExpressionKnown is like evaluateResourceCountExpression -// except that it handles an unknown result by returning count = 0 and -// a known = false, rather than by reporting the unknown value as an error -// diagnostic. -func evaluateResourceCountExpressionKnown(expr hcl.Expression, ctx EvalContext) (count int, known bool, diags tfdiags.Diagnostics) { - if expr == nil { - return -1, true, nil - } - - countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) - diags = diags.Append(countDiags) - if diags.HasErrors() { - return -1, true, diags - } - - switch { - case countVal.IsNull(): - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is null. An integer is required.`, - Subject: expr.Range().Ptr(), - }) - return -1, true, diags - case !countVal.IsKnown(): - return 0, false, diags - } - - err := gocty.FromCtyValue(countVal, &count) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return -1, true, diags - } - if count < 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`, - Subject: expr.Range().Ptr(), - }) - return -1, true, diags - } - - return count, true, diags -} - -// fixResourceCountSetTransition is a helper function to fix up the state when a -// resource transitions its "count" from being set to unset or vice-versa, -// treating a 0-key and a no-key instance as aliases for one another across -// the transition. -// -// The correct time to call this function is in the DynamicExpand method for -// a node representing a resource, just after evaluating the count with -// evaluateResourceCountExpression, and before any other analysis of the -// state such as orphan detection. -// -// This function calls methods on the given EvalContext to update the current -// state in-place, if necessary. It is a no-op if there is no count transition -// taking place. -// -// Since the state is modified in-place, this function must take a writer lock -// on the state. The caller must therefore not also be holding a state lock, -// or this function will block forever awaiting the lock. 
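The count handling above rejects null, unknown, non-integral, and negative values before converting the cty.Number to a Go int with gocty.FromCtyValue. A condensed standalone version of the same checks (the toCount helper is illustrative, not part of this patch):

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/gocty"
    )

    // toCount mirrors the validation above: null, unknown, non-integer and
    // negative values are all rejected before conversion to a Go int.
    func toCount(v cty.Value) (int, error) {
        switch {
        case v.IsNull():
            return -1, fmt.Errorf("count is null; an integer is required")
        case !v.IsKnown():
            return 0, fmt.Errorf("count is not yet known")
        }
        var count int
        if err := gocty.FromCtyValue(v, &count); err != nil {
            return -1, fmt.Errorf("unsuitable count value: %s", err)
        }
        if count < 0 {
            return -1, fmt.Errorf("negative count is not supported")
        }
        return count, nil
    }

    func main() {
        fmt.Println(toCount(cty.NumberIntVal(3)))     // 3 <nil>
        fmt.Println(toCount(cty.NumberFloatVal(1.5))) // error: not a whole number
    }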
-func fixResourceCountSetTransition(ctx EvalContext, addr addrs.AbsResource, countEnabled bool) { - state := ctx.State() - changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled) - if changed { - log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go deleted file mode 100644 index aac38063..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go +++ /dev/null @@ -1,77 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state -// when there is a resource count with zero/one boundary, i.e. fixing -// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. -// -// This works on the global state. -type EvalCountFixZeroOneBoundaryGlobal struct { - Config *configs.Config -} - -// TODO: test -func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { - // We'll temporarily lock the state to grab the modules, then work on each - // one separately while taking a lock again for each separate resource. - // This means that if another caller concurrently adds a module here while - // we're working then we won't update it, but that's no worse than the - // concurrent writer blocking for our entire fixup process and _then_ - // adding a new module, and in practice the graph node associated with - // this eval depends on everything else in the graph anyway, so there - // should not be concurrent writers. - state := ctx.State().Lock() - moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules)) - for _, m := range state.Modules { - moduleAddrs = append(moduleAddrs, m.Addr) - } - ctx.State().Unlock() - - for _, addr := range moduleAddrs { - cfg := n.Config.DescendentForInstance(addr) - if cfg == nil { - log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) - continue - } - if err := n.fixModule(ctx, addr); err != nil { - return nil, err - } - } - - return nil, nil -} - -func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error { - ms := ctx.State().Module(moduleAddr) - cfg := n.Config.DescendentForInstance(moduleAddr) - if ms == nil { - // Theoretically possible for a concurrent writer to delete a module - // while we're running, but in practice the graph node that called us - // depends on everything else in the graph and so there can never - // be a concurrent writer. 
- return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr) - } - if cfg == nil { - return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr) - } - - for _, r := range ms.Resources { - addr := r.Addr.Absolute(moduleAddr) - rCfg := cfg.Module.ResourceByAddr(r.Addr) - if rCfg == nil { - log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) - continue - } - hasCount := rCfg.Count != nil - fixResourceCountSetTransition(ctx, addr, hasCount) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go deleted file mode 100644 index d6f51c95..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go +++ /dev/null @@ -1,783 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalCheckPlannedChange is an EvalNode implementation that produces errors -// if the _actual_ expected value is not compatible with what was recorded -// in the plan. -// -// Errors here are most often indicative of a bug in the provider, so our -// error messages will report with that in mind. It's also possible that -// there's a bug in Terraform's Core's own "proposed new value" code in -// EvalDiff. -type EvalCheckPlannedChange struct { - Addr addrs.ResourceInstance - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - - // We take ResourceInstanceChange objects here just because that's what's - // convenient to pass in from the evaltree implementation, but we really - // only look at the "After" value of each change. - Planned, Actual **plans.ResourceInstanceChange -} - -func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) { - providerSchema := *n.ProviderSchema - plannedChange := *n.Planned - actualChange := *n.Actual - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type) - } - - var diags tfdiags.Diagnostics - absAddr := n.Addr.Absolute(ctx.Path()) - - log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) - - if plannedChange.Action != actualChange.Action { - switch { - case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: - // It's okay for an update to become a NoOp once we've filled in - // all of the unknown values, since the final values might actually - // match what was there before after all. 
- log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.ProviderConfig.Type, - plannedChange.Action, actualChange.Action, - ), - )) - } - } - - errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), - ), - )) - } - return nil, diags.Err() -} - -// EvalDiff is an EvalNode implementation that detects changes for a given -// resource instance. -type EvalDiff struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - State **states.ResourceInstanceObject - PreviousDiff **plans.ResourceInstanceChange - - // CreateBeforeDestroy is set if either the resource's own config sets - // create_before_destroy explicitly or if dependencies have forced the - // resource to be handled as create_before_destroy in order to avoid - // a dependency cycle. 
- CreateBeforeDestroy bool - - OutputChange **plans.ResourceInstanceChange - OutputValue *cty.Value - OutputState **states.ResourceInstanceObject - - Stub bool -} - -// TODO: test -func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - config := *n.Config - provider := *n.Provider - providerSchema := *n.ProviderSchema - - if providerSchema == nil { - return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr) - } - if n.ProviderAddr.ProviderConfig.Type == "" { - panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path()))) - } - - var diags tfdiags.Diagnostics - - // Evaluate the configuration - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - - absAddr := n.Addr.Absolute(ctx.Path()) - var priorVal cty.Value - var priorValTainted cty.Value - var priorPrivate []byte - if state != nil { - if state.Status != states.ObjectTainted { - priorVal = state.Value - priorPrivate = state.Private - } else { - // If the prior state is tainted then we'll proceed below like - // we're creating an entirely new object, but then turn it into - // a synthetic "Replace" change at the end, creating the same - // result as if the provider had marked at least one argument - // change as "requires replacement". - priorValTainted = state.Value - priorVal = cty.NullVal(schema.ImpliedType()) - } - } else { - priorVal = cty.NullVal(schema.ImpliedType()) - } - - proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal) - - // Call pre-diff hook - if !n.Stub { - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) - }) - if err != nil { - return nil, err - } - } - - log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path())) - // Allow the provider to validate the final set of values. - // The config was statically validated early on, but there may have been - // unknown values which the provider could not validate at the time. - validateResp := provider.ValidateResourceTypeConfig( - providers.ValidateResourceTypeConfigRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - }, - ) - if validateResp.Diagnostics.HasErrors() { - return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err() - } - - // The provider gets an opportunity to customize the proposed new value, - // which in turn produces the _planned_ new value. 
But before - // we send back this information, we need to process ignore_changes - // so that CustomizeDiff will not act on them - var ignoreChangeDiags tfdiags.Diagnostics - proposedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, proposedNewVal) - diags = diags.Append(ignoreChangeDiags) - if ignoreChangeDiags.HasErrors() { - return nil, diags.Err() - } - - resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - PriorState: priorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: priorPrivate, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - if diags.HasErrors() { - return nil, diags.Err() - } - - plannedNewVal := resp.PlannedState - plannedPrivate := resp.PlannedPrivate - - if plannedNewVal == cty.NilVal { - // Should never happen. Since real-world providers return via RPC a nil - // is always a bug in the client-side stub. This is more likely caused - // by an incompletely-configured mock provider in tests, though. - panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String())) - } - - // We allow the planned new value to disagree with configuration _values_ - // here, since that allows the provider to do special logic like a - // DiffSuppressFunc, but we still require that the provider produces - // a value whose type conforms to the schema. - for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 { - if resp.LegacyTypeSystem { - // The shimming of the old type system in the legacy SDK is not precise - // enough to pass this consistency check, so we'll give it a pass here, - // but we will generate a warning about it so that we are more likely - // to notice in the logs if an inconsistency beyond the type system - // leads to a downstream provider failure. 
- var buf strings.Builder
- fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr)
- for _, err := range errs {
- fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
- }
- log.Print(buf.String())
- } else {
- for _, err := range errs {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Provider produced invalid plan",
- fmt.Sprintf(
- "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
- n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
- ),
- ))
- }
- return nil, diags.Err()
- }
- }
-
- // TODO: We should be able to remove this repeat of processing ignored changes
- // after the plan, which helps providers relying on old behavior "just work"
- // in the next major version, such that we can be stricter about ignore_changes
- // values.
- plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, plannedNewVal)
- diags = diags.Append(ignoreChangeDiags)
- if ignoreChangeDiags.HasErrors() {
- return nil, diags.Err()
- }
-
- // The provider produces a list of paths to attributes whose changes mean
- // that we must replace rather than update an existing remote object.
- // However, we only need to do that if the identified attributes _have_
- // actually changed -- particularly after we may have undone some of the
- // changes in processIgnoreChanges -- so now we'll filter that list to
- // include only where changes are detected.
- reqRep := cty.NewPathSet()
- if len(resp.RequiresReplace) > 0 {
- for _, path := range resp.RequiresReplace {
- if priorVal.IsNull() {
- // If prior is null then we don't expect any RequiresReplace at all,
- // because this is a Create action.
- continue
- }
-
- priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil)
- plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil)
- if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() {
- // This means the path was invalid in both the prior and new
- // values, which is an error with the provider itself.
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Provider produced invalid plan",
- fmt.Sprintf(
- "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
- n.ProviderAddr.ProviderConfig.Type, absAddr, path,
- ),
- ))
- continue
- }
-
- // Make sure we have valid Values for both values.
- // Note: if the opposing value was of the type
- // cty.DynamicPseudoType, the type assigned here may not exactly
- // match the schema. This is fine here, since we're only going to
- // check for equality, but if the NullVal is to be used, we need to
- // check the schema for the true type.
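Aside, for review context: the requires-replace filtering below leans on cty's three-valued equality, where comparing with an unknown yields an unknown bool that has to be treated as "changed". A minimal standalone sketch, assuming only github.com/zclconf/go-cty (illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// changed mirrors the test used below: an unknown comparison result
// counts as a change, which forces replacement.
func changed(prior, planned cty.Value) bool {
	eq := planned.Equals(prior)
	return !eq.IsKnown() || eq.False()
}

func main() {
	fmt.Println(changed(cty.StringVal("a"), cty.StringVal("a"))) // false: equal
	fmt.Println(changed(cty.StringVal("a"), cty.StringVal("b"))) // true: value changed
	fmt.Println(changed(cty.StringVal("a"), cty.UnknownVal(cty.String))) // true: unknown counts as changed
}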
- switch { - case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal: - // this should never happen without ApplyPath errors above - panic("requires replace path returned 2 nil values") - case priorChangedVal == cty.NilVal: - priorChangedVal = cty.NullVal(plannedChangedVal.Type()) - case plannedChangedVal == cty.NilVal: - plannedChangedVal = cty.NullVal(priorChangedVal.Type()) - } - - eqV := plannedChangedVal.Equals(priorChangedVal) - if !eqV.IsKnown() || eqV.False() { - reqRep.Add(path) - } - } - if diags.HasErrors() { - return nil, diags.Err() - } - } - - eqV := plannedNewVal.Equals(priorVal) - eq := eqV.IsKnown() && eqV.True() - - var action plans.Action - switch { - case priorVal.IsNull(): - action = plans.Create - case eq: - action = plans.NoOp - case !reqRep.Empty(): - // If there are any "requires replace" paths left _after our filtering - // above_ then this is a replace action. - if n.CreateBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - default: - action = plans.Update - // "Delete" is never chosen here, because deletion plans are always - // created more directly elsewhere, such as in "orphan" handling. - } - - if action.IsReplace() { - // In this strange situation we want to produce a change object that - // shows our real prior object but has a _new_ object that is built - // from a null prior object, since we're going to delete the one - // that has all the computed values on it. - // - // Therefore we'll ask the provider to plan again here, giving it - // a null object for the prior, and then we'll meld that with the - // _actual_ prior state to produce a correctly-shaped replace change. - // The resulting change should show any computed attributes changing - // from known prior values to unknown values, unless the provider is - // able to predict new values for any of these computed attributes. - nullPriorVal := cty.NullVal(schema.ImpliedType()) - - // create a new proposed value from the null state and the config - proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal) - - resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - PriorState: nullPriorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: plannedPrivate, - }) - // We need to tread carefully here, since if there are any warnings - // in here they probably also came out of our previous call to - // PlanResourceChange above, and so we don't want to repeat them. - // Consequently, we break from the usual pattern here and only - // append these new diagnostics if there's at least one error inside. - if resp.Diagnostics.HasErrors() { - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - return nil, diags.Err() - } - plannedNewVal = resp.PlannedState - plannedPrivate = resp.PlannedPrivate - for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - } - - // If our prior value was tainted then we actually want this to appear - // as a replace change, even though so far we've been treating it as a - // create. 
- if action == plans.Create && priorValTainted != cty.NilVal {
- if n.CreateBeforeDestroy {
- action = plans.CreateThenDelete
- } else {
- action = plans.DeleteThenCreate
- }
- priorVal = priorValTainted
- }
-
- // As a special case, if we have a previous diff (presumably from the plan
- // phase, whereas we're now in the apply phase) and it was for a replace,
- // we've already deleted the original object from state by the time we
- // get here and so we would've ended up with a _create_ action this time,
- // which we now need to paper over to get a result consistent with what
- // we originally intended.
- if n.PreviousDiff != nil {
- prevChange := *n.PreviousDiff
- if prevChange.Action.IsReplace() && action == plans.Create {
- log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action)
- action = prevChange.Action
- priorVal = prevChange.Before
- }
- }
-
- // Call post-refresh hook
- if !n.Stub {
- err := ctx.Hook(func(h Hook) (HookAction, error) {
- return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal)
- })
- if err != nil {
- return nil, err
- }
- }
-
- // Update our output if we care
- if n.OutputChange != nil {
- *n.OutputChange = &plans.ResourceInstanceChange{
- Addr: absAddr,
- Private: plannedPrivate,
- ProviderAddr: n.ProviderAddr,
- Change: plans.Change{
- Action: action,
- Before: priorVal,
- After: plannedNewVal,
- },
- RequiredReplace: reqRep,
- }
- }
-
- if n.OutputValue != nil {
- *n.OutputValue = configVal
- }
-
- // Update the state if we care
- if n.OutputState != nil {
- *n.OutputState = &states.ResourceInstanceObject{
- // We use the special "planned" status here to note that this
- // object's value is not yet complete. Objects with this status
- // cannot be used during expression evaluation, so the caller
- // must _also_ record the returned change in the active plan,
- // which the expression evaluator will use in preference to this
- // incomplete value recorded in the state.
- Status: states.ObjectPlanned,
- Value: plannedNewVal,
- Private: plannedPrivate,
- }
- }
-
- return nil, nil
-}
-
-func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) {
- // ignore_changes only applies when an object already exists, since we
- // can't ignore changes to a thing we've not created yet.
- if prior.IsNull() {
- return proposed, nil
- }
-
- ignoreChanges := n.Config.Managed.IgnoreChanges
- ignoreAll := n.Config.Managed.IgnoreAllChanges
-
- if len(ignoreChanges) == 0 && !ignoreAll {
- return proposed, nil
- }
- if ignoreAll {
- return prior, nil
- }
- if prior.IsNull() || proposed.IsNull() {
- // Ignore changes doesn't apply when we're creating for the first time.
- // Proposed should never be null here, but if it is then we'll just let it be.
- return proposed, nil
- }
-
- return processIgnoreChangesIndividual(prior, proposed, ignoreChanges)
-}
-
-func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) {
- // When we walk below we will be using cty.Path values for comparison, so
- // we'll convert our traversals here so we can compare more easily.
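Review aside: the conversion loop that follows produces cty.Path values so that ignored addresses can be compared structurally during the transform walk. A small self-contained sketch of that comparison, assuming only github.com/zclconf/go-cty (illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Built step by step, as the conversion loop below does:
	p1 := cty.Path{
		cty.GetAttrStep{Name: "tags"},
		cty.IndexStep{Key: cty.StringVal("env")},
	}
	// Built with cty's fluent helpers:
	p2 := cty.GetAttrPath("tags").Index(cty.StringVal("env"))
	fmt.Println(p1.Equals(p2)) // true: paths compare structurally, step by step
}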
- ignoreChangesPath := make([]cty.Path, len(ignoreChanges)) - for i, traversal := range ignoreChanges { - path := make(cty.Path, len(traversal)) - for si, step := range traversal { - switch ts := step.(type) { - case hcl.TraverseRoot: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseAttr: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseIndex: - path[si] = cty.IndexStep{ - Key: ts.Key, - } - default: - panic(fmt.Sprintf("unsupported traversal step %#v", step)) - } - } - ignoreChangesPath[i] = path - } - - var diags tfdiags.Diagnostics - ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) { - // First we must see if this is a path that's being ignored at all. - // We're looking for an exact match here because this walk will visit - // leaf values first and then their containers, and we want to do - // the "ignore" transform once we reach the point indicated, throwing - // away any deeper values we already produced at that point. - var ignoreTraversal hcl.Traversal - for i, candidate := range ignoreChangesPath { - if path.Equals(candidate) { - ignoreTraversal = ignoreChanges[i] - } - } - if ignoreTraversal == nil { - return v, nil - } - - // If we're able to follow the same path through the prior value, - // we'll take the value there instead, effectively undoing the - // change that was planned. - priorV, diags := hcl.ApplyPath(prior, path, nil) - if diags.HasErrors() { - // We just ignore the errors and move on here, since we assume it's - // just because the prior value was a slightly-different shape. - // It could potentially also be that the traversal doesn't match - // the schema, but we should've caught that during the validate - // walk if so. - return v, nil - } - return priorV, nil - }) - return ret, diags -} - -// EvalDiffDestroy is an EvalNode implementation that returns a plain -// destroy diff. -type EvalDiffDestroy struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - State **states.ResourceInstanceObject - ProviderAddr addrs.AbsProviderConfig - - Output **plans.ResourceInstanceChange - OutputState **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := *n.State - - if n.ProviderAddr.ProviderConfig.Type == "" { - if n.DeposedKey == "" { - panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr)) - } else { - panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey)) - } - } - - // If there is no state or our attributes object is null then we're already - // destroyed. - if state == nil || state.Value.IsNull() { - return nil, nil - } - - // Call pre-diff hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff( - absAddr, n.DeposedKey.Generation(), - state.Value, - cty.NullVal(cty.DynamicPseudoType), - ) - }) - if err != nil { - return nil, err - } - - // Change is always the same for a destroy. We don't need the provider's - // help for this one. - // TODO: Should we give the provider an opportunity to veto this? 
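Review aside: as the comment above notes, destroy changes are built directly without consulting the provider. The Before/After shape constructed just below pairs the current state value with a typeless null via cty.NullVal(cty.DynamicPseudoType); a tiny sketch of that pairing (illustrative only, the "id" attribute is invented):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	before := cty.ObjectVal(map[string]cty.Value{
		"id": cty.StringVal("i-0abc123"),
	})
	// A typeless null, the conventional "after" value for a destroy.
	after := cty.NullVal(cty.DynamicPseudoType)
	fmt.Println(before.IsNull(), after.IsNull()) // false true
}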
- change := &plans.ResourceInstanceChange{
- Addr: absAddr,
- DeposedKey: n.DeposedKey,
- Change: plans.Change{
- Action: plans.Delete,
- Before: state.Value,
- After: cty.NullVal(cty.DynamicPseudoType),
- },
- Private: state.Private,
- ProviderAddr: n.ProviderAddr,
- }
-
- // Call post-diff hook
- err = ctx.Hook(func(h Hook) (HookAction, error) {
- return h.PostDiff(
- absAddr,
- n.DeposedKey.Generation(),
- change.Action,
- change.Before,
- change.After,
- )
- })
- if err != nil {
- return nil, err
- }
-
- // Update our output
- *n.Output = change
-
- if n.OutputState != nil {
- // Record our proposed new state, which is nil because we're destroying.
- *n.OutputState = nil
- }
-
- return nil, nil
-}
-
-// EvalReduceDiff is an EvalNode implementation that takes a planned resource
-// instance change as might be produced by EvalDiff or EvalDiffDestroy and
-// "simplifies" it to a single atomic action to be performed by a specific
-// graph node.
-//
-// Callers must specify whether they are a destroy node or a regular apply
-// node. If the result is NoOp then the given change requires no action for
-// the specific graph node calling this and so evaluation of that graph
-// node should exit early and take no action.
-//
-// The object written to OutChange may either be identical to InChange or
-// a new change object derived from InChange. Because of the former case, the
-// caller must not mutate the object returned in OutChange.
-type EvalReduceDiff struct {
- Addr addrs.ResourceInstance
- InChange **plans.ResourceInstanceChange
- Destroy bool
- OutChange **plans.ResourceInstanceChange
-}
-
-// TODO: test
-func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) {
- in := *n.InChange
- out := in.Simplify(n.Destroy)
- if n.OutChange != nil {
- *n.OutChange = out
- }
- if out.Action != in.Action {
- if n.Destroy {
- log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action)
- } else {
- log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action)
- }
- }
- return nil, nil
-}
-
-// EvalReadDiff is an EvalNode implementation that retrieves the planned
-// change for a particular resource instance object.
-type EvalReadDiff struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - ProviderSchema **ProviderSchema - Change **plans.ResourceInstanceChange -} - -func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { - providerSchema := *n.ProviderSchema - changes := ctx.Changes() - addr := n.Addr.Absolute(ctx.Path()) - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - gen := states.CurrentGen - if n.DeposedKey != states.NotDeposed { - gen = n.DeposedKey - } - csrc := changes.GetResourceInstanceChange(addr, gen) - if csrc == nil { - log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr) - return nil, nil - } - - change, err := csrc.Decode(schema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err) - } - if n.Change != nil { - *n.Change = change - } - - log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr) - - return nil, nil -} - -// EvalWriteDiff is an EvalNode implementation that saves a planned change -// for an instance object into the set of global planned changes. -type EvalWriteDiff struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - ProviderSchema **ProviderSchema - Change **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { - changes := ctx.Changes() - addr := n.Addr.Absolute(ctx.Path()) - if n.Change == nil || *n.Change == nil { - // Caller sets nil to indicate that we need to remove a change from - // the set of changes. - gen := states.CurrentGen - if n.DeposedKey != states.NotDeposed { - gen = n.DeposedKey - } - changes.RemoveResourceInstanceChange(addr, gen) - return nil, nil - } - - providerSchema := *n.ProviderSchema - change := *n.Change - - if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey { - // Should never happen, and indicates a bug in the caller. - panic("inconsistent address and/or deposed key in EvalWriteDiff") - } - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - csrc, err := change.Encode(schema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err) - } - - changes.AppendResourceInstanceChange(csrc) - if n.DeposedKey == states.NotDeposed { - log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr) - } else { - log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go deleted file mode 100644 index 470f798b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// EvalReturnError is an EvalNode implementation that returns an -// error if it is present. 
-//
-// This is useful for scenarios where an error has been captured by
-// another EvalNode (like EvalApply) for special EvalTree-based error
-// handling, and that handling has completed, so the error should be
-// returned normally.
-type EvalReturnError struct {
- Error *error
-}
-
-func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
- if n.Error == nil {
- return nil, nil
- }
-
- return nil, *n.Error
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go
deleted file mode 100644
index 711c625c..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package terraform

-// EvalNodeFilterFunc is the callback used to replace a node with
-// another node. To not do the replacement, just return the input node.
-type EvalNodeFilterFunc func(EvalNode) EvalNode
-
-// EvalNodeFilterable is an interface that can be implemented by
-// EvalNodes to allow filtering of sub-elements. Note that this isn't
-// a common thing to implement and you probably don't need it.
-type EvalNodeFilterable interface {
- EvalNode
- Filter(EvalNodeFilterFunc)
-}
-
-// EvalFilter runs the filter on the given node and returns the
-// final filtered value. This should be called rather than checking
-// the EvalNode directly since this will properly handle EvalNodeFilterables.
-func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
- if f, ok := node.(EvalNodeFilterable); ok {
- f.Filter(fn)
- return node
- }
-
- return fn(node)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go
deleted file mode 100644
index 1a55f024..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package terraform

-// EvalNodeOpFilterable is an interface that EvalNodes can implement
-// to be filterable by the operation that is being run on Terraform.
-type EvalNodeOpFilterable interface {
- IncludeInOp(walkOperation) bool
-}
-
-// EvalNodeFilterOp returns a filter function that filters nodes that
-// include themselves in specific operations.
-func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
- return func(n EvalNode) EvalNode {
- include := true
- if of, ok := n.(EvalNodeOpFilterable); ok {
- include = of.IncludeInOp(op)
- }
- if include {
- return n
- }
-
- return EvalNoop{}
- }
-}
-
-// EvalOpFilter is an EvalNode implementation that is a proxy to
-// another node but filters based on the operation.
-type EvalOpFilter struct {
- // Ops is the list of operations to include this node in.
- Ops []walkOperation
-
- // Node is the node to execute
- Node EvalNode
-}
-
-// TODO: test
-func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
- return EvalRaw(n.Node, ctx)
-}
-
-// EvalNodeOpFilterable impl.
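Review aside: the op-filter mechanism above swaps nodes that opt out of the current walk for EvalNoop. A self-contained analogue with invented names (walkPlan, applyOnly, filterOp and friends are hypothetical, for illustration only):

package main

import "fmt"

type walkOperation int

const (
	walkPlan walkOperation = iota
	walkApply
)

type node interface{ eval() string }

// noop stands in for EvalNoop: it does nothing.
type noop struct{}

func (noop) eval() string { return "noop" }

// applyOnly opts into the apply walk only.
type applyOnly struct{}

func (applyOnly) eval() string { return "apply work" }

func (applyOnly) includeInOp(op walkOperation) bool { return op == walkApply }

// filterOp mirrors EvalNodeFilterOp: excluded nodes become no-ops.
func filterOp(op walkOperation, n node) node {
	if f, ok := n.(interface{ includeInOp(walkOperation) bool }); ok && !f.includeInOp(op) {
		return noop{}
	}
	return n
}

func main() {
	fmt.Println(filterOp(walkPlan, applyOnly{}).eval())  // noop
	fmt.Println(filterOp(walkApply, applyOnly{}).eval()) // apply work
}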
-func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool { - for _, v := range n.Ops { - if v == op { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go deleted file mode 100644 index a63389a9..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// evaluateResourceForEachExpression interprets a "for_each" argument on a resource. -// -// Returns a cty.Value map, and diagnostics if necessary. It will return nil if -// the expression is nil, and is used to distinguish between an unset for_each and an -// empty map -func evaluateResourceForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) { - forEachMap, known, diags := evaluateResourceForEachExpressionKnown(expr, ctx) - if !known { - // Attach a diag as we do with count, with the same downsides - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: `The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the for_each depends on.`, - Subject: expr.Range().Ptr(), - }) - } - return forEachMap, diags -} - -// evaluateResourceForEachExpressionKnown is like evaluateResourceForEachExpression -// except that it handles an unknown result by returning an empty map and -// a known = false, rather than by reporting the unknown value as an error -// diagnostic. -func evaluateResourceForEachExpressionKnown(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, known bool, diags tfdiags.Diagnostics) { - if expr == nil { - return nil, true, nil - } - - forEachVal, forEachDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) - diags = diags.Append(forEachDiags) - if diags.HasErrors() { - return nil, true, diags - } - - switch { - case forEachVal.IsNull(): - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: `The given "for_each" argument value is unsuitable: the given "for_each" argument value is null. 
A map, or set of strings is allowed.`, - Subject: expr.Range().Ptr(), - }) - return nil, true, diags - case !forEachVal.IsKnown(): - return map[string]cty.Value{}, false, diags - } - - if !forEachVal.CanIterateElements() || forEachVal.Type().IsListType() || forEachVal.Type().IsTupleType() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, forEachVal.Type().FriendlyName()), - Subject: expr.Range().Ptr(), - }) - return nil, true, diags - } - - // If the map is empty ({}), return an empty map, because cty will return nil when representing {} AsValueMap - // This also covers an empty set (toset([])) - if forEachVal.LengthInt() == 0 { - return map[string]cty.Value{}, true, diags - } - - if forEachVal.Type().IsSetType() { - if forEachVal.Type().ElementType() != cty.String { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each set argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()), - Subject: expr.Range().Ptr(), - }) - return nil, true, diags - } - - // A set may contain unknown values that must be - // discovered by checking with IsWhollyKnown (which iterates through the - // structure), while for maps in cty, keys can never be unknown or null, - // thus the earlier IsKnown check suffices for maps - if !forEachVal.IsWhollyKnown() { - return map[string]cty.Value{}, false, diags - } - } - - return forEachVal.AsValueMap(), true, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go deleted file mode 100644 index d6b46a1f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go +++ /dev/null @@ -1,26 +0,0 @@ -package terraform - -// EvalIf is an EvalNode that is a conditional. -type EvalIf struct { - If func(EvalContext) (bool, error) - Then EvalNode - Else EvalNode -} - -// TODO: test -func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) { - yes, err := n.If(ctx) - if err != nil { - return nil, err - } - - if yes { - return EvalRaw(n.Then, ctx) - } else { - if n.Else != nil { - return EvalRaw(n.Else, ctx) - } - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go deleted file mode 100644 index 25a2aae0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalImportState is an EvalNode implementation that performs an -// ImportState operation on a provider. This will return the imported -// states but won't modify any actual state. 
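Review aside, looking back at the for_each validation just above (eval_for_each.go): it accepts maps and sets of strings and rejects lists and tuples. A compact sketch of those acceptance rules, assuming only github.com/zclconf/go-cty (illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// forEachOK mirrors the rules above: anything iterable except lists and
// tuples, and sets only when their elements are strings.
func forEachOK(v cty.Value) bool {
	if !v.CanIterateElements() || v.Type().IsListType() || v.Type().IsTupleType() {
		return false
	}
	if v.Type().IsSetType() && v.Type().ElementType() != cty.String {
		return false
	}
	return true
}

func main() {
	m := cty.MapVal(map[string]cty.Value{"a": cty.StringVal("x")})
	s := cty.SetVal([]cty.Value{cty.StringVal("a")})
	l := cty.ListVal([]cty.Value{cty.StringVal("a")})
	fmt.Println(forEachOK(m), forEachOK(s), forEachOK(l)) // true true false
}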
-type EvalImportState struct { - Addr addrs.ResourceInstance - Provider *providers.Interface - ID string - Output *[]providers.ImportedResource -} - -// TODO: test -func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - provider := *n.Provider - var diags tfdiags.Diagnostics - - { - // Call pre-import hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreImportState(absAddr, n.ID) - }) - if err != nil { - return nil, err - } - } - - resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: n.Addr.Resource.Type, - ID: n.ID, - }) - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.Err() - } - - imported := resp.ImportedResources - - for _, obj := range imported { - log.Printf("[TRACE] EvalImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName) - } - - if n.Output != nil { - *n.Output = imported - } - - { - // Call post-import hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostImportState(absAddr, imported) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalImportStateVerify verifies the state after ImportState and -// after the refresh to make sure it is non-nil and valid. -type EvalImportStateVerify struct { - Addr addrs.ResourceInstance - State **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - state := *n.State - if state.Value.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot import non-existent remote object", - fmt.Sprintf( - "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.", - n.Addr.String(), - ), - )) - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go deleted file mode 100644 index 5ab6b44f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go +++ /dev/null @@ -1,61 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// EvalConfigBlock is an EvalNode implementation that takes a raw -// configuration block and evaluates any expressions within it. -// -// ExpandedConfig is populated with the result of expanding any "dynamic" -// blocks in the given body, which can be useful for extracting correct source -// location information for specific attributes in the result. 
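Review aside: EvalConfigBlock and EvalConfigExpr below wrap plain HCL evaluation against an evaluation context. A minimal sketch of that underlying primitive using the public hashicorp/hcl/v2 API (the filename example.tf and the name variable are invented for illustration):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Parse a bare expression, comparable to EvalConfigExpr's input.
	expr, diags := hclsyntax.ParseExpression([]byte(`"prefix-${name}"`), "example.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// Evaluate it against a context that supplies variable values.
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{"name": cty.StringVal("web")},
	}
	val, moreDiags := expr.Value(ctx)
	if moreDiags.HasErrors() {
		panic(moreDiags.Error())
	}
	fmt.Println(val.AsString()) // prefix-web
}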
-type EvalConfigBlock struct { - Config *hcl.Body - Schema *configschema.Block - SelfAddr addrs.Referenceable - Output *cty.Value - ExpandedConfig *hcl.Body - ContinueOnErr bool -} - -func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) { - val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr, EvalDataForNoInstanceKey) - if diags.HasErrors() && n.ContinueOnErr { - log.Printf("[WARN] Block evaluation failed: %s", diags.Err()) - return nil, EvalEarlyExitError{} - } - - if n.Output != nil { - *n.Output = val - } - if n.ExpandedConfig != nil { - *n.ExpandedConfig = body - } - - return nil, diags.ErrWithWarnings() -} - -// EvalConfigExpr is an EvalNode implementation that takes a raw configuration -// expression and evaluates it. -type EvalConfigExpr struct { - Expr hcl.Expression - SelfAddr addrs.Referenceable - Output *cty.Value -} - -func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) { - val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr) - - if n.Output != nil { - *n.Output = val - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go deleted file mode 100644 index 03101938..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalLocal is an EvalNode implementation that evaluates the -// expression for a local value and writes it into a transient part of -// the state. -type EvalLocal struct { - Addr addrs.LocalValue - Expr hcl.Expression -} - -func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - // We ignore diags here because any problems we might find will be found - // again in EvaluateExpr below. - refs, _ := lang.ReferencesInExpr(n.Expr) - for _, ref := range refs { - if ref.Subject == n.Addr { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Self-referencing local value", - Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", n.Addr), - Subject: ref.SourceRange.ToHCL().Ptr(), - Context: n.Expr.Range().Ptr(), - }) - } - } - if diags.HasErrors() { - return nil, diags.Err() - } - - val, moreDiags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return nil, diags.Err() - } - - state := ctx.State() - if state == nil { - return nil, fmt.Errorf("cannot write local value to nil state") - } - - state.SetLocalValue(n.Addr.Absolute(ctx.Path()), val) - - return nil, nil -} - -// EvalDeleteLocal is an EvalNode implementation that deletes a Local value -// from the state. Locals aren't persisted, but we don't need to evaluate them -// during destroy. 
-type EvalDeleteLocal struct { - Addr addrs.LocalValue -} - -func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { - state := ctx.State() - if state == nil { - return nil, nil - } - - state.RemoveLocalValue(n.Addr.Absolute(ctx.Path())) - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go deleted file mode 100644 index f4bc8225..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go +++ /dev/null @@ -1,8 +0,0 @@ -package terraform - -// EvalNoop is an EvalNode that does nothing. -type EvalNoop struct{} - -func (EvalNoop) Eval(EvalContext) (interface{}, error) { - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go deleted file mode 100644 index 9f71e92f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go +++ /dev/null @@ -1,135 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// EvalDeleteOutput is an EvalNode implementation that deletes an output -// from the state. -type EvalDeleteOutput struct { - Addr addrs.OutputValue -} - -// TODO: test -func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { - state := ctx.State() - if state == nil { - return nil, nil - } - - state.RemoveOutputValue(n.Addr.Absolute(ctx.Path())) - return nil, nil -} - -// EvalWriteOutput is an EvalNode implementation that writes the output -// for the given name to the current state. -type EvalWriteOutput struct { - Addr addrs.OutputValue - Sensitive bool - Expr hcl.Expression - // ContinueOnErr allows interpolation to fail during Input - ContinueOnErr bool -} - -// TODO: test -func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { - addr := n.Addr.Absolute(ctx.Path()) - - // This has to run before we have a state lock, since evaluation also - // reads the state - val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) - // We'll handle errors below, after we have loaded the module. - - state := ctx.State() - if state == nil { - return nil, nil - } - - changes := ctx.Changes() // may be nil, if we're not working on a changeset - - // handling the interpolation error - if diags.HasErrors() { - if n.ContinueOnErr || flagWarnOutputErrors { - log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err()) - // if we're continuing, make sure the output is included, and - // marked as unknown. If the evaluator was able to find a type - // for the value in spite of the error then we'll use it. 
- n.setValue(addr, state, changes, cty.UnknownVal(val.Type())) - return nil, EvalEarlyExitError{} - } - return nil, diags.Err() - } - - n.setValue(addr, state, changes, val) - - return nil, nil -} - -func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { - if val.IsKnown() && !val.IsNull() { - // The state itself doesn't represent unknown values, so we null them - // out here and then we'll save the real unknown value in the planned - // changeset below, if we have one on this graph walk. - log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr) - stateVal := cty.UnknownAsNull(val) - state.SetOutputValue(addr, stateVal, n.Sensitive) - } else { - log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr) - state.RemoveOutputValue(addr) - } - - // If we also have an active changeset then we'll replicate the value in - // there. This is used in preference to the state where present, since it - // *is* able to represent unknowns, while the state cannot. - if changes != nil { - // For the moment we are not properly tracking changes to output - // values, and just marking them always as "Create" or "Destroy" - // actions. A future release will rework the output lifecycle so we - // can track their changes properly, in a similar way to how we work - // with resource instances. - - var change *plans.OutputChange - if !val.IsNull() { - change = &plans.OutputChange{ - Addr: addr, - Sensitive: n.Sensitive, - Change: plans.Change{ - Action: plans.Create, - Before: cty.NullVal(cty.DynamicPseudoType), - After: val, - }, - } - } else { - change = &plans.OutputChange{ - Addr: addr, - Sensitive: n.Sensitive, - Change: plans.Change{ - // This is just a weird placeholder delete action since - // we don't have an actual prior value to indicate. - // FIXME: Generate real planned changes for output values - // that include the old values. 
- Action: plans.Delete,
- Before: cty.NullVal(cty.DynamicPseudoType),
- After: cty.NullVal(cty.DynamicPseudoType),
- },
- }
- }
-
- cs, err := change.Encode()
- if err != nil {
- // Should never happen, since we just constructed this right above
- panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err))
- }
- log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr)
- changes.RemoveOutputChange(addr) // remove any existing planned change, if present
- changes.AppendOutputChange(cs) // add the new planned change
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go
deleted file mode 100644
index 7440cff7..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package terraform
-
-import (
- "fmt"
- "log"
-
- "github.com/hashicorp/hcl/v2"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/configs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/providers"
- "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-func buildProviderConfig(ctx EvalContext, addr addrs.ProviderConfig, config *configs.Provider) hcl.Body {
- var configBody hcl.Body
- if config != nil {
- configBody = config.Config
- }
-
- var inputBody hcl.Body
- inputConfig := ctx.ProviderInput(addr)
- if len(inputConfig) > 0 {
- inputBody = configs.SynthBody("", inputConfig)
- }
-
- switch {
- case configBody != nil && inputBody != nil:
- log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr)
- // Note that the inputBody is the _base_ here, because configs.MergeBodies
- // expects the base to have all of the required fields, while these are
- // forced to be optional for the override. The input process should
- // guarantee that we have a value for each of the required arguments and
- // that in practice the sets of attributes in each body will be
- // disjoint.
- return configs.MergeBodies(inputBody, configBody)
- case configBody != nil:
- log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr)
- return configBody
- case inputBody != nil:
- log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr)
- return inputBody
- default:
- log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr)
- return hcl.EmptyBody()
- }
-}
-
-// EvalConfigProvider is an EvalNode implementation that configures
-// a provider that is already initialized and retrieved.
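Review aside: configs.MergeBodies above is an SDK-internal helper with base/override semantics; the closest public analogue is hcl.MergeBodies. A sketch of buildProviderConfig's selection logic using that public API (pick is a hypothetical stand-in, and the precedence details of the two merge helpers differ):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
)

// pick mirrors the three-way selection above: merge when both sources
// exist, pass through whichever single source exists, else an empty body.
func pick(configBody, inputBody hcl.Body) hcl.Body {
	switch {
	case configBody != nil && inputBody != nil:
		return hcl.MergeBodies([]hcl.Body{inputBody, configBody})
	case configBody != nil:
		return configBody
	case inputBody != nil:
		return inputBody
	default:
		return hcl.EmptyBody()
	}
}

func main() {
	// With neither source present we still get a usable empty body, never nil.
	fmt.Println(pick(nil, nil) != nil) // true
}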
-type EvalConfigProvider struct { - Addr addrs.ProviderConfig - Provider *providers.Interface - Config *configs.Provider -} - -func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil { - return nil, fmt.Errorf("EvalConfigProvider Provider is nil") - } - - var diags tfdiags.Diagnostics - provider := *n.Provider - config := n.Config - - configBody := buildProviderConfig(ctx, n.Addr, config) - - resp := provider.GetSchema() - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configSchema := resp.Provider.Block - configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configDiags := ctx.ConfigureProvider(n.Addr, configVal) - configDiags = configDiags.InConfigBody(configBody) - - return nil, configDiags.ErrWithWarnings() -} - -// EvalInitProvider is an EvalNode implementation that initializes a provider -// and returns nothing. The provider can be retrieved again with the -// EvalGetProvider node. -type EvalInitProvider struct { - TypeName string - Addr addrs.ProviderConfig -} - -func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvider(n.TypeName, n.Addr) -} - -// EvalCloseProvider is an EvalNode implementation that closes provider -// connections that aren't needed anymore. -type EvalCloseProvider struct { - Addr addrs.ProviderConfig -} - -func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvider(n.Addr) - return nil, nil -} - -// EvalGetProvider is an EvalNode implementation that retrieves an already -// initialized provider instance for the given name. -// -// Unlike most eval nodes, this takes an _absolute_ provider configuration, -// because providers can be passed into and inherited between modules. -// Resource nodes must therefore know the absolute path of the provider they -// will use, which is usually accomplished by implementing -// interface GraphNodeProviderConsumer. -type EvalGetProvider struct { - Addr addrs.AbsProviderConfig - Output *providers.Interface - - // If non-nil, Schema will be updated after eval to refer to the - // schema of the provider. - Schema **ProviderSchema -} - -func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { - if n.Addr.ProviderConfig.Type == "" { - // Should never happen - panic("EvalGetProvider used with uninitialized provider configuration address") - } - - result := ctx.Provider(n.Addr) - if result == nil { - return nil, fmt.Errorf("provider %s not initialized", n.Addr) - } - - if n.Output != nil { - *n.Output = result - } - - if n.Schema != nil { - *n.Schema = ctx.ProviderSchema(n.Addr) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go deleted file mode 100644 index 405ce9d0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go +++ /dev/null @@ -1,55 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" -) - -// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner -// and returns nothing. 
The provisioner can be retrieved again with the -// EvalGetProvisioner node. -type EvalInitProvisioner struct { - Name string -} - -func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvisioner(n.Name) -} - -// EvalCloseProvisioner is an EvalNode implementation that closes provisioner -// connections that aren't needed anymore. -type EvalCloseProvisioner struct { - Name string -} - -func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvisioner(n.Name) - return nil, nil -} - -// EvalGetProvisioner is an EvalNode implementation that retrieves an already -// initialized provisioner instance for the given name. -type EvalGetProvisioner struct { - Name string - Output *provisioners.Interface - Schema **configschema.Block -} - -func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { - result := ctx.Provisioner(n.Name) - if result == nil { - return nil, fmt.Errorf("provisioner %s not initialized", n.Name) - } - - if n.Output != nil { - *n.Output = result - } - - if n.Schema != nil { - *n.Schema = ctx.ProvisionerSchema(n.Name) - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go deleted file mode 100644 index 0b734b79..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go +++ /dev/null @@ -1,395 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalReadData is an EvalNode implementation that deals with the main part -// of the data resource lifecycle: either actually reading from the data source -// or generating a plan to do so. -type EvalReadData struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Dependencies []addrs.Referenceable - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - - // Planned is set when dealing with data resources that were deferred to - // the apply walk, to let us see what was planned. If this is set, the - // evaluation of the config is required to produce a wholly-known - // configuration which is consistent with the partial object included - // in this planned change. - Planned **plans.ResourceInstanceChange - - // ForcePlanRead, if true, overrides the usual behavior of immediately - // reading from the data source where possible, instead forcing us to - // _always_ generate a plan. This is used during the plan walk, since we - // mustn't actually apply anything there. (The resulting state doesn't - // get persisted) - ForcePlanRead bool - - // The result from this EvalNode has a few different possibilities - // depending on the input: - // - If Planned is nil then we assume we're aiming to _produce_ the plan, - // and so the following two outcomes are possible: - // - OutputChange.Action is plans.NoOp and OutputState is the complete - // result of reading from the data source. 
This is the easy path.
- // - OutputChange.Action is plans.Read and OutputState is a planned
- // object placeholder (states.ObjectPlanned). In this case, the
- // returned change must be recorded in the overall changeset and
- // eventually passed to another instance of this struct during the
- // apply walk.
- // - If Planned is non-nil then we assume we're aiming to complete a
- // planned read from an earlier plan walk. In this case the only possible
- // non-error outcome is to set Output.Action (if non-nil) to a plans.NoOp
- // change and put the complete resulting state in OutputState, ready to
- // be saved in the overall state and used for expression evaluation.
- OutputChange **plans.ResourceInstanceChange
- OutputValue *cty.Value
- OutputConfigValue *cty.Value
- OutputState **states.ResourceInstanceObject
-}
-
-func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) {
- absAddr := n.Addr.Absolute(ctx.Path())
- log.Printf("[TRACE] EvalReadData: working on %s", absAddr)
-
- if n.ProviderSchema == nil || *n.ProviderSchema == nil {
- return nil, fmt.Errorf("provider schema not available for %s", n.Addr)
- }
-
- var diags tfdiags.Diagnostics
- var change *plans.ResourceInstanceChange
- var configVal cty.Value
-
- // TODO: Do we need to handle Delete changes here? EvalReadDataDiff and
- // EvalReadDataApply did, but it seems like we should handle that via a
- // separate mechanism since it boils down to just deleting the object from
- // the state... and we do that on every plan anyway, forcing the data
- // resource to re-read.
-
- config := *n.Config
- provider := *n.Provider
- providerSchema := *n.ProviderSchema
- schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
- if schema == nil {
- // Should be caught during validation, so we don't bother with a pretty error here
- return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.ProviderConfig.Type, n.Addr.Resource.Type)
- }
-
- // We'll always start by evaluating the configuration. What we do after
- // that will depend on the evaluation result along with what other inputs
- // we were given.
- objTy := schema.ImpliedType()
- priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time
-
- forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
- keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
-
- var configDiags tfdiags.Diagnostics
- configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData)
- diags = diags.Append(configDiags)
- if configDiags.HasErrors() {
- return nil, diags.Err()
- }
-
- proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal)
-
- // If our configuration contains any unknown values then we must defer the
- // read to the apply phase by producing a "Read" change for this resource,
- // and a placeholder value for it in the state.
- if n.ForcePlanRead || !configVal.IsWhollyKnown() {
- // If the configuration is still unknown when we're applying a planned
- // change then that indicates a bug in Terraform, since we should have
- // everything resolved by now.
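Review aside: the deferral test above turns on cty's IsWhollyKnown, which walks the entire value looking for unknowns. A minimal sketch, assuming only github.com/zclconf/go-cty (illustrative; the bucket attribute is invented):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	known := cty.ObjectVal(map[string]cty.Value{
		"bucket": cty.StringVal("logs"),
	})
	unknown := cty.ObjectVal(map[string]cty.Value{
		"bucket": cty.UnknownVal(cty.String), // one unknown anywhere taints the whole value
	})
	fmt.Println(known.IsWhollyKnown(), unknown.IsWhollyKnown()) // true false
}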
- if n.Planned != nil && *n.Planned != nil {
- return nil, fmt.Errorf(
- "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)",
- absAddr,
- )
- }
- if n.ForcePlanRead {
- log.Printf("[TRACE] EvalReadData: %s configuration is fully known, but we're forcing a read plan to be created", absAddr)
- } else {
- log.Printf("[TRACE] EvalReadData: %s configuration not fully known yet, so deferring to apply phase", absAddr)
- }
-
- err := ctx.Hook(func(h Hook) (HookAction, error) {
- return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal)
- })
- if err != nil {
- return nil, err
- }
-
- change = &plans.ResourceInstanceChange{
- Addr: absAddr,
- ProviderAddr: n.ProviderAddr,
- Change: plans.Change{
- Action: plans.Read,
- Before: priorVal,
- After: proposedNewVal,
- },
- }
-
- err = ctx.Hook(func(h Hook) (HookAction, error) {
- return h.PostDiff(absAddr, states.CurrentGen, change.Action, priorVal, proposedNewVal)
- })
- if err != nil {
- return nil, err
- }
-
- if n.OutputChange != nil {
- *n.OutputChange = change
- }
- if n.OutputValue != nil {
- *n.OutputValue = change.After
- }
- if n.OutputConfigValue != nil {
- *n.OutputConfigValue = configVal
- }
- if n.OutputState != nil {
- state := &states.ResourceInstanceObject{
- Value: change.After,
- Status: states.ObjectPlanned, // because the partial value in the plan must be used for now
- Dependencies: n.Dependencies,
- }
- *n.OutputState = state
- }
-
- return nil, diags.ErrWithWarnings()
- }
-
- if n.Planned != nil && *n.Planned != nil && (*n.Planned).Action != plans.Read {
- // If any other action gets in here then that's always a bug; this
- // EvalNode only deals with reading.
- return nil, fmt.Errorf(
- "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)",
- (*n.Planned).Action, absAddr,
- )
- }
-
- log.Printf("[TRACE] Re-validating config for %s", absAddr)
- validateResp := provider.ValidateDataSourceConfig(
- providers.ValidateDataSourceConfigRequest{
- TypeName: n.Addr.Resource.Type,
- Config: configVal,
- },
- )
- if validateResp.Diagnostics.HasErrors() {
- return nil, validateResp.Diagnostics.InConfigBody(n.Config.Config).Err()
- }
-
- // If we get down here then our configuration is complete and we're ready
- // to actually call the provider to read the data.
- log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr)
-
- err := ctx.Hook(func(h Hook) (HookAction, error) {
- // We don't have a state yet, so we'll just give the hook an
- // empty one to work with.
- return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType))
- })
- if err != nil {
- return nil, err
- }
-
- resp := provider.ReadDataSource(providers.ReadDataSourceRequest{
- TypeName: n.Addr.Resource.Type,
- Config: configVal,
- })
- diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config))
- if diags.HasErrors() {
- return nil, diags.Err()
- }
- newVal := resp.State
- if newVal == cty.NilVal {
- // This can happen with incompletely-configured mocks. We'll allow it
- // and treat it as an alias for a properly-typed null value.
- newVal = cty.NullVal(schema.ImpliedType()) - } - - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - if newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced null object", - fmt.Sprintf( - "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, - ), - )) - } - if !newVal.IsWhollyKnown() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, - ), - )) - - // We'll still save the object, but we need to eliminate any unknown - // values first because we can't serialize them in the state file. - // Note that this may cause set elements to be coalesced if they - // differed only by having unknown values, but we don't worry about - // that here because we're saving the value only for inspection - // purposes; the error we added above will halt the graph walk. - newVal = cty.UnknownAsNull(newVal) - } - - // Since we've completed the read, we actually have no change to make, but - // we'll produce a NoOp one anyway to preserve the usual flow of the - // plan phase and allow it to produce a complete plan. - change = &plans.ResourceInstanceChange{ - Addr: absAddr, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: plans.NoOp, - Before: newVal, - After: newVal, - }, - } - state := &states.ResourceInstanceObject{ - Value: change.After, - Status: states.ObjectReady, // because we completed the read from the provider - Dependencies: n.Dependencies, - } - - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) - }) - if err != nil { - return nil, err - } - - if n.OutputChange != nil { - *n.OutputChange = change - } - if n.OutputValue != nil { - *n.OutputValue = change.After - } - if n.OutputConfigValue != nil { - *n.OutputConfigValue = configVal - } - if n.OutputState != nil { - *n.OutputState = state - } - - return nil, diags.ErrWithWarnings() -} - -// EvalReadDataApply is an EvalNode implementation that executes a data -// resource's ReadDataApply method to read data from the data source. 
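Review aside: several of these nodes guard provider results with TestConformance against the schema's implied type, as EvalReadDataApply does below. A standalone sketch of that check, assuming only github.com/zclconf/go-cty (illustrative; the "id" attribute is invented):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// The type the schema implies versus what a provider actually returned.
	want := cty.Object(map[string]cty.Type{"id": cty.String})
	got := cty.ObjectVal(map[string]cty.Value{"id": cty.NumberIntVal(1)})
	errs := got.Type().TestConformance(want)
	for _, err := range errs {
		fmt.Println(err) // reports the number-vs-string mismatch at .id
	}
}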
-type EvalReadDataApply struct { - Addr addrs.ResourceInstance - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - Output **states.ResourceInstanceObject - Config *configs.Resource - Change **plans.ResourceInstanceChange - StateReferences []addrs.Referenceable -} - -func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { - provider := *n.Provider - change := *n.Change - providerSchema := *n.ProviderSchema - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - - // If the diff is for *destroying* this resource then we'll - // just drop its state and move on, since data resources don't - // support an actual "destroy" action. - if change != nil && change.Action == plans.Delete { - if n.Output != nil { - *n.Output = nil - } - return nil, nil - } - - // For the purpose of external hooks we present a data apply as a - // "Refresh" rather than an "Apply" because creating a data source - // is presented to users/callers as a "read" operation. - err := ctx.Hook(func(h Hook) (HookAction, error) { - // We don't have a state yet, so we'll just give the hook an - // empty one to work with. - return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) - }) - if err != nil { - return nil, err - } - - resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: n.Addr.Resource.Type, - Config: change.After, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) - if diags.HasErrors() { - return nil, diags.Err() - } - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type) - } - - newVal := resp.State - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q planned an invalid value for %s. 
The result could not be saved.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) - }) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = &states.ResourceInstanceObject{ - Value: newVal, - Status: states.ObjectReady, - Dependencies: n.StateReferences, - } - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go deleted file mode 100644 index 6a834445..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go +++ /dev/null @@ -1,106 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalRefresh is an EvalNode implementation that does a refresh for -// a resource. -type EvalRefresh struct { - Addr addrs.ResourceInstance - ProviderAddr addrs.AbsProviderConfig - Provider *providers.Interface - ProviderSchema **ProviderSchema - State **states.ResourceInstanceObject - Output **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - - // If we have no state, we don't do any refreshing - if state == nil { - log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path())) - return nil, diags.ErrWithWarnings() - } - - schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - // Call pre-refresh hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(absAddr, states.CurrentGen, state.Value) - }) - if err != nil { - return nil, diags.ErrWithWarnings() - } - - // Refresh! - priorVal := state.Value - req := providers.ReadResourceRequest{ - TypeName: n.Addr.Resource.Type, - PriorState: priorVal, - Private: state.Private, - } - - provider := *n.Provider - resp := provider.ReadResource(req) - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.Err() - } - - if resp.NewState == cty.NilVal { - // This ought not to happen in real cases since it's not possible to - // send NilVal over the plugin RPC channel, but it can come up in - // tests due to sloppy mocking. 
- panic("new state is cty.NilVal") - } - - for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - newState := state.DeepCopy() - newState.Value = resp.NewState - newState.Private = resp.Private - - // Call post-refresh hook - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value) - }) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = newState - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go deleted file mode 100644 index 7d6bb660..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go +++ /dev/null @@ -1,42 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalSequence is an EvalNode that evaluates in sequence. -type EvalSequence struct { - Nodes []EvalNode -} - -func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - for _, n := range n.Nodes { - if n == nil { - continue - } - - if _, err := EvalRaw(n, ctx); err != nil { - if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit { - // In this path we abort early, losing any non-error - // diagnostics we saw earlier. - return nil, err - } - diags = diags.Append(err) - if diags.HasErrors() { - // Halt if we get some errors, but warnings are okay. - break - } - } - } - - return nil, diags.ErrWithWarnings() -} - -// EvalNodeFilterable impl. -func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) { - for i, node := range n.Nodes { - n.Nodes[i] = fn(node) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go deleted file mode 100644 index 70a72bbd..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go +++ /dev/null @@ -1,475 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalReadState is an EvalNode implementation that reads the -// current object for a specific instance in the state. -type EvalReadState struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // ProviderSchema is the schema for the provider given in Provider. - ProviderSchema **ProviderSchema - - // Provider is the provider that will subsequently perform actions on - // the the state object. This is used to perform any schema upgrades - // that might be required to prepare the stored data for use. 
- Provider *providers.Interface - - // Output will be written with a pointer to the retrieved object. - Output **states.ResourceInstanceObject -} - -func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil || *n.Provider == nil { - panic("EvalReadState used with no Provider object") - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - panic("EvalReadState used with no ProviderSchema object") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr) - - src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen) - if src == nil { - // Presumably we only have deposed objects, then. - log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr) - return nil, nil - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) - } - var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. - return nil, diags.Err() - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = obj - } - return obj, nil -} - -// EvalReadStateDeposed is an EvalNode implementation that reads the -// deposed InstanceState for a specific resource out of the state -type EvalReadStateDeposed struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // Key identifies which deposed object we will read. - Key states.DeposedKey - - // ProviderSchema is the schema for the provider given in Provider. - ProviderSchema **ProviderSchema - - // Provider is the provider that will subsequently perform actions on - // the the state object. This is used to perform any schema upgrades - // that might be required to prepare the stored data for use. - Provider *providers.Interface - - // Output will be written with a pointer to the retrieved object. - Output **states.ResourceInstanceObject -} - -func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil || *n.Provider == nil { - panic("EvalReadStateDeposed used with no Provider object") - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - panic("EvalReadStateDeposed used with no ProviderSchema object") - } - - key := n.Key - if key == states.NotDeposed { - return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported") - } - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key) - - src := ctx.State().ResourceInstanceObject(absAddr, key) - if src == nil { - // Presumably we only have deposed objects, then. 
- log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key) - return nil, nil - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) - } - var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. - return nil, diags.Err() - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - if n.Output != nil { - *n.Output = obj - } - return obj, nil -} - -// EvalRequireState is an EvalNode implementation that exits early if the given -// object is null. -type EvalRequireState struct { - State **states.ResourceInstanceObject -} - -func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - return nil, EvalEarlyExitError{} - } - - state := *n.State - if state == nil || state.Value.IsNull() { - return nil, EvalEarlyExitError{} - } - - return nil, nil -} - -// EvalUpdateStateHook is an EvalNode implementation that calls the -// PostStateUpdate hook with the current state. -type EvalUpdateStateHook struct{} - -func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { - // In principle we could grab the lock here just long enough to take a - // deep copy and then pass that to our hooks below, but we'll instead - // hold the hook for the duration to avoid the potential confusing - // situation of us racing to call PostStateUpdate concurrently with - // different state snapshots. - stateSync := ctx.State() - state := stateSync.Lock().DeepCopy() - defer stateSync.Unlock() - - // Call the hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostStateUpdate(state) - }) - if err != nil { - return nil, err - } - - return nil, nil -} - -// EvalWriteState is an EvalNode implementation that saves the given object -// as the current object for the selected resource instance. -type EvalWriteState struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // State is the object state to save. - State **states.ResourceInstanceObject - - // ProviderSchema is the schema for the provider given in ProviderAddr. - ProviderSchema **ProviderSchema - - // ProviderAddr is the address of the provider configuration that - // produced the given object. - ProviderAddr addrs.AbsProviderConfig -} - -func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - // Note that a pointer _to_ nil is valid here, indicating the total - // absense of an object as we'd see during destroy. - panic("EvalWriteState used with no ResourceInstanceObject") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - if n.ProviderAddr.ProviderConfig.Type == "" { - return nil, fmt.Errorf("failed to write state for %s, missing provider type", absAddr) - } - - obj := *n.State - if obj == nil || obj.Value.IsNull() { - // No need to encode anything: we'll just write it directly. 
- state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr) - log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr) - return nil, nil - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - // Should never happen, unless our state object is nil - panic("EvalWriteState used with pointer to nil ProviderSchema object") - } - - if obj != nil { - log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr) - } else { - log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr) - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // It shouldn't be possible to get this far in any real scenario - // without a schema, but we might end up here in contrived tests that - // fail to set up their world properly. - return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) - } - src, err := obj.Encode(schema.ImpliedType(), currentVersion) - if err != nil { - return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) - } - - state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr) - return nil, nil -} - -// EvalWriteStateDeposed is an EvalNode implementation that writes -// an InstanceState out to the Deposed list of a resource in the state. -type EvalWriteStateDeposed struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // Key indicates which deposed object to write to. - Key states.DeposedKey - - // State is the object state to save. - State **states.ResourceInstanceObject - - // ProviderSchema is the schema for the provider given in ProviderAddr. - ProviderSchema **ProviderSchema - - // ProviderAddr is the address of the provider configuration that - // produced the given object. - ProviderAddr addrs.AbsProviderConfig -} - -func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - // Note that a pointer _to_ nil is valid here, indicating the total - // absense of an object as we'd see during destroy. - panic("EvalWriteStateDeposed used with no ResourceInstanceObject") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - key := n.Key - state := ctx.State() - - if key == states.NotDeposed { - // should never happen - return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr) - } - - obj := *n.State - if obj == nil { - // No need to encode anything: we'll just write it directly. - state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr) - log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key) - return nil, nil - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - // Should never happen, unless our state object is nil - panic("EvalWriteStateDeposed used with no ProviderSchema object") - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // It shouldn't be possible to get this far in any real scenario - // without a schema, but we might end up here in contrived tests that - // fail to set up their world properly. 
- return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) - } - src, err := obj.Encode(schema.ImpliedType(), currentVersion) - if err != nil { - return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) - } - - log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key) - state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr) - return nil, nil -} - -// EvalDeposeState is an EvalNode implementation that moves the current object -// for the given instance to instead be a deposed object, leaving the instance -// with no current object. -// This is used at the beginning of a create-before-destroy replace action so -// that the create can create while preserving the old state of the -// to-be-destroyed object. -type EvalDeposeState struct { - Addr addrs.ResourceInstance - - // ForceKey, if a value other than states.NotDeposed, will be used as the - // key for the newly-created deposed object that results from this action. - // If set to states.NotDeposed (the zero value), a new unique key will be - // allocated. - ForceKey states.DeposedKey - - // OutputKey, if non-nil, will be written with the deposed object key that - // was generated for the object. This can then be passed to - // EvalUndeposeState.Key so it knows which deposed instance to forget. - OutputKey *states.DeposedKey -} - -// TODO: test -func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - var key states.DeposedKey - if n.ForceKey == states.NotDeposed { - key = state.DeposeResourceInstanceObject(absAddr) - } else { - key = n.ForceKey - state.DeposeResourceInstanceObjectForceKey(absAddr, key) - } - log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key) - - if n.OutputKey != nil { - *n.OutputKey = key - } - - return nil, nil -} - -// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will -// restore a particular deposed object of the specified resource instance -// to be the "current" object if and only if the instance doesn't currently -// have a current object. -// -// This is intended for use when the create leg of a create before destroy -// fails with no partial new object: if we didn't take any action, the user -// would be left in the unfortunate situation of having no current object -// and the previously-workign object now deposed. This EvalNode causes a -// better outcome by restoring things to how they were before the replace -// operation began. -// -// The create operation may have produced a partial result even though it -// failed and it's important that we don't "forget" that state, so in that -// situation the prior object remains deposed and the partial new object -// remains the current object, allowing the situation to hopefully be -// improved in a subsequent run. -type EvalMaybeRestoreDeposedObject struct { - Addr addrs.ResourceInstance - - // Key is a pointer to the deposed object key that should be forgotten - // from the state, which must be non-nil. 
- Key *states.DeposedKey -} - -// TODO: test -func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - dk := *n.Key - state := ctx.State() - - restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk) - if restored { - log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk) - } else { - log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk) - } - - return nil, nil -} - -// EvalWriteResourceState is an EvalNode implementation that ensures that -// a suitable resource-level state record is present in the state, if that's -// required for the "each mode" of that resource. -// -// This is important primarily for the situation where count = 0, since this -// eval is the only change we get to set the resource "each mode" to list -// in that case, allowing expression evaluation to see it as a zero-element -// list rather than as not set at all. -type EvalWriteResourceState struct { - Addr addrs.Resource - Config *configs.Resource - ProviderAddr addrs.AbsProviderConfig -} - -// TODO: test -func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - - eachMode := states.NoEach - if count >= 0 { // -1 signals "count not set" - eachMode = states.EachList - } - - forEach, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - diags = diags.Append(forEachDiags) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - - if forEach != nil { - eachMode = states.EachMap - } - - // This method takes care of all of the business logic of updating this - // while ensuring that any existing instances are preserved, etc. - state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr) - - return nil, nil -} - -// EvalForgetResourceState is an EvalNode implementation that prunes out an -// empty resource-level state for a given resource address, or produces an -// error if it isn't empty after all. -// -// This should be the last action taken for a resource that has been removed -// from the configuration altogether, to clean up the leftover husk of the -// resource in the state after other EvalNodes have destroyed and removed -// all of the instances and instance objects beneath it. -type EvalForgetResourceState struct { - Addr addrs.Resource -} - -func (n *EvalForgetResourceState) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - pruned := state.RemoveResourceIfEmpty(absAddr) - if !pruned { - // If this produces an error, it indicates a bug elsewhere in Terraform - // -- probably missing graph nodes, graph edges, or - // incorrectly-implemented evaluation steps. 
- return nil, fmt.Errorf("orphan resource %s still has a non-empty state after apply; this is a bug in Terraform", absAddr) - } - log.Printf("[TRACE] EvalForgetResourceState: Pruned husk of %s from state", absAddr) - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go deleted file mode 100644 index 27d5f212..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go +++ /dev/null @@ -1,106 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// UpgradeResourceState will, if necessary, run the provider-defined upgrade -// logic against the given state object to make it compliant with the -// current schema version. This is a no-op if the given state object is -// already at the latest version. -// -// If any errors occur during upgrade, error diagnostics are returned. In that -// case it is not safe to proceed with using the original state object. -func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { - if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { - // We only do state upgrading for managed resources. - return src, nil - } - - stateIsFlatmap := len(src.AttrsJSON) == 0 - - providerType := addr.Resource.Resource.DefaultProviderConfig().Type - if src.SchemaVersion > currentVersion { - log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource instance managed by newer provider version", - // This is not a very good error message, but we don't retain enough - // information in state to give good feedback on what provider - // version might be required here. :( - fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType), - )) - return nil, diags - } - - // If we get down here then we need to upgrade the state, with the - // provider's help. - // If this state was originally created by a version of Terraform prior to - // v0.12, this also includes translating from legacy flatmap to new-style - // representation, since only the provider has enough information to - // understand a flatmap built against an older schema. 
- if src.SchemaVersion != currentVersion { - log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) - } else { - log.Printf("[TRACE] UpgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) - } - - req := providers.UpgradeResourceStateRequest{ - TypeName: addr.Resource.Resource.Type, - - // TODO: The internal schema version representations are all using - // uint64 instead of int64, but unsigned integers aren't friendly - // to all protobuf target languages so in practice we use int64 - // on the wire. In future we will change all of our internal - // representations to int64 too. - Version: int64(src.SchemaVersion), - } - - if stateIsFlatmap { - req.RawStateFlatmap = src.AttrsFlat - } else { - req.RawStateJSON = src.AttrsJSON - } - - resp := provider.UpgradeResourceState(req) - diags := resp.Diagnostics - if diags.HasErrors() { - return nil, diags - } - - // After upgrading, the new value must conform to the current schema. When - // going over RPC this is actually already ensured by the - // marshaling/unmarshaling of the new value, but we'll check it here - // anyway for robustness, e.g. for in-process providers. - newValue := resp.UpgradedState - if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource state upgrade", - fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)), - )) - } - return nil, diags - } - - new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion)) - if err != nil { - // We already checked for type conformance above, so getting into this - // codepath should be rare and is probably a bug somewhere under CompleteUpgrade. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to encode result of resource state upgrade", - fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)), - )) - } - return new, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go deleted file mode 100644 index a4f28bd9..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go +++ /dev/null @@ -1,588 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/gocty" -) - -// EvalValidateCount is an EvalNode implementation that validates -// the count of a resource. 
-type EvalValidateCount struct { - Resource *configs.Resource -} - -// TODO: test -func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - var count int - var err error - - val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - goto RETURN - } - if val.IsNull() || !val.IsKnown() { - goto RETURN - } - - err = gocty.FromCtyValue(val, &count) - if err != nil { - // The EvaluateExpr call above already guaranteed us a number value, - // so if we end up here then we have something that is out of range - // for an int, and the error message will include a description of - // the valid range. - rawVal := val.AsBigFloat() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count value", - Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err), - Subject: n.Resource.Count.Range().Ptr(), - }) - } else if count < 0 { - rawVal := val.AsBigFloat() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count value", - Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal), - Subject: n.Resource.Count.Range().Ptr(), - }) - } - -RETURN: - return nil, diags.NonFatalErr() -} - -// EvalValidateProvider is an EvalNode implementation that validates -// a provider configuration. -type EvalValidateProvider struct { - Addr addrs.ProviderConfig - Provider *providers.Interface - Config *configs.Provider -} - -func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - provider := *n.Provider - - configBody := buildProviderConfig(ctx, n.Addr, n.Config) - - resp := provider.GetSchema() - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configSchema := resp.Provider.Block - if configSchema == nil { - // Should never happen in real code, but often comes up in tests where - // mock schemas are being used that tend to be incomplete. - log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr) - configSchema = &configschema.Block{} - } - - configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return nil, diags.NonFatalErr() - } - - req := providers.PrepareProviderConfigRequest{ - Config: configVal, - } - - validateResp := provider.PrepareProviderConfig(req) - diags = diags.Append(validateResp.Diagnostics) - - return nil, diags.NonFatalErr() -} - -// EvalValidateProvisioner is an EvalNode implementation that validates -// the configuration of a provisioner belonging to a resource. The provisioner -// config is expected to contain the merged connection configurations. 
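// The "merged connection configurations" mentioned above come from
// overlaying a provisioner-level connection block on the resource-level
// one. A hedged configuration example (all names invented):
//
//	resource "null_resource" "a" {
//	  connection { host = "10.0.0.1" }    # resource-level defaults
//	  provisioner "remote-exec" {
//	    connection { user = "admin" }     # overlaid on the block above
//	  }
//	}
//
// so the provisioner here is validated against both host and user.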
-type EvalValidateProvisioner struct { - ResourceAddr addrs.Resource - Provisioner *provisioners.Interface - Schema **configschema.Block - Config *configs.Provisioner - ResourceHasCount bool - ResourceHasForEach bool -} - -func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { - provisioner := *n.Provisioner - config := *n.Config - schema := *n.Schema - - var diags tfdiags.Diagnostics - - { - // Validate the provisioner's own config first - - configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - - if configVal == cty.NilVal { - // Should never happen for a well-behaved EvaluateBlock implementation - return nil, fmt.Errorf("EvaluateBlock returned nil value") - } - - req := provisioners.ValidateProvisionerConfigRequest{ - Config: configVal, - } - - resp := provisioner.ValidateProvisionerConfig(req) - diags = diags.Append(resp.Diagnostics) - } - - { - // Now validate the connection config, which contains the merged bodies - // of the resource and provisioner connection blocks. - connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr) - diags = diags.Append(connDiags) - } - - return nil, diags.NonFatalErr() -} - -func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics { - // We can't comprehensively validate the connection config since its - // final structure is decided by the communicator and we can't instantiate - // that until we have a complete instance state. However, we *can* catch - // configuration keys that are not valid for *any* communicator, catching - // typos early rather than waiting until we actually try to run one of - // the resource's provisioners. - - var diags tfdiags.Diagnostics - - if config == nil || config.Config == nil { - // No block to validate - return diags - } - - // We evaluate here just by evaluating the block and returning any - // diagnostics we get, since evaluation alone is enough to check for - // extraneous arguments and incorrectly-typed arguments. - _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema) - diags = diags.Append(configDiags) - - return diags -} - -func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - keyData := EvalDataForNoInstanceKey - selfAddr := n.ResourceAddr.Instance(addrs.NoKey) - - if n.ResourceHasCount { - // For a resource that has count, we allow count.index but don't - // know at this stage what it will return. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // "self" can't point to an unknown key, but we'll force it to be - // key 0 here, which should return an unknown value of the - // expected type since none of these elements are known at this - // point anyway. - selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0)) - } else if n.ResourceHasForEach { - // For a resource that has for_each, we allow each.value and each.key - // but don't know at this stage what it will return. - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.DynamicVal, - } - - // "self" can't point to an unknown key, but we'll force it to be - // key "" here, which should return an unknown value of the - // expected type since none of these elements are known at - // this point anyway. 
- selfAddr = n.ResourceAddr.Instance(addrs.StringKey("")) - } - - return ctx.EvaluateBlock(body, schema, selfAddr, keyData) -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. Once that is done, we can remove -// this and use a type-specific schema from the communicator to validate -// exactly what is expected for a given connection type. -var connectionBlockSupersetSchema = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - // NOTE: "type" is not included here because it's treated special - // by the config loader and stored away in a separate field. - - // Common attributes for both connection types - "host": { - Type: cty.String, - Required: true, - }, - "type": { - Type: cty.String, - Optional: true, - }, - "user": { - Type: cty.String, - Optional: true, - }, - "password": { - Type: cty.String, - Optional: true, - }, - "port": { - Type: cty.String, - Optional: true, - }, - "timeout": { - Type: cty.String, - Optional: true, - }, - "script_path": { - Type: cty.String, - Optional: true, - }, - - // For type=ssh only (enforced in ssh communicator) - "private_key": { - Type: cty.String, - Optional: true, - }, - "certificate": { - Type: cty.String, - Optional: true, - }, - "host_key": { - Type: cty.String, - Optional: true, - }, - "agent": { - Type: cty.Bool, - Optional: true, - }, - "agent_identity": { - Type: cty.String, - Optional: true, - }, - "bastion_host": { - Type: cty.String, - Optional: true, - }, - "bastion_host_key": { - Type: cty.String, - Optional: true, - }, - "bastion_port": { - Type: cty.Number, - Optional: true, - }, - "bastion_user": { - Type: cty.String, - Optional: true, - }, - "bastion_password": { - Type: cty.String, - Optional: true, - }, - "bastion_private_key": { - Type: cty.String, - Optional: true, - }, - "bastion_certificate": { - Type: cty.String, - Optional: true, - }, - - // For type=winrm only (enforced in winrm communicator) - "https": { - Type: cty.Bool, - Optional: true, - }, - "insecure": { - Type: cty.Bool, - Optional: true, - }, - "cacert": { - Type: cty.String, - Optional: true, - }, - "use_ntlm": { - Type: cty.Bool, - Optional: true, - }, - }, -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. It's exported only for use in the -// configs/configupgrade package and should not be used from anywhere else. -// The caller may not modify any part of the returned schema data structure. -func ConnectionBlockSupersetSchema() *configschema.Block { - return connectionBlockSupersetSchema -} - -// EvalValidateResource is an EvalNode implementation that validates -// the configuration of a resource. -type EvalValidateResource struct { - Addr addrs.Resource - Provider *providers.Interface - ProviderSchema **ProviderSchema - Config *configs.Resource - - // IgnoreWarnings means that warnings will not be passed through. This allows - // "just-in-time" passes of validation to continue execution through warnings. - IgnoreWarnings bool - - // ConfigVal, if non-nil, will be updated with the value resulting from - // evaluating the given configuration body. 
Since validation is performed - // very early, this value is likely to contain lots of unknown values, - // but its type will conform to the schema of the resource type associated - // with the resource instance being validated. - ConfigVal *cty.Value -} - -func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr) - } - - var diags tfdiags.Diagnostics - provider := *n.Provider - cfg := *n.Config - schema := *n.ProviderSchema - mode := cfg.Mode - - keyData := EvalDataForNoInstanceKey - if n.Config.Count != nil { - // If the config block has count, we'll evaluate with an unknown - // number as count.index so we can still type check even though - // we won't expand count until the plan phase. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // Basic type-checking of the count argument. More complete validation - // of this will happen when we DynamicExpand during the plan walk. - countDiags := n.validateCount(ctx, n.Config.Count) - diags = diags.Append(countDiags) - } - - if n.Config.ForEach != nil { - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.UnknownVal(cty.DynamicPseudoType), - } - - // Evaluate the for_each expression here so we can expose the diagnostics - forEachDiags := n.validateForEach(ctx, n.Config.ForEach) - diags = diags.Append(forEachDiags) - } - - for _, traversal := range n.Config.DependsOn { - ref, refDiags := addrs.ParseRef(traversal) - diags = diags.Append(refDiags) - if !refDiags.HasErrors() && len(ref.Remaining) != 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid depends_on reference", - Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", - Subject: ref.Remaining.SourceRange().Ptr(), - }) - } - - // The ref must also refer to something that exists. To test that, - // we'll just eval it and count on the fact that our evaluator will - // detect references to non-existent objects. - if !diags.HasErrors() { - scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) - if scope != nil { // sometimes nil in tests, due to incomplete mocks - _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) - diags = diags.Append(refDiags) - } - } - } - - // Provider entry point varies depending on resource mode, because - // managed resources and data resources are two distinct concepts - // in the provider abstraction. 
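// In configuration terms the two modes map onto the two block types
// (example names invented):
//
//	resource "aws_instance" "web" {}  # addrs.ManagedResourceMode
//	data "aws_ami" "base" {}          # addrs.DataResourceMode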
- switch mode { - case addrs.ManagedResourceMode: - schema, _ := schema.SchemaForResourceType(mode, cfg.Type) - if schema == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type", - Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type), - Subject: &cfg.TypeRange, - }) - return nil, diags.Err() - } - - configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return nil, diags.Err() - } - - if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks - for _, traversal := range cfg.Managed.IgnoreChanges { - moreDiags := schema.StaticValidateTraversal(traversal) - diags = diags.Append(moreDiags) - } - } - - req := providers.ValidateResourceTypeConfigRequest{ - TypeName: cfg.Type, - Config: configVal, - } - - resp := provider.ValidateResourceTypeConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) - - if n.ConfigVal != nil { - *n.ConfigVal = configVal - } - - case addrs.DataResourceMode: - schema, _ := schema.SchemaForResourceType(mode, cfg.Type) - if schema == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source", - Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type), - Subject: &cfg.TypeRange, - }) - return nil, diags.Err() - } - - configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return nil, diags.Err() - } - - req := providers.ValidateDataSourceConfigRequest{ - TypeName: cfg.Type, - Config: configVal, - } - - resp := provider.ValidateDataSourceConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) - } - - if n.IgnoreWarnings { - // If we _only_ have warnings then we'll return nil. - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - return nil, nil - } else { - // We'll return an error if there are any diagnostics at all, even if - // some of them are warnings. - return nil, diags.NonFatalErr() - } -} - -func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics { - if expr == nil { - return nil - } - - var diags tfdiags.Diagnostics - - countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) - diags = diags.Append(countDiags) - if diags.HasErrors() { - return diags - } - - if countVal.IsNull() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is null. An integer is required.`, - Subject: expr.Range().Ptr(), - }) - return diags - } - - var err error - countVal, err = convert.Convert(countVal, cty.Number) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return diags - } - - // If the value isn't known then that's the best we can do for now, but - // we'll check more thoroughly during the plan walk. - if !countVal.IsKnown() { - return diags - } - - // If we _do_ know the value, then we can do a few more checks here. - var count int - err = gocty.FromCtyValue(countVal, &count) - if err != nil { - // Isn't a whole number, etc. 
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid count argument",
- Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
- Subject: expr.Range().Ptr(),
- })
- return diags
- }
-
- if count < 0 {
- diags = diags.Append(&hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid count argument",
- Detail: `The given "count" argument value is unsuitable: count cannot be negative.`,
- Subject: expr.Range().Ptr(),
- })
- return diags
- }
-
- return diags
-}
-
-func (n *EvalValidateResource) validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) {
- _, known, forEachDiags := evaluateResourceForEachExpressionKnown(expr, ctx)
- // If the value isn't known then that's the best we can do for now, but
- // we'll check more thoroughly during the plan walk
- if !known {
- return diags
- }
-
- if forEachDiags.HasErrors() {
- diags = diags.Append(forEachDiags)
- }
-
- return diags
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go
deleted file mode 100644
index c9cc0e6d..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package terraform
-
-import (
- "fmt"
-
- "github.com/hashicorp/hcl/v2"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
- "github.com/hashicorp/terraform-plugin-sdk/internal/lang"
- "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that
-// expressions within a particular referenceable block do not reference that
-// same block.
-type EvalValidateSelfRef struct {
- Addr addrs.Referenceable
- Config hcl.Body
- ProviderSchema **ProviderSchema
-}
-
-func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) {
- var diags tfdiags.Diagnostics
- addr := n.Addr
-
- addrStrs := make([]string, 0, 1)
- addrStrs = append(addrStrs, addr.String())
- switch tAddr := addr.(type) {
- case addrs.ResourceInstance:
- // A resource instance may not refer to its containing resource either.
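// An invented configuration showing the kind of reference this check
// rejects, where a resource's own expression points back at itself:
//
//	resource "null_resource" "a" {
//	  triggers = { self_id = null_resource.a.id } # self-reference: error
//	}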
- addrStrs = append(addrStrs, tAddr.ContainingResource().String()) - } - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr) - } - - providerSchema := *n.ProviderSchema - var schema *configschema.Block - switch tAddr := addr.(type) { - case addrs.Resource: - schema, _ = providerSchema.SchemaForResourceAddr(tAddr) - case addrs.ResourceInstance: - schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource()) - } - - if schema == nil { - return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr) - } - - refs, _ := lang.ReferencesInBlock(n.Config, schema) - for _, ref := range refs { - for _, addrStr := range addrStrs { - if ref.Subject.String() == addrStr { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Self-referential block", - Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - } - } - - return nil, diags.NonFatalErr() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go deleted file mode 100644 index 79f44b3f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go +++ /dev/null @@ -1,96 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// EvalSetModuleCallArguments is an EvalNode implementation that sets values -// for arguments of a child module call, for later retrieval during -// expression evaluation. -type EvalSetModuleCallArguments struct { - Module addrs.ModuleCallInstance - Values map[string]cty.Value -} - -// TODO: test -func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) { - ctx.SetModuleCallArguments(n.Module, n.Values) - return nil, nil -} - -// EvalModuleCallArgument is an EvalNode implementation that produces the value -// for a particular variable as will be used by a child module instance. -// -// The result is written into the map given in Values, with its key -// set to the local name of the variable, disregarding the module instance -// address. Any existing values in that map are deleted first. This weird -// interface is a result of trying to be convenient for use with -// EvalContext.SetModuleCallArguments, which expects a map to merge in with -// any existing arguments. -type EvalModuleCallArgument struct { - Addr addrs.InputVariable - Config *configs.Variable - Expr hcl.Expression - - // If this flag is set, any diagnostics are discarded and this operation - // will always succeed, though may produce an unknown value in the - // event of an error. 
- IgnoreDiagnostics bool - - Values map[string]cty.Value -} - -func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) { - // Clear out the existing mapping - for k := range n.Values { - delete(n.Values, k) - } - - wantType := n.Config.Type - name := n.Addr.Name - expr := n.Expr - - if expr == nil { - // Should never happen, but we'll bail out early here rather than - // crash in case it does. We set no value at all in this case, - // making a subsequent call to EvalContext.SetModuleCallArguments - // a no-op. - log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String()) - return nil, nil - } - - val, diags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) - - // We intentionally passed DynamicPseudoType to EvaluateExpr above because - // now we can do our own local type conversion and produce an error message - // with better context if it fails. - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid value for module argument", - Detail: fmt.Sprintf( - "The given value is not suitable for child module variable %q defined at %s: %s.", - name, n.Config.DeclRange.String(), convErr, - ), - Subject: expr.Range().Ptr(), - }) - // We'll return a placeholder unknown value to avoid producing - // redundant downstream errors. - val = cty.UnknownVal(wantType) - } - - n.Values[name] = val - if n.IgnoreDiagnostics { - return nil, nil - } - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go deleted file mode 100644 index d4a8d3cf..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go +++ /dev/null @@ -1,88 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" -) - -// ProviderEvalTree returns the evaluation tree for initializing and -// configuring providers. -func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode { - var provider providers.Interface - - addr := n.Addr - relAddr := addr.ProviderConfig - - seq := make([]EvalNode, 0, 5) - seq = append(seq, &EvalInitProvider{ - TypeName: relAddr.Type, - Addr: addr.ProviderConfig, - }) - - // Input stuff - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - }, - }, - }) - - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkValidate}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - &EvalValidateProvider{ - Addr: relAddr, - Provider: &provider, - Config: config, - }, - }, - }, - }) - - // Apply stuff - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - }, - }, - }) - - // We configure on everything but validate, since validate may - // not have access to all the variables. 
- seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalConfigProvider{ - Addr: relAddr, - Provider: &provider, - Config: config, - }, - }, - }, - }) - - return &EvalSequence{Nodes: seq} -} - -// CloseProviderEvalTree returns the evaluation tree for closing -// provider connections that aren't needed anymore. -func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode { - return &EvalCloseProvider{Addr: addr.ProviderConfig} -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go deleted file mode 100644 index 2d3eabd4..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go +++ /dev/null @@ -1,838 +0,0 @@ -package terraform - -import ( - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/agext/levenshtein" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Evaluator provides the necessary contextual data for evaluating expressions -// for a particular walk operation. -type Evaluator struct { - // Operation defines what type of operation this evaluator is being used - // for. - Operation walkOperation - - // Meta is contextual metadata about the current operation. - Meta *ContextMeta - - // Config is the root node in the configuration tree. - Config *configs.Config - - // VariableValues is a map from variable names to their associated values, - // within the module indicated by ModulePath. VariableValues is modified - // concurrently, and so it must be accessed only while holding - // VariableValuesLock. - // - // The first map level is string representations of addr.ModuleInstance - // values, while the second level is variable names. - VariableValues map[string]map[string]cty.Value - VariableValuesLock *sync.Mutex - - // Schemas is a repository of all of the schemas we should need to - // evaluate expressions. This must be constructed by the caller to - // include schemas for all of the providers, resource types, data sources - // and provisioners used by the given configuration and state. - // - // This must not be mutated during evaluation. - Schemas *Schemas - - // State is the current state, embedded in a wrapper that ensures that - // it can be safely accessed and modified concurrently. - State *states.SyncState - - // Changes is the set of proposed changes, embedded in a wrapper that - // ensures they can be safely accessed and modified concurrently. - Changes *plans.ChangesSync -} - -// Scope creates an evaluation scope for the given module path and optional -// resource. -// -// If the "self" argument is nil then the "self" object is not available -// in evaluated expressions. Otherwise, it behaves as an alias for the given -// address. 
-func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope { - return &lang.Scope{ - Data: data, - SelfAddr: self, - PureOnly: e.Operation != walkApply && e.Operation != walkDestroy, - BaseDir: ".", // Always current working directory for now. - } -} - -// evaluationStateData is an implementation of lang.Data that resolves -// references primarily (but not exclusively) using information from a State. -type evaluationStateData struct { - Evaluator *Evaluator - - // ModulePath is the path through the dynamic module tree to the module - // that references will be resolved relative to. - ModulePath addrs.ModuleInstance - - // InstanceKeyData describes the values, if any, that are accessible due - // to repetition of a containing object using "count" or "for_each" - // arguments. (It is _not_ used for the for_each inside "dynamic" blocks, - // since the user specifies in that case which variable name to locally - // shadow.) - InstanceKeyData InstanceKeyEvalData - - // Operation records the type of walk the evaluationStateData is being used - // for. - Operation walkOperation -} - -// InstanceKeyEvalData is used during evaluation to specify which values, -// if any, should be produced for count.index, each.key, and each.value. -type InstanceKeyEvalData struct { - // CountIndex is the value for count.index, or cty.NilVal if evaluating - // in a context where the "count" argument is not active. - // - // For correct operation, this should always be of type cty.Number if not - // nil. - CountIndex cty.Value - - // EachKey and EachValue are the values for each.key and each.value - // respectively, or cty.NilVal if evaluating in a context where the - // "for_each" argument is not active. These must either both be set - // or neither set. - // - // For correct operation, EachKey must always be either of type cty.String - // or cty.Number if not nil. - EachKey, EachValue cty.Value -} - -// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for -// evaluating in a context that has the given instance key. -func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData { - var countIdx cty.Value - var eachKey cty.Value - var eachVal cty.Value - - if intKey, ok := key.(addrs.IntKey); ok { - countIdx = cty.NumberIntVal(int64(intKey)) - } - - if stringKey, ok := key.(addrs.StringKey); ok { - eachKey = cty.StringVal(string(stringKey)) - eachVal = forEachMap[string(stringKey)] - } - - return InstanceKeyEvalData{ - CountIndex: countIdx, - EachKey: eachKey, - EachValue: eachVal, - } -} - -// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance -// key values at all, suitable for use in contexts where no keyed instance -// is relevant. 
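As an illustrative aside, a minimal standalone sketch of the instance-key mapping that EvalDataForInstanceKey implements above. This is hedged stand-in code rather than the SDK's API: plain int and string keys stand in for the internal addrs.IntKey and addrs.StringKey types, and only the public go-cty module is used.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// evalDataForKey mirrors the dispatch above: an integer key populates only
// count.index, while a string key populates each.key and each.value.
// (Stand-in for the SDK's internal EvalDataForInstanceKey; not its real API.)
func evalDataForKey(key interface{}, forEach map[string]cty.Value) (countIndex, eachKey, eachValue cty.Value) {
	switch k := key.(type) {
	case int:
		countIndex = cty.NumberIntVal(int64(k))
	case string:
		eachKey = cty.StringVal(k)
		eachValue = forEach[k]
	}
	// Values left unset stay cty.NilVal, meaning "not active in this context".
	return
}

func main() {
	forEach := map[string]cty.Value{"a": cty.StringVal("apple")}
	_, k, v := evalDataForKey("a", forEach)
	fmt.Println(k.AsString(), v.AsString()) // a apple
}
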
-var EvalDataForNoInstanceKey = InstanceKeyEvalData{} - -// evaluationStateData must implement lang.Data -var _ lang.Data = (*evaluationStateData)(nil) - -func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "index": - idxVal := d.InstanceKeyData.CountIndex - if idxVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "count" in non-counted context`, - Detail: fmt.Sprintf(`The "count" object can be used only in "resource" and "data" blocks, and only when the "count" argument is set.`), - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.Number), diags - } - return idxVal, diags - - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "count" attribute`, - Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var returnVal cty.Value - switch addr.Name { - - case "key": - returnVal = d.InstanceKeyData.EachKey - case "value": - returnVal = d.InstanceKeyData.EachValue - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "each" attribute`, - Detail: fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - if returnVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "each" in context without for_each`, - Detail: fmt.Sprintf(`The "each" object can be used only in "resource" blocks, and only when the "for_each" argument is set.`), - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.DynamicPseudoType), diags - } - return returnVal, diags -} - -func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // First we'll make sure the requested value is declared in configuration, - // so we can produce a nice message if not. - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. 
- panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath)) - } - - config := moduleConfig.Module.Variables[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Variables { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } else { - suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared input variable`, - Detail: fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - wantType := cty.DynamicPseudoType - if config.Type != cty.NilType { - wantType = config.Type - } - - d.Evaluator.VariableValuesLock.Lock() - defer d.Evaluator.VariableValuesLock.Unlock() - - // During the validate walk, input variables are always unknown so - // that we are validating the configuration for all possible input values - // rather than for a specific set. Checking against a specific set of - // input values then happens during the plan walk. - // - // This is important because otherwise the validation walk will tend to be - // overly strict, requiring expressions throughout the configuration to - // be complicated to accommodate all possible inputs, whereas returning - // known here allows for simpler patterns like using input values as - // guards to broadly enable/disable resources, avoid processing things - // that are disabled, etc. Terraform's static validation leans towards - // being liberal in what it accepts because the subsequent plan walk has - // more information available and so can be more conservative. - if d.Operation == walkValidate { - return cty.UnknownVal(wantType), diags - } - - moduleAddrStr := d.ModulePath.String() - vals := d.Evaluator.VariableValues[moduleAddrStr] - if vals == nil { - return cty.UnknownVal(wantType), diags - } - - val, isSet := vals[addr.Name] - if !isSet { - if config.Default != cty.NilVal { - return config.Default, diags - } - return cty.UnknownVal(wantType), diags - } - - var err error - val, err = convert.Convert(val, wantType) - if err != nil { - // We should never get here because this problem should've been caught - // during earlier validation, but we'll do something reasonable anyway. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Incorrect variable type`, - Detail: fmt.Sprintf(`The resolved value of variable %q is not appropriate: %s.`, addr.Name, err), - Subject: &config.DeclRange, - }) - // Stub out our return value so that the semantic checker doesn't - // produce redundant downstream errors. - val = cty.UnknownVal(wantType) - } - - return val, diags -} - -func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // First we'll make sure the requested value is declared in configuration, - // so we can produce a nice message if not. - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. 
- panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath)) - } - - config := moduleConfig.Module.Locals[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Locals { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared local value`, - Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath)) - if val == cty.NilVal { - // Not evaluated yet? - val = cty.DynamicVal - } - - return val, diags -} - -func (d *evaluationStateData) GetModuleInstance(addr addrs.ModuleCallInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Output results live in the module that declares them, which is one of - // the child module instances of our current module path. - moduleAddr := addr.ModuleInstance(d.ModulePath) - - // We'll consult the configuration to see what output names we are - // expecting, so we can ensure the resulting object is of the expected - // type even if our data is incomplete for some reason. - moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // should never happen, since this should've been caught during - // static validation. - panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr)) - } - outputConfigs := moduleConfig.Module.Outputs - - vals := map[string]cty.Value{} - for n := range outputConfigs { - addr := addrs.OutputValue{Name: n}.Absolute(moduleAddr) - - // If a pending change is present in our current changeset then its value - // takes priority over what's in state. (It will usually be the same but - // will differ if the new value is unknown during planning.) - if changeSrc := d.Evaluator.Changes.GetOutputChange(addr); changeSrc != nil { - change, err := changeSrc.Decode() - if err != nil { - // This should happen only if someone has tampered with a plan - // file, so we won't bother with a pretty error for it. - diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err)) - vals[n] = cty.DynamicVal - continue - } - // We care only about the "after" value, which is the value this output - // will take on after the plan is applied. - vals[n] = change.After - } else { - os := d.Evaluator.State.OutputValue(addr) - if os == nil { - // Not evaluated yet? - vals[n] = cty.DynamicVal - continue - } - vals[n] = os.Value - } - } - return cty.ObjectVal(vals), diags -} - -func (d *evaluationStateData) GetModuleInstanceOutput(addr addrs.ModuleCallOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Output results live in the module that declares them, which is one of - // the child module instances of our current module path. - absAddr := addr.AbsOutputValue(d.ModulePath) - moduleAddr := absAddr.Module - - // First we'll consult the configuration to see if an output of this - // name is declared at all. 
- moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // this doesn't happen in normal circumstances due to our validation - // pass, but it can turn up in some unusual situations, like in the - // "terraform console" repl where arbitrary expressions can be - // evaluated. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared module`, - Detail: fmt.Sprintf(`The configuration contains no %s.`, moduleAddr), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - config := moduleConfig.Module.Outputs[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Outputs { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared output value`, - Detail: fmt.Sprintf(`An output value with the name %q has not been declared in %s.%s`, addr.Name, moduleDisplayAddr(moduleAddr), suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - // If a pending change is present in our current changeset then its value - // takes priority over what's in state. (It will usually be the same but - // will differ if the new value is unknown during planning.) - if changeSrc := d.Evaluator.Changes.GetOutputChange(absAddr); changeSrc != nil { - change, err := changeSrc.Decode() - if err != nil { - // This should happen only if someone has tampered with a plan - // file, so we won't bother with a pretty error for it. - diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", absAddr, err)) - return cty.DynamicVal, diags - } - // We care only about the "after" value, which is the value this output - // will take on after the plan is applied. - return change.After, diags - } - - os := d.Evaluator.State.OutputValue(absAddr) - if os == nil { - // Not evaluated yet? - return cty.DynamicVal, diags - } - - return os.Value, diags -} - -func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "cwd": - wd, err := os.Getwd() - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Failed to get working directory`, - Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - return cty.StringVal(filepath.ToSlash(wd)), diags - - case "module": - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. 
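The lookup order used throughout these getters, where a pending planned change wins over state and a missing value decays to a dynamic placeholder, reduces to a short sketch. Here plannedChange and stateValue are hypothetical stand-ins for the Changes.GetOutputChange and State.OutputValue lookups, not functions in this file:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// Change mirrors the shape of a decoded change: only the "after" value
// matters here, since that is what the output takes once the plan applies.
type Change struct{ After cty.Value }

// plannedChange and stateValue are hypothetical stand-ins for the plan and
// state lookups used above.
func plannedChange(addr string) *Change   { return nil }
func stateValue(addr string) cty.Value    { return cty.StringVal("from-state") }

func outputValue(addr string) cty.Value {
	if change := plannedChange(addr); change != nil {
		return change.After // pending planned value takes priority
	}
	if v := stateValue(addr); v != cty.NilVal {
		return v
	}
	return cty.DynamicVal // not evaluated yet
}

func main() {
	fmt.Println(outputValue("module.child.output.id").AsString()) // from-state
}
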
- panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath)) - } - sourceDir := moduleConfig.Module.SourceDir - return cty.StringVal(filepath.ToSlash(sourceDir)), diags - - case "root": - sourceDir := d.Evaluator.Config.Module.SourceDir - return cty.StringVal(filepath.ToSlash(sourceDir)), diags - - default: - suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"}) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "path" attribute`, - Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - // First we'll consult the configuration to see if an resource of this - // name is declared at all. - moduleAddr := d.ModulePath - moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. - panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr)) - } - - config := moduleConfig.Module.ResourceByAddr(addr) - if config == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Type, addr.Name, moduleDisplayAddr(moduleAddr)), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - rs := d.Evaluator.State.Resource(addr.Absolute(d.ModulePath)) - - if rs == nil { - // we must return DynamicVal so that both interpretations - // can proceed without generating errors, and we'll deal with this - // in a later step where more information is gathered. - // (In practice we should only end up here during the validate walk, - // since later walks should have at least partial states populated - // for all resources in the configuration.) - return cty.DynamicVal, diags - } - - // Break out early during validation, because resource may not be expanded - // yet and indexed references may show up as invalid. - if d.Operation == walkValidate { - return cty.DynamicVal, diags - } - - return d.getResourceInstancesAll(addr, rng, config, rs, rs.ProviderConfig) -} - -func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng tfdiags.SourceRange, config *configs.Resource, rs *states.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - instAddr := addrs.ResourceInstance{Resource: addr, Key: addrs.NoKey} - - schema := d.getResourceSchema(addr, providerAddr) - if schema == nil { - // This shouldn't happen, since validation before we get here should've - // taken care of it, but we'll show a reasonable error message anyway. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource type schema`, - Detail: fmt.Sprintf("No schema is available for %s in %s. 
This is a bug in Terraform and should be reported.", addr, providerAddr), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - switch rs.EachMode { - case states.NoEach: - ty := schema.ImpliedType() - is := rs.Instances[addrs.NoKey] - if is == nil || is.Current == nil { - // Assume we're dealing with an instance that hasn't been created yet. - return cty.UnknownVal(ty), diags - } - - if is.Current.Status == states.ObjectPlanned { - // If there's a pending change for this instance in our plan, we'll prefer - // that. This is important because the state can't represent unknown values - // and so its data is inaccurate when changes are pending. - if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr.Absolute(d.ModulePath), states.CurrentGen); change != nil { - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", addr.Absolute(d.ModulePath), err), - Subject: &config.DeclRange, - }) - return cty.UnknownVal(ty), diags - } - return val, diags - } else { - // If the object is in planned status then we should not - // get here, since we should've found a pending value - // in the plan above instead. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", addr), - Subject: &config.DeclRange, - }) - return cty.UnknownVal(ty), diags - } - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here - // we should've upgraded the state data already. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", addr.Absolute(d.ModulePath), err), - Subject: &config.DeclRange, - }) - return cty.UnknownVal(ty), diags - } - - return ios.Value, diags - - case states.EachList: - // We need to infer the length of our resulting tuple by searching - // for the max IntKey in our instances map. - length := 0 - for k := range rs.Instances { - if ik, ok := k.(addrs.IntKey); ok { - if int(ik) >= length { - length = int(ik) + 1 - } - } - } - - vals := make([]cty.Value, length) - for i := 0; i < length; i++ { - ty := schema.ImpliedType() - key := addrs.IntKey(i) - is, exists := rs.Instances[key] - if exists && is.Current != nil { - instAddr := addr.Instance(key).Absolute(d.ModulePath) - - // Prefer pending value in plan if present. See getResourceInstanceSingle - // comment for the rationale. - if is.Current.Status == states.ObjectPlanned { - if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[i] = val - continue - } else { - // If the object is in planned status then we should not - // get here, since we should've found a pending value - // in the plan above instead. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), - Subject: &config.DeclRange, - }) - continue - } - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here - // we should've upgraded the state data already. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[i] = ios.Value - } else { - // There shouldn't normally be "gaps" in our list but we'll - // allow it under the assumption that we're in a weird situation - // where e.g. someone has run "terraform state mv" to reorder - // a list and left a hole behind. - vals[i] = cty.UnknownVal(schema.ImpliedType()) - } - } - - // We use a tuple rather than a list here because resource schemas may - // include dynamically-typed attributes, which will then cause each - // instance to potentially have a different runtime type even though - // they all conform to the static schema. - return cty.TupleVal(vals), diags - - case states.EachMap: - ty := schema.ImpliedType() - vals := make(map[string]cty.Value, len(rs.Instances)) - for k, is := range rs.Instances { - if sk, ok := k.(addrs.StringKey); ok { - instAddr := addr.Instance(k).Absolute(d.ModulePath) - - // Prefer pending value in plan if present. See getResourceInstanceSingle - // comment for the rationale. - // Prefer pending value in plan if present. See getResourceInstanceSingle - // comment for the rationale. - if is.Current.Status == states.ObjectPlanned { - if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[string(sk)] = val - continue - } else { - // If the object is in planned status then we should not - // get here, since we should've found a pending value - // in the plan above instead. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), - Subject: &config.DeclRange, - }) - continue - } - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here - // we should've upgraded the state data already. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[string(sk)] = ios.Value - } - } - - // We use an object rather than a map here because resource schemas may - // include dynamically-typed attributes, which will then cause each - // instance to potentially have a different runtime type even though - // they all conform to the static schema. - return cty.ObjectVal(vals), diags - - default: - // Should never happen since caller should deal with other modes - panic(fmt.Sprintf("unsupported EachMode %s", rs.EachMode)) - } -} - -func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.AbsProviderConfig) *configschema.Block { - providerType := providerAddr.ProviderConfig.Type - schemas := d.Evaluator.Schemas - schema, _ := schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) - return schema -} - -func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "workspace": - workspaceName := d.Evaluator.Meta.Env - return cty.StringVal(workspaceName), diags - - case "env": - // Prior to Terraform 0.12 there was an attribute "env", which was - // an alias name for "workspace". This was deprecated and is now - // removed. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "terraform" attribute`, - Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. The "state environment" concept was rename to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`, - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "terraform" attribute`, - Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -// nameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func nameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} - -// moduleDisplayAddr returns a string describing the given module instance -// address that is appropriate for returning to users in situations where the -// root module is possible. Specifically, it returns "the root module" if the -// root module instance is given, or a string representation of the module -// address otherwise. 
-func moduleDisplayAddr(addr addrs.ModuleInstance) string { - switch { - case addr.IsRoot(): - return "the root module" - default: - return addr.String() - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go deleted file mode 100644 index 35a8be0c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go +++ /dev/null @@ -1,299 +0,0 @@ -package terraform - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// StaticValidateReferences checks the given references against schemas and -// other statically-checkable rules, producing error diagnostics if any -// problems are found. -// -// If this method returns errors for a particular reference then evaluating -// that reference is likely to generate a very similar error, so callers should -// not run this method and then also evaluate the source expression(s) and -// merge the two sets of diagnostics together, since this will result in -// confusing redundant errors. -// -// This method can find more errors than can be found by evaluating an -// expression with a partially-populated scope, since it checks the referenced -// names directly against the schema rather than relying on evaluation errors. -// -// The result may include warning diagnostics if, for example, deprecated -// features are referenced. -func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - for _, ref := range refs { - moreDiags := d.staticValidateReference(ref, self) - diags = diags.Append(moreDiags) - } - return diags -} - -func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { - modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if modCfg == nil { - // This is a bug in the caller rather than a problem with the - // reference, but rather than crashing out here in an unhelpful way - // we'll just ignore it and trust a different layer to catch it. - return nil - } - - if ref.Subject == addrs.Self { - // The "self" address is a special alias for the address given as - // our self parameter here, if present. - if self == nil { - var diags tfdiags.Diagnostics - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "self" reference`, - // This detail message mentions some current practice that - // this codepath doesn't really "know about". If the "self" - // object starts being supported in more contexts later then - // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - return diags - } - - synthRef := *ref // shallow copy - synthRef.Subject = self - ref = &synthRef - } - - switch addr := ref.Subject.(type) { - - // For static validation we validate both resource and resource instance references the same way. 
- // We mostly disregard the index, though we do some simple validation of
- // its _presence_ in staticValidateSingleResourceReference and
- // staticValidateMultiResourceReference respectively.
- case addrs.Resource:
- var diags tfdiags.Diagnostics
- diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange))
- diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange))
- return diags
- case addrs.ResourceInstance:
- var diags tfdiags.Diagnostics
- diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange))
- diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange))
- return diags
-
- // We also handle all module call references the same way, disregarding index.
- case addrs.ModuleCall:
- return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange)
- case addrs.ModuleCallInstance:
- return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange)
- case addrs.ModuleCallOutput:
- // This one is a funny one because we will take the output name referenced
- // and use it to fake up a "remaining" that would make sense for the
- // module call itself, rather than for the specific output, and then
- // we can just re-use our static module call validation logic.
- remain := make(hcl.Traversal, len(ref.Remaining)+1)
- copy(remain[1:], ref.Remaining)
- remain[0] = hcl.TraverseAttr{
- Name: addr.Name,
-
- // Using the whole reference as the source range here doesn't exactly
- // match how HCL would normally generate an attribute traversal,
- // but is close enough for our purposes.
- SrcRange: ref.SourceRange.ToHCL(),
- }
- return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange)
-
- default:
- // Anything else we'll just permit through without any static validation
- // and let it be caught during dynamic evaluation, in evaluate.go.
- return nil
- }
-}
-
-func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics {
- // If we have at least one step in "remain" and this resource has
- // "count" set then we know for sure this is invalid because we have
- // something like:
- // aws_instance.foo.bar
- // ...when we really need
- // aws_instance.foo[count.index].bar
-
- // It is _not_ safe to do this check when remain is empty, because that
- // would also match aws_instance.foo[count.index].bar due to `count.index`
- // not being statically-resolvable as part of a reference, and match
- // direct references to the whole aws_instance.foo tuple.
- if len(remain) == 0 {
- return nil
- }
-
- var diags tfdiags.Diagnostics
-
- cfg := modCfg.Module.ResourceByAddr(addr)
- if cfg == nil {
- // We'll just bail out here and catch this in our subsequent call to
- // staticValidateResourceReference, then. 
- return diags - } - - if cfg.Count != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource instance key`, - Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr), - Subject: rng.ToHCL().Ptr(), - }) - } - if cfg.ForEach != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource instance key`, - Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr), - Subject: rng.ToHCL().Ptr(), - }) - } - - return diags -} - -func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource()) - if cfg == nil { - // We'll just bail out here and catch this in our subsequent call to - // staticValidateResourceReference, then. - return diags - } - - if addr.Key == addrs.NoKey { - // This is a different path into staticValidateSingleResourceReference - return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng) - } else { - if cfg.Count == nil && cfg.ForEach == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Unexpected resource instance key`, - Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()), - Subject: rng.ToHCL().Ptr(), - }) - } - } - - return diags -} - -func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - var modeAdjective string - switch addr.Mode { - case addrs.ManagedResourceMode: - modeAdjective = "managed" - case addrs.DataResourceMode: - modeAdjective = "data" - default: - // should never happen - modeAdjective = "" - } - - cfg := modCfg.Module.ResourceByAddr(addr) - if cfg == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path)), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - // Normally accessing this directly is wrong because it doesn't take into - // account provider inheritance, etc but it's okay here because we're only - // paying attention to the type anyway. - providerType := cfg.ProviderConfigAddr().Type - schema, _ := d.Evaluator.Schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) - - if schema == nil { - // Prior validation should've taken care of a resource block with an - // unsupported type, so we should never get here but we'll handle it - // here anyway for robustness. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid resource type`, - Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerType), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - // As a special case we'll detect attempts to access an attribute called - // "count" and produce a special error for it, since versions of Terraform - // prior to v0.12 offered this as a weird special case that we can no - // longer support. - if len(remain) > 0 { - if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid resource count attribute`, - Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - } - - // If we got this far then we'll try to validate the remaining traversal - // steps against our schema. - moreDiags := schema.StaticValidateTraversal(remain) - diags = diags.Append(moreDiags) - - return diags -} - -func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // For now, our focus here is just in testing that the referenced module - // call exists. All other validation is deferred until evaluation time. - _, exists := modCfg.Module.ModuleCalls[addr.Name] - if !exists { - var suggestions []string - for name := range modCfg.Module.ModuleCalls { - suggestions = append(suggestions, name) - } - sort.Strings(suggestions) - suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared module`, - Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - return diags -} - -// moduleConfigDisplayAddr returns a string describing the given module -// address that is appropriate for returning to users in situations where the -// root module is possible. Specifically, it returns "the root module" if the -// root module instance is given, or a string representation of the module -// address otherwise. 
-func moduleConfigDisplayAddr(addr addrs.Module) string {
- switch {
- case addr.IsRoot():
- return "the root module"
- default:
- return addr.String()
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go
deleted file mode 100644
index 97c77bdb..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package terraform
-
-import "os"
-
-// This file holds feature flags for the next release
-
-var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != ""
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go
deleted file mode 100644
index 36e295b6..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package terraform
-
-import (
- "fmt"
- "log"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/dag"
-)
-
-// Graph represents the graph that Terraform uses to represent resources
-// and their dependencies.
-type Graph struct {
- // Graph is the actual DAG. This is embedded so you can call the DAG
- // methods directly.
- dag.AcyclicGraph
-
- // Path is the path in the module tree that this Graph represents.
- Path addrs.ModuleInstance
-
- // debugName is a name for reference in the debug output. This is usually
- // to indicate what the topmost builder was, and if this graph is a shadow
- // or not.
- debugName string
-}
-
-func (g *Graph) DirectedGraph() dag.Grapher {
- return &g.AcyclicGraph
-}
-
-// Walk walks the graph with the given walker for callbacks. The graph
-// will be walked with full parallelism, so the walker should expect
-// to be called concurrently.
-func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics {
- return g.walk(walker)
-}
-
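Before the implementation, a hedged standalone sketch of the contract Walk promises: each vertex is visited only once all of its dependencies have finished, with maximal parallelism. This is illustrative Go only, not the SDK's internal dag package:

package main

import (
	"fmt"
	"sync"
)

// walk visits every vertex of an acyclic dependency graph concurrently,
// releasing each vertex only once all of its dependencies have finished.
// (Stand-in code; the real walker lives in the internal dag package.)
func walk(deps map[string][]string, visit func(string)) {
	done := make(map[string]chan struct{}, len(deps))
	for v := range deps {
		done[v] = make(chan struct{})
	}
	var wg sync.WaitGroup
	for v, reqs := range deps {
		wg.Add(1)
		go func(v string, reqs []string) {
			defer wg.Done()
			for _, r := range reqs {
				<-done[r] // block until the dependency's visit completes
			}
			visit(v)
			close(done[v]) // release everything that depends on v
		}(v, reqs)
	}
	wg.Wait()
}

func main() {
	deps := map[string][]string{"root": {"a", "b"}, "a": {}, "b": {"a"}}
	walk(deps, func(v string) { fmt.Println("visit", v) }) // a, then b, then root
}

-func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics {
- // The callbacks for entering/exiting a graph
- ctx := walker.EnterPath(g.Path)
- defer walker.ExitPath(g.Path)
-
- // Get the path for logs
- path := ctx.Path().String()
-
- debugName := "walk-graph.json"
- if g.debugName != "" {
- debugName = g.debugName + "-" + debugName
- }
-
- // Walk the graph.
- var walkFn dag.WalkFunc
- walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) {
- log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v)
- g.DebugVisitInfo(v, g.debugName)
-
- defer func() {
- log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v))
- }()
-
- walker.EnterVertex(v)
- defer walker.ExitVertex(v, diags)
-
- // vertexCtx is the context that we use when evaluating. This
- // is normally the context of our graph but can be overridden
- // with a GraphNodeSubPath impl.
- vertexCtx := ctx
- if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
- vertexCtx = walker.EnterPath(pn.Path())
- defer walker.ExitPath(pn.Path())
- }
-
- // If the node is eval-able, then evaluate it.
- if ev, ok := v.(GraphNodeEvalable); ok {
- tree := ev.EvalTree()
- if tree == nil {
- panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v))
- }
-
- // Allow the walker to change our tree if needed. Eval,
- // then callback with the output. 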
- log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) - - tree = walker.EnterEvalTree(v, tree) - output, err := Eval(tree, vertexCtx) - diags = diags.Append(walker.ExitEvalTree(v, output, err)) - if diags.HasErrors() { - return - } - } - - // If the node is dynamically expanded, then expand it - if ev, ok := v.(GraphNodeDynamicExpandable); ok { - log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) - - g, err := ev.DynamicExpand(vertexCtx) - if err != nil { - diags = diags.Append(err) - return - } - if g != nil { - // Walk the subgraph - log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v)) - subDiags := g.walk(walker) - diags = diags.Append(subDiags) - if subDiags.HasErrors() { - log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v)) - return - } - log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v)) - } else { - log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v)) - } - } - - // If the node has a subgraph, then walk the subgraph - if sn, ok := v.(GraphNodeSubgraph); ok { - log.Printf("[TRACE] vertex %q: entering static subgraph", dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) - - subDiags := sn.Subgraph().(*Graph).walk(walker) - if subDiags.HasErrors() { - log.Printf("[TRACE] vertex %q: static subgraph encountered errors", dag.VertexName(v)) - return - } - log.Printf("[TRACE] vertex %q: static subgraph completed successfully", dag.VertexName(v)) - } - - return - } - - return g.AcyclicGraph.Walk(walkFn) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go deleted file mode 100644 index ee2c5857..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go +++ /dev/null @@ -1,85 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// GraphBuilder is an interface that can be implemented and used with -// Terraform to build the graph that Terraform walks. -type GraphBuilder interface { - // Build builds the graph for the given module path. It is up to - // the interface implementation whether this build should expand - // the graph or not. - Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) -} - -// BasicGraphBuilder is a GraphBuilder that builds a graph out of a -// series of transforms and (optionally) validates the graph is a valid -// structure. 
-type BasicGraphBuilder struct {
- Steps []GraphTransformer
- Validate bool
- // Optional name to add to the graph debug log
- Name string
-}
-
-func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
- var diags tfdiags.Diagnostics
- g := &Graph{Path: path}
-
- var lastStepStr string
- for _, step := range b.Steps {
- if step == nil {
- continue
- }
- log.Printf("[TRACE] Executing graph transform %T", step)
-
- stepName := fmt.Sprintf("%T", step)
- dot := strings.LastIndex(stepName, ".")
- if dot >= 0 {
- stepName = stepName[dot+1:]
- }
-
- debugOp := g.DebugOperation(stepName, "")
- err := step.Transform(g)
-
- errMsg := ""
- if err != nil {
- errMsg = err.Error()
- }
- debugOp.End(errMsg)
-
- if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr {
- log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s------", step, thisStepStr)
- lastStepStr = thisStepStr
- } else {
- log.Printf("[TRACE] Completed graph transform %T (no changes)", step)
- }
-
- if err != nil {
- if nf, isNF := err.(tfdiags.NonFatalError); isNF {
- diags = diags.Append(nf.Diagnostics)
- } else {
- diags = diags.Append(err)
- return g, diags
- }
- }
- }
-
- // Validate the graph structure
- if b.Validate {
- if err := g.Validate(); err != nil {
- log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
- diags = diags.Append(err)
- return nil, diags
- }
- }
-
- return g, diags
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go
deleted file mode 100644
index 91898761..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package terraform
-
-import (
- "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/configs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/dag"
- "github.com/hashicorp/terraform-plugin-sdk/internal/plans"
- "github.com/hashicorp/terraform-plugin-sdk/internal/states"
- "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-// ApplyGraphBuilder implements GraphBuilder and is responsible for building
-// a graph for applying a Terraform diff.
-//
-// Because the graph is built from the diff (vs. the config or state),
-// this helps ensure that the apply-time graph doesn't modify any resources
-// that aren't explicitly in the diff. There are other scenarios where the
-// apply can deviate from the diff, so this is just one layer of protection.
-type ApplyGraphBuilder struct {
- // Config is the configuration tree that the diff was built from.
- Config *configs.Config
-
- // Changes describes the changes that we need to apply.
- Changes *plans.Changes
-
- // State is the current state
- State *states.State
-
- // Components is a factory for the plug-in components (providers and
- // provisioners) available for use.
- Components contextComponentFactory
-
- // Schemas is the repository of schemas we will draw from to analyse
- // the configuration.
- Schemas *Schemas
-
- // Targets are resources to target. This is only required to make sure
- // unnecessary outputs aren't included in the apply graph. The plan
- // builder successfully handles targeting resources. In the future,
- // outputs should go into the diff so that this is unnecessary. 
- Targets []addrs.Targetable
-
- // DisableReduce, if true, will not reduce the graph. Great for testing.
- DisableReduce bool
-
- // Destroy, if true, represents a pure destroy operation
- Destroy bool
-
- // Validate will do structural validation of the graph.
- Validate bool
-}
-
-// See GraphBuilder
-func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
- return (&BasicGraphBuilder{
- Steps: b.Steps(),
- Validate: b.Validate,
- Name: "ApplyGraphBuilder",
- }).Build(path)
-}
-
-// See GraphBuilder
-func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
- // Custom factory for creating providers.
- concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
- return &NodeApplyableProvider{
- NodeAbstractProvider: a,
- }
- }
-
- concreteResource := func(a *NodeAbstractResource) dag.Vertex {
- return &NodeApplyableResource{
- NodeAbstractResource: a,
- }
- }
-
- concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex {
- return &NodeDestroyResource{
- NodeAbstractResource: a,
- }
- }
-
- concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
- return &NodeApplyableResourceInstance{
- NodeAbstractResourceInstance: a,
- }
- }
-
- steps := []GraphTransformer{
- // Creates all the resources represented in the config. During apply,
- // we use this just to ensure that the whole-resource metadata is
- // updated to reflect things such as whether the count argument is
- // set in config, or which provider configuration manages each resource.
- &ConfigTransformer{
- Concrete: concreteResource,
- Config: b.Config,
- },
-
- // Creates all the resource instances represented in the diff, along
- // with dependency edges against the whole-resource nodes added by
- // ConfigTransformer above.
- &DiffTransformer{
- Concrete: concreteResourceInstance,
- State: b.State,
- Changes: b.Changes,
- },
-
- // Creates extra cleanup nodes for any entire resources that are
- // no longer present in config, so we can make sure we clean up the
- // leftover empty resource states after the instances have been
- // destroyed.
- // (We don't track this particular type of change in the plan because
- // it's just cleanup of our own state object, and so doesn't affect
- // any real remote objects or consumable outputs.)
- &OrphanResourceTransformer{
- Concrete: concreteOrphanResource,
- Config: b.Config,
- State: b.State,
- },
-
- // Create orphan output nodes
- &OrphanOutputTransformer{Config: b.Config, State: b.State},
-
- // Attach the configuration to any resources
- &AttachResourceConfigTransformer{Config: b.Config},
-
- // Attach the state
- &AttachStateTransformer{State: b.State},
-
- // Provisioner-related transformations
- &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()},
- &ProvisionerTransformer{},
-
- // Add root variables
- &RootVariableTransformer{Config: b.Config},
-
- // Add the local values
- &LocalTransformer{Config: b.Config},
-
- // Add the outputs
- &OutputTransformer{Config: b.Config},
-
- // Add module variables
- &ModuleVariableTransformer{Config: b.Config},
-
- // add providers
- TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
-
- // Remove modules no longer present in the config
- &RemovedModuleTransformer{Config: b.Config, State: b.State},
-
- // Must attach schemas before ReferenceTransformer so that we can
- // analyze the configuration to find references. 
- &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Destruction ordering - &DestroyEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - }, - - &CBDEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - Destroy: b.Destroy, - }, - - // Handle destroy time transformations for output and local values. - // Reverse the edges from outputs and locals, so that - // interpolations don't fail during destroy. - // Create a destroy node for outputs to remove them from the state. - // Prune unreferenced values, which may have interpolations that can't - // be resolved. - GraphTransformIf( - func() bool { return b.Destroy }, - GraphTransformMulti( - &DestroyValueReferenceTransformer{}, - &DestroyOutputTransformer{}, - &PruneUnusedValuesTransformer{}, - ), - ), - - // Add the node to fix the state count boundaries - &CountBoundaryTransformer{ - Config: b.Config, - }, - - // Target - &TargetsTransformer{Targets: b.Targets}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go deleted file mode 100644 index 32fe5f97..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go +++ /dev/null @@ -1,97 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for -// planning a pure-destroy. -// -// Planning a pure destroy operation is simple because we can ignore most -// ordering configuration and simply reverse the state. -type DestroyPlanGraphBuilder struct { - // Config is the configuration tree to build the plan from. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // Validate will do structural validation of the graph. 
- Validate bool -} - -// See GraphBuilder -func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "DestroyPlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { - concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodePlanDestroyableResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Creates nodes for the resource instances tracked in the state. - &StateTransformer{ - ConcreteCurrent: concreteResourceInstance, - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Destruction ordering. We require this only so that - // targeting below will prune the correct things. - &DestroyEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - }, - - // Target. Note we don't set "Destroy: true" here since we already - // created proper destroy ordering. - &TargetsTransformer{Targets: b.Targets}, - - // Single root - &RootTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go deleted file mode 100644 index 8a0bcf5b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go +++ /dev/null @@ -1,108 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable -// for evaluating in-memory values (input variables, local values, output -// values) in the state without any other side-effects. -// -// This graph is used only in weird cases, such as the "terraform console" -// CLI command, where we need to evaluate expressions against the state -// without taking any other actions. -// -// The generated graph will include nodes for providers, resources, etc -// just to allow indirect dependencies to be resolved, but these nodes will -// not take any actions themselves since we assume that their parts of the -// state, if any, are already complete. -// -// Although the providers are never configured, they must still be available -// in order to obtain schema information used for type checking, etc. -type EvalGraphBuilder struct { - // Config is the configuration tree. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. 
- Components contextComponentFactory
-
- // Schemas is the repository of schemas we will draw from to analyse
- // the configuration.
- Schemas *Schemas
-}
-
-// See GraphBuilder
-func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
- return (&BasicGraphBuilder{
- Steps: b.Steps(),
- Validate: true,
- Name: "EvalGraphBuilder",
- }).Build(path)
-}
-
-// See GraphBuilder
-func (b *EvalGraphBuilder) Steps() []GraphTransformer {
- concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
- return &NodeEvalableProvider{
- NodeAbstractProvider: a,
- }
- }
-
- steps := []GraphTransformer{
- // Creates all the data resources that aren't in the state. This will also
- // add any orphans from scaling in as destroy nodes.
- &ConfigTransformer{
- Concrete: nil, // just use the abstract type
- Config: b.Config,
- Unique: true,
- },
-
- // Attach the state
- &AttachStateTransformer{State: b.State},
-
- // Attach the configuration to any resources
- &AttachResourceConfigTransformer{Config: b.Config},
-
- // Add root variables
- &RootVariableTransformer{Config: b.Config},
-
- // Add the local values
- &LocalTransformer{Config: b.Config},
-
- // Add the outputs
- &OutputTransformer{Config: b.Config},
-
- // Add module variables
- &ModuleVariableTransformer{Config: b.Config},
-
- TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
-
- // Must attach schemas before ReferenceTransformer so that we can
- // analyze the configuration to find references.
- &AttachSchemaTransformer{Schemas: b.Schemas},
-
- // Connect so that the references are ready for targeting. We'll
- // have to connect again later for providers and so on.
- &ReferenceTransformer{},
-
- // Although we don't configure providers, we do still start them up
- // to get their schemas, and so we must shut them down again here.
- &CloseProviderTransformer{},
-
- // Single root
- &RootTransformer{},
-
- // Remove redundant edges to simplify the graph.
- &TransitiveReductionTransformer{},
- }
-
- return steps
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go
deleted file mode 100644
index dcbb10e6..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package terraform
-
-import (
- "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/configs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/dag"
- "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-// ImportGraphBuilder implements GraphBuilder and is responsible for building
-// a graph for importing resources into Terraform. This is a much, much
-// simpler graph than a normal configuration graph.
-type ImportGraphBuilder struct {
- // ImportTargets is the list of resources to import.
- ImportTargets []*ImportTarget
-
- // Config is the configuration to build the graph from. See ImportOpts.Config.
- Config *configs.Config
-
- // Components is the factory for our available plugin components.
- Components contextComponentFactory
-
- // Schemas is the repository of schemas we will draw from to analyse
- // the configuration.
- Schemas *Schemas
-}
-
-// Build builds the graph according to the steps returned by Steps.
-func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: true, - Name: "ImportGraphBuilder", - }).Build(path) -} - -// Steps returns the ordered list of GraphTransformers that must be executed -// to build a complete graph. -func (b *ImportGraphBuilder) Steps() []GraphTransformer { - // Get the module. If we don't have one, we just use an empty tree - // so that the transform still works but does nothing. - config := b.Config - if config == nil { - config = configs.NewEmptyConfig() - } - - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Create all our resources from the configuration and state - &ConfigTransformer{Config: config}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Add the import steps - &ImportStateTransformer{Targets: b.ImportTargets}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, config), - - // This validates that the providers only depend on variables - &ImportProviderValidateTransformer{}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Single root - &RootTransformer{}, - - // Optimize - &TransitiveReductionTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go deleted file mode 100644 index bcd119b3..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go +++ /dev/null @@ -1,204 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// PlanGraphBuilder implements GraphBuilder and is responsible for building -// a graph for planning (creating a Terraform Diff). -// -// The primary difference between this graph and others: -// -// * Based on the config since it represents the target state -// -// * Ignores lifecycle options since no lifecycle events occur here. This -// simplifies the graph significantly since complex transforms such as -// create-before-destroy can be completely ignored. -// -type PlanGraphBuilder struct { - // Config is the configuration tree to build a plan from. 
- Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Validate will do structural validation of the graph. - Validate bool - - // CustomConcrete can be set to customize the node types created - // for various parts of the plan. This is useful in order to customize - // the plan behavior. - CustomConcrete bool - ConcreteProvider ConcreteProviderNodeFunc - ConcreteResource ConcreteResourceNodeFunc - ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc - - once sync.Once -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "PlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Steps() []GraphTransformer { - b.once.Do(b.init) - - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - steps := []GraphTransformer{ - // Creates all the resources represented in the config - &ConfigTransformer{ - Concrete: b.ConcreteResource, - Config: b.Config, - }, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add orphan resources - &OrphanResourceInstanceTransformer{ - Concrete: b.ConcreteResourceOrphan, - State: b.State, - Config: b.Config, - }, - - // We also need nodes for any deposed instance objects present in the - // state, so we can plan to destroy them. (This intentionally - // skips creating nodes for _current_ objects, since ConfigTransformer - // created nodes that will do that during DynamicExpand.) - &StateTransformer{ - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Create orphan output nodes - &OrphanOutputTransformer{ - Config: b.Config, - State: b.State, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, - &ProvisionerTransformer{}, - - // Add module variables - &ModuleVariableTransformer{ - Config: b.Config, - }, - - TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config), - - // Remove modules no longer present in the config - &RemovedModuleTransformer{Config: b.Config, State: b.State}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. 
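// An aside on what the next transformer does: ReferenceTransformer asks each
// vertex what it references, then adds a dependency edge to whichever vertex
// declares the referenced thing, so evaluation order follows data flow.
// Conceptually it is close to this sketch (the real implementation in
// transform_reference.go builds a lookup map of referenceable addresses
// first):
//
//	for _, v := range g.Vertices() {
//		if rn, ok := v.(GraphNodeReferencer); ok {
//			for _, ref := range rn.References() {
//				// find the vertex that declares ref.Subject and
//				// connect v to it
//			}
//		}
//	}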
- &ReferenceTransformer{}, - - // Add the node to fix the state count boundaries - &CountBoundaryTransformer{ - Config: b.Config, - }, - - // Target - &TargetsTransformer{ - Targets: b.Targets, - - // Resource nodes from config have not yet been expanded for - // "count", so we must apply targeting without indices. Exact - // targeting will be dealt with later when these resources - // DynamicExpand. - IgnoreIndices: true, - }, - - // Detect when create_before_destroy must be forced on for a particular - // node due to dependency edges, to avoid graph cycles during apply. - &ForcedCBDTransformer{}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} - -func (b *PlanGraphBuilder) init() { - // Do nothing if the user requests customizing the fields - if b.CustomConcrete { - return - } - - b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &NodePlannableResource{ - NodeAbstractResource: a, - } - } - - b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go deleted file mode 100644 index fad7bf16..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go +++ /dev/null @@ -1,194 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// RefreshGraphBuilder implements GraphBuilder and is responsible for building -// a graph for refreshing (updating the Terraform state). -// -// The primary difference between this graph and others: -// -// * Based on the state since it represents the only resources that -// need to be refreshed. -// -// * Ignores lifecycle options since no lifecycle events occur here. This -// simplifies the graph significantly since complex transforms such as -// create-before-destroy can be completely ignored. -// -type RefreshGraphBuilder struct { - // Config is the configuration tree. - Config *configs.Config - - // State is the prior state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Validate will do structural validation of the graph. 
- Validate bool
-}
-
-// See GraphBuilder
-func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
- return (&BasicGraphBuilder{
- Steps: b.Steps(),
- Validate: b.Validate,
- Name: "RefreshGraphBuilder",
- }).Build(path)
-}
-
-// See GraphBuilder
-func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
- // Custom factory for creating providers.
- concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
- return &NodeApplyableProvider{
- NodeAbstractProvider: a,
- }
- }
-
- concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex {
- return &NodeRefreshableManagedResource{
- NodeAbstractResource: a,
- }
- }
-
- concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
- return &NodeRefreshableManagedResourceInstance{
- NodeAbstractResourceInstance: a,
- }
- }
-
- concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex {
- // The "Plan" node type also handles refreshing behavior.
- return &NodePlanDeposedResourceInstanceObject{
- NodeAbstractResourceInstance: a,
- DeposedKey: key,
- }
- }
-
- concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
- return &NodeRefreshableDataResource{
- NodeAbstractResource: a,
- }
- }
-
- steps := []GraphTransformer{
- // Creates all the managed resources that aren't in the state, but only if
- // we have a state already. No resources in state means there's not
- // anything to refresh.
- func() GraphTransformer {
- if b.State.HasResources() {
- return &ConfigTransformer{
- Concrete: concreteManagedResource,
- Config: b.Config,
- Unique: true,
- ModeFilter: true,
- Mode: addrs.ManagedResourceMode,
- }
- }
- log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer")
- return nil
- }(),
-
- // Creates all the data resources that aren't in the state. This will also
- // add any orphans from scaling in as destroy nodes.
- &ConfigTransformer{
- Concrete: concreteDataResource,
- Config: b.Config,
- Unique: true,
- ModeFilter: true,
- Mode: addrs.DataResourceMode,
- },
-
- // Add any fully-orphaned resources from config (ones that have been
- // removed completely, not ones that are just orphaned due to a scaled-in
- // count).
- &OrphanResourceInstanceTransformer{
- Concrete: concreteManagedResourceInstance,
- State: b.State,
- Config: b.Config,
- },
-
- // We also need nodes for any deposed instance objects present in the
- // state, so we can check if they still exist. (This intentionally
- // skips creating nodes for _current_ objects, since ConfigTransformer
- // created nodes that will do that during DynamicExpand.)
- &StateTransformer{
- ConcreteDeposed: concreteResourceInstanceDeposed,
- State: b.State,
- },
-
- // Attach the state
- &AttachStateTransformer{State: b.State},
-
- // Attach the configuration to any resources
- &AttachResourceConfigTransformer{Config: b.Config},
-
- // Add root variables
- &RootVariableTransformer{Config: b.Config},
-
- // Add the local values
- &LocalTransformer{Config: b.Config},
-
- // Add the outputs
- &OutputTransformer{Config: b.Config},
-
- // Add module variables
- &ModuleVariableTransformer{Config: b.Config},
-
- TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
-
- // Must attach schemas before ReferenceTransformer so that we can
- // analyze the configuration to find references.
- &AttachSchemaTransformer{Schemas: b.Schemas},
-
- // Connect so that the references are ready for targeting.
We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Target - &TargetsTransformer{ - Targets: b.Targets, - - // Resource nodes from config have not yet been expanded for - // "count", so we must apply targeting without indices. Exact - // targeting will be dealt with later when these resources - // DynamicExpand. - IgnoreIndices: true, - }, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go deleted file mode 100644 index 0aa8b915..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go +++ /dev/null @@ -1,34 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// ValidateGraphBuilder creates the graph for the validate operation. -// -// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that -// we only have to validate what we'd normally plan anyways. The -// PlanGraphBuilder given will be modified so it shouldn't be used for anything -// else after calling this function. -func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder { - // We're going to customize the concrete functions - p.CustomConcrete = true - - // Set the provider to the normal provider. This will ask for input. - p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &NodeValidatableResource{ - NodeAbstractResource: a, - } - } - - // We purposely don't set any other concrete types since they don't - // require validation. - - return p -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go deleted file mode 100644 index 5dbf415f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -import "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - -// GraphDot returns the dot formatting of a visual representation of -// the given Terraform graph. -func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) { - return string(g.Dot(opts)), nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go deleted file mode 100644 index a005ea5a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go +++ /dev/null @@ -1,11 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// GraphNodeSubPath says that a node is part of a graph with a -// different path, and the context should be adjusted accordingly. 
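// A node opts in by returning the address of the module instance it belongs
// to; a sketch of an implementing node (childModuleNode is a hypothetical
// name):
//
//	type childModuleNode struct {
//		ModuleAddr addrs.ModuleInstance
//	}
//
//	func (n *childModuleNode) Path() addrs.ModuleInstance {
//		return n.ModuleAddr
//	}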
-type GraphNodeSubPath interface { - Path() addrs.ModuleInstance -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go deleted file mode 100644 index d699376f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go +++ /dev/null @@ -1,32 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// GraphWalker is an interface that can be implemented that when used -// with Graph.Walk will invoke the given callbacks under certain events. -type GraphWalker interface { - EnterPath(addrs.ModuleInstance) EvalContext - ExitPath(addrs.ModuleInstance) - EnterVertex(dag.Vertex) - ExitVertex(dag.Vertex, tfdiags.Diagnostics) - EnterEvalTree(dag.Vertex, EvalNode) EvalNode - ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics -} - -// NullGraphWalker is a GraphWalker implementation that does nothing. -// This can be embedded within other GraphWalker implementations for easily -// implementing all the required functions. -type NullGraphWalker struct{} - -func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) } -func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {} -func (NullGraphWalker) EnterVertex(dag.Vertex) {} -func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {} -func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } -func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go deleted file mode 100644 index 11fb2fd0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go +++ /dev/null @@ -1,157 +0,0 @@ -package terraform - -import ( - "context" - "log" - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ContextGraphWalker is the GraphWalker implementation used with the -// Context struct to walk and evaluate the graph. -type ContextGraphWalker struct { - NullGraphWalker - - // Configurable values - Context *Context - State *states.SyncState // Used for safe concurrent access to state - Changes *plans.ChangesSync // Used for safe concurrent writes to changes - Operation walkOperation - StopContext context.Context - RootVariableValues InputValues - - // This is an output. Do not set this, nor read it while a graph walk - // is in progress. 
- NonFatalDiagnostics tfdiags.Diagnostics - - errorLock sync.Mutex - once sync.Once - contexts map[string]*BuiltinEvalContext - contextLock sync.Mutex - variableValues map[string]map[string]cty.Value - variableValuesLock sync.Mutex - providerCache map[string]providers.Interface - providerSchemas map[string]*ProviderSchema - providerLock sync.Mutex - provisionerCache map[string]provisioners.Interface - provisionerSchemas map[string]*configschema.Block - provisionerLock sync.Mutex -} - -func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext { - w.once.Do(w.init) - - w.contextLock.Lock() - defer w.contextLock.Unlock() - - // If we already have a context for this path cached, use that - key := path.String() - if ctx, ok := w.contexts[key]; ok { - return ctx - } - - // Our evaluator shares some locks with the main context and the walker - // so that we can safely run multiple evaluations at once across - // different modules. - evaluator := &Evaluator{ - Meta: w.Context.meta, - Config: w.Context.config, - Operation: w.Operation, - State: w.State, - Changes: w.Changes, - Schemas: w.Context.schemas, - VariableValues: w.variableValues, - VariableValuesLock: &w.variableValuesLock, - } - - ctx := &BuiltinEvalContext{ - StopContext: w.StopContext, - PathValue: path, - Hooks: w.Context.hooks, - InputValue: w.Context.uiInput, - Components: w.Context.components, - Schemas: w.Context.schemas, - ProviderCache: w.providerCache, - ProviderInputConfig: w.Context.providerInputConfig, - ProviderLock: &w.providerLock, - ProvisionerCache: w.provisionerCache, - ProvisionerLock: &w.provisionerLock, - ChangesValue: w.Changes, - StateValue: w.State, - Evaluator: evaluator, - VariableValues: w.variableValues, - VariableValuesLock: &w.variableValuesLock, - } - - w.contexts[key] = ctx - return ctx -} - -func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { - log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v)) - - // Acquire a lock on the semaphore - w.Context.parallelSem.Acquire() - - // We want to filter the evaluation tree to only include operations - // that belong in this operation. - return EvalFilter(n, EvalNodeFilterOp(w.Operation)) -} - -func (w *ContextGraphWalker) ExitEvalTree(v dag.Vertex, output interface{}, err error) tfdiags.Diagnostics { - log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v)) - - // Release the semaphore - w.Context.parallelSem.Release() - - if err == nil { - return nil - } - - // Acquire the lock because anything is going to require a lock. - w.errorLock.Lock() - defer w.errorLock.Unlock() - - // If the error is non-fatal then we'll accumulate its diagnostics in our - // non-fatal list, rather than returning it directly, so that the graph - // walk can continue. - if nferr, ok := err.(tfdiags.NonFatalError); ok { - log.Printf("[WARN] %s: %s", dag.VertexName(v), nferr) - w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics) - return nil - } - - // Otherwise, we'll let our usual diagnostics machinery figure out how to - // unpack this as one or more diagnostic messages and return that. If we - // get down here then the returned diagnostics will contain at least one - // error, causing the graph walk to halt. 
- var diags tfdiags.Diagnostics - diags = diags.Append(err) - return diags -} - -func (w *ContextGraphWalker) init() { - w.contexts = make(map[string]*BuiltinEvalContext) - w.providerCache = make(map[string]providers.Interface) - w.providerSchemas = make(map[string]*ProviderSchema) - w.provisionerCache = make(map[string]provisioners.Interface) - w.provisionerSchemas = make(map[string]*configschema.Block) - w.variableValues = make(map[string]map[string]cty.Value) - - // Populate root module variable values. Other modules will be populated - // during the graph walk. - w.variableValues[""] = make(map[string]cty.Value) - for k, iv := range w.RootVariableValues { - w.variableValues[""][k] = iv.Value - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go deleted file mode 100644 index 859f6fb1..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go +++ /dev/null @@ -1,18 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=walkOperation graph_walk_operation.go - -// walkOperation is an enum which tells the walkContext what to do. -type walkOperation byte - -const ( - walkInvalid walkOperation = iota - walkApply - walkPlan - walkPlanDestroy - walkRefresh - walkValidate - walkDestroy - walkImport - walkEval // used just to prepare EvalContext for expression evaluation, with no other actions -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go deleted file mode 100644 index b51e1a26..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
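// (A note on the guard below: x has length one, so the only legal constant
// index is x[0]. Each assertion such as x[GraphTypePlan-3] therefore compiles
// only while the named constant still has its expected value; renumbering any
// constant turns that line into an out-of-range index and breaks the build,
// prompting regeneration of this file.)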
- var x [1]struct{}
- _ = x[GraphTypeInvalid-0]
- _ = x[GraphTypeLegacy-1]
- _ = x[GraphTypeRefresh-2]
- _ = x[GraphTypePlan-3]
- _ = x[GraphTypePlanDestroy-4]
- _ = x[GraphTypeApply-5]
- _ = x[GraphTypeValidate-6]
- _ = x[GraphTypeEval-7]
-}
-
-const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval"
-
-var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124}
-
-func (i GraphType) String() string {
- if i >= GraphType(len(_GraphType_index)-1) {
- return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go
deleted file mode 100644
index b5be9482..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package terraform
-
-import (
- "github.com/zclconf/go-cty/cty"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/plans"
- "github.com/hashicorp/terraform-plugin-sdk/internal/providers"
- "github.com/hashicorp/terraform-plugin-sdk/internal/states"
-)
-
-// HookAction is an enum of actions that can be taken as a result of a hook
-// callback. This allows you to modify the behavior of Terraform at runtime.
-type HookAction byte
-
-const (
- // HookActionContinue continues with processing as usual.
- HookActionContinue HookAction = iota
-
- // HookActionHalt halts immediately: no more hooks are processed
- // and the action that Terraform was about to take is cancelled.
- HookActionHalt
-)
-
-// Hook is the interface that must be implemented to hook into various
-// parts of Terraform, allowing you to inspect or change behavior at runtime.
-//
-// There are MANY hook points into Terraform. If you only want to implement
-// some hook points, but not all (which is the likely case), then embed the
-// NilHook into your struct, which implements all of the interface but does
-// nothing. Then, override only the functions you want to implement.
-type Hook interface {
- // PreApply and PostApply are called before and after an action for a
- // single instance is applied. The error argument in PostApply is the
- // error, if any, that was returned from the provider Apply call itself.
- PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error)
- PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error)
-
- // PreDiff and PostDiff are called before and after a provider is given
- // the opportunity to customize the proposed new state to produce the
- // planned new state.
- PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error)
- PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error)
-
- // The provisioning hooks signal both the overall start and end of
- // provisioning for a particular instance and of each of the individual
- // configured provisioners for each instance. The sequence of these
- // for a given instance might look something like this:
- //
- // PreProvisionInstance(aws_instance.foo[1], ...)
- // PreProvisionInstanceStep(aws_instance.foo[1], "file") - // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil) - // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec") - // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...") - // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...") - // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil) - // PostProvisionInstance(aws_instance.foo[1], ...) - // - // ProvisionOutput is called with output sent back by the provisioners. - // This will be called multiple times as output comes in, with each call - // representing one line of output. It cannot control whether the - // provisioner continues running. - PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) - PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) - PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) - PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) - ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) - - // PreRefresh and PostRefresh are called before and after a single - // resource state is refreshed, respectively. - PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) - PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) - - // PreImportState and PostImportState are called before and after - // (respectively) each state import operation for a given resource address. - PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) - PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) - - // PostStateUpdate is called each time the state is updated. It receives - // a deep copy of the state, which it may therefore access freely without - // any need for locks to protect from concurrent writes from the caller. - PostStateUpdate(new *states.State) (HookAction, error) -} - -// NilHook is a Hook implementation that does nothing. It exists only to -// simplify implementing hooks. You can embed this into your Hook implementation -// and only implement the functions you are interested in. 
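// For example, a hook that only watches applies could embed NilHook and
// override a single method; a sketch (logHook is a hypothetical name, and
// the standard library log package is assumed):
//
//	type logHook struct {
//		NilHook
//	}
//
//	func (h *logHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, prior, planned cty.Value) (HookAction, error) {
//		log.Printf("[INFO] %s: about to %s", addr, action)
//		return HookActionContinue, nil
//	}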
-type NilHook struct{} - -var _ Hook = (*NilHook)(nil) - -func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { -} - -func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { - return HookActionContinue, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go deleted file mode 100644 index 74a29bde..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go +++ /dev/null @@ -1,274 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// MockHook is an implementation of Hook that can be used for tests. -// It records all of its function calls. 
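// A sketch of typical test usage (assuming a *testing.T named t in scope):
//
//	h := new(MockHook)
//	h.PreApplyReturn = HookActionContinue
//	// ...run the operation under test with h registered as a hook...
//	if !h.PreApplyCalled {
//		t.Fatal("expected PreApply to be called")
//	}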
-type MockHook struct { - sync.Mutex - - PreApplyCalled bool - PreApplyAddr addrs.AbsResourceInstance - PreApplyGen states.Generation - PreApplyAction plans.Action - PreApplyPriorState cty.Value - PreApplyPlannedState cty.Value - PreApplyReturn HookAction - PreApplyError error - - PostApplyCalled bool - PostApplyAddr addrs.AbsResourceInstance - PostApplyGen states.Generation - PostApplyNewState cty.Value - PostApplyError error - PostApplyReturn HookAction - PostApplyReturnError error - PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error) - - PreDiffCalled bool - PreDiffAddr addrs.AbsResourceInstance - PreDiffGen states.Generation - PreDiffPriorState cty.Value - PreDiffProposedState cty.Value - PreDiffReturn HookAction - PreDiffError error - - PostDiffCalled bool - PostDiffAddr addrs.AbsResourceInstance - PostDiffGen states.Generation - PostDiffAction plans.Action - PostDiffPriorState cty.Value - PostDiffPlannedState cty.Value - PostDiffReturn HookAction - PostDiffError error - - PreProvisionInstanceCalled bool - PreProvisionInstanceAddr addrs.AbsResourceInstance - PreProvisionInstanceState cty.Value - PreProvisionInstanceReturn HookAction - PreProvisionInstanceError error - - PostProvisionInstanceCalled bool - PostProvisionInstanceAddr addrs.AbsResourceInstance - PostProvisionInstanceState cty.Value - PostProvisionInstanceReturn HookAction - PostProvisionInstanceError error - - PreProvisionInstanceStepCalled bool - PreProvisionInstanceStepAddr addrs.AbsResourceInstance - PreProvisionInstanceStepProvisionerType string - PreProvisionInstanceStepReturn HookAction - PreProvisionInstanceStepError error - - PostProvisionInstanceStepCalled bool - PostProvisionInstanceStepAddr addrs.AbsResourceInstance - PostProvisionInstanceStepProvisionerType string - PostProvisionInstanceStepErrorArg error - PostProvisionInstanceStepReturn HookAction - PostProvisionInstanceStepError error - - ProvisionOutputCalled bool - ProvisionOutputAddr addrs.AbsResourceInstance - ProvisionOutputProvisionerType string - ProvisionOutputMessage string - - PreRefreshCalled bool - PreRefreshAddr addrs.AbsResourceInstance - PreRefreshGen states.Generation - PreRefreshPriorState cty.Value - PreRefreshReturn HookAction - PreRefreshError error - - PostRefreshCalled bool - PostRefreshAddr addrs.AbsResourceInstance - PostRefreshGen states.Generation - PostRefreshPriorState cty.Value - PostRefreshNewState cty.Value - PostRefreshReturn HookAction - PostRefreshError error - - PreImportStateCalled bool - PreImportStateAddr addrs.AbsResourceInstance - PreImportStateID string - PreImportStateReturn HookAction - PreImportStateError error - - PostImportStateCalled bool - PostImportStateAddr addrs.AbsResourceInstance - PostImportStateNewStates []providers.ImportedResource - PostImportStateReturn HookAction - PostImportStateError error - - PostStateUpdateCalled bool - PostStateUpdateState *states.State - PostStateUpdateReturn HookAction - PostStateUpdateError error -} - -var _ Hook = (*MockHook)(nil) - -func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreApplyCalled = true - h.PreApplyAddr = addr - h.PreApplyGen = gen - h.PreApplyAction = action - h.PreApplyPriorState = priorState - h.PreApplyPlannedState = plannedNewState - return h.PreApplyReturn, h.PreApplyError -} - -func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen 
states.Generation, newState cty.Value, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostApplyCalled = true - h.PostApplyAddr = addr - h.PostApplyGen = gen - h.PostApplyNewState = newState - h.PostApplyError = err - - if h.PostApplyFn != nil { - return h.PostApplyFn(addr, gen, newState, err) - } - - return h.PostApplyReturn, h.PostApplyReturnError -} - -func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreDiffCalled = true - h.PreDiffAddr = addr - h.PreDiffGen = gen - h.PreDiffPriorState = priorState - h.PreDiffProposedState = proposedNewState - return h.PreDiffReturn, h.PreDiffError -} - -func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostDiffCalled = true - h.PostDiffAddr = addr - h.PostDiffGen = gen - h.PostDiffAction = action - h.PostDiffPriorState = priorState - h.PostDiffPlannedState = plannedNewState - return h.PostDiffReturn, h.PostDiffError -} - -func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionInstanceCalled = true - h.PreProvisionInstanceAddr = addr - h.PreProvisionInstanceState = state - return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError -} - -func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionInstanceCalled = true - h.PostProvisionInstanceAddr = addr - h.PostProvisionInstanceState = state - return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError -} - -func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionInstanceStepCalled = true - h.PreProvisionInstanceStepAddr = addr - h.PreProvisionInstanceStepProvisionerType = typeName - return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError -} - -func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionInstanceStepCalled = true - h.PostProvisionInstanceStepAddr = addr - h.PostProvisionInstanceStepProvisionerType = typeName - h.PostProvisionInstanceStepErrorArg = err - return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError -} - -func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { - h.Lock() - defer h.Unlock() - - h.ProvisionOutputCalled = true - h.ProvisionOutputAddr = addr - h.ProvisionOutputProvisionerType = typeName - h.ProvisionOutputMessage = line -} - -func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreRefreshCalled = true - h.PreRefreshAddr = addr - h.PreRefreshGen = gen - h.PreRefreshPriorState = priorState - return h.PreRefreshReturn, h.PreRefreshError -} - -func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostRefreshCalled = true - h.PostRefreshAddr = addr - h.PostRefreshPriorState = priorState - h.PostRefreshNewState = newState - 
return h.PostRefreshReturn, h.PostRefreshError -} - -func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreImportStateCalled = true - h.PreImportStateAddr = addr - h.PreImportStateID = importID - return h.PreImportStateReturn, h.PreImportStateError -} - -func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostImportStateCalled = true - h.PostImportStateAddr = addr - h.PostImportStateNewStates = imported - return h.PostImportStateReturn, h.PostImportStateError -} - -func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostStateUpdateCalled = true - h.PostStateUpdateState = new - return h.PostStateUpdateReturn, h.PostStateUpdateError -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go deleted file mode 100644 index 42c3d20c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go +++ /dev/null @@ -1,100 +0,0 @@ -package terraform - -import ( - "sync/atomic" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// stopHook is a private Hook implementation that Terraform uses to -// signal when to stop or cancel actions. -type stopHook struct { - stop uint32 -} - -var _ Hook = (*stopHook)(nil) - -func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { -} - -func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreImportState(addr 
addrs.AbsResourceInstance, importID string) (HookAction, error) {
- return h.hook()
-}
-
-func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) {
- return h.hook()
-}
-
-func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) {
- return h.hook()
-}
-
-func (h *stopHook) hook() (HookAction, error) {
- if h.Stopped() {
- // FIXME: This should really return an error since stopping partway
- // through is not a successful run-to-completion, but we'll need to
- // introduce that cautiously since existing automation solutions may
- // be depending on this behavior.
- return HookActionHalt, nil
- }
-
- return HookActionContinue, nil
-}
-
-// Reset should be called within the lock context
-func (h *stopHook) Reset() {
- atomic.StoreUint32(&h.stop, 0)
-}
-
-func (h *stopHook) Stop() {
- atomic.StoreUint32(&h.stop, 1)
-}
-
-func (h *stopHook) Stopped() bool {
- return atomic.LoadUint32(&h.stop) == 1
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go
deleted file mode 100644
index 375a8638..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package terraform
-
-//go:generate go run golang.org/x/tools/cmd/stringer -type=InstanceType instancetype.go
-
-// InstanceType is an enum of the various types of instances stored in the State
-type InstanceType int
-
-const (
- TypeInvalid InstanceType = iota
- TypePrimary
- TypeTainted
- TypeDeposed
-)
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go
deleted file mode 100644
index 95b7a980..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT.
-
-package terraform
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{} - _ = x[TypeInvalid-0] - _ = x[TypePrimary-1] - _ = x[TypeTainted-2] - _ = x[TypeDeposed-3] -} - -const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" - -var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} - -func (i InstanceType) String() string { - if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { - return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go deleted file mode 100644 index f1434e62..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go +++ /dev/null @@ -1,202 +0,0 @@ -package terraform - -import ( - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps" - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// ConfigTreeDependencies returns the dependencies of the tree of modules -// described by the given configuration and state. -// -// Both configuration and state are required because there can be resources -// implied by instances in the state that no longer exist in config. -func ConfigTreeDependencies(root *configs.Config, state *states.State) *moduledeps.Module { - // First we walk the configuration tree to build the overall structure - // and capture the explicit/implicit/inherited provider dependencies. - deps := configTreeConfigDependencies(root, nil) - - // Next we walk over the resources in the state to catch any additional - // dependencies created by existing resources that are no longer in config. - // Most things we find in state will already be present in 'deps', but - // we're interested in the rare thing that isn't. - configTreeMergeStateDependencies(deps, state) - - return deps -} - -func configTreeConfigDependencies(root *configs.Config, inheritProviders map[string]*configs.Provider) *moduledeps.Module { - if root == nil { - // If no config is provided, we'll make a synthetic root. - // This isn't necessarily correct if we're called with a nil that - // *isn't* at the root, but in practice that can never happen. - return &moduledeps.Module{ - Name: "root", - Providers: make(moduledeps.Providers), - } - } - - name := "root" - if len(root.Path) != 0 { - name = root.Path[len(root.Path)-1] - } - - ret := &moduledeps.Module{ - Name: name, - } - - module := root.Module - - // Provider dependencies - { - providers := make(moduledeps.Providers) - - // The main way to declare a provider dependency is explicitly inside - // the "terraform" block, which allows declaring a requirement without - // also creating a configuration. - for fullName, constraints := range module.ProviderRequirements { - inst := moduledeps.ProviderInstance(fullName) - - // The handling here is a bit fiddly because the moduledeps package - // was designed around the legacy (pre-0.12) configuration model - // and hasn't yet been revised to handle the new model. As a result, - // we need to do some translation here. 
- // FIXME: Eventually we should adjust the underlying model so we - // can also retain the source location of each constraint, for - // more informative output from the "terraform providers" command. - var rawConstraints version.Constraints - for _, constraint := range constraints { - rawConstraints = append(rawConstraints, constraint.Required...) - } - discoConstraints := discovery.NewConstraints(rawConstraints) - - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discoConstraints, - Reason: moduledeps.ProviderDependencyExplicit, - } - } - - // Provider configurations can also include version constraints, - // allowing for more terse declaration in situations where both a - // configuration and a constraint are defined in the same module. - for fullName, pCfg := range module.ProviderConfigs { - inst := moduledeps.ProviderInstance(fullName) - discoConstraints := discovery.AllVersions - if pCfg.Version.Required != nil { - discoConstraints = discovery.NewConstraints(pCfg.Version.Required) - } - if existing, exists := providers[inst]; exists { - existing.Constraints = existing.Constraints.Append(discoConstraints) - } else { - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discoConstraints, - Reason: moduledeps.ProviderDependencyExplicit, - } - } - } - - // Each resource in the configuration creates an *implicit* provider - // dependency, though we'll only record it if there isn't already - // an explicit dependency on the same provider. - for _, rc := range module.ManagedResources { - addr := rc.ProviderConfigAddr() - inst := moduledeps.ProviderInstance(addr.StringCompact()) - if _, exists := providers[inst]; exists { - // Explicit dependency already present - continue - } - - reason := moduledeps.ProviderDependencyImplicit - if _, inherited := inheritProviders[addr.StringCompact()]; inherited { - reason = moduledeps.ProviderDependencyInherited - } - - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: reason, - } - } - for _, rc := range module.DataResources { - addr := rc.ProviderConfigAddr() - inst := moduledeps.ProviderInstance(addr.StringCompact()) - if _, exists := providers[inst]; exists { - // Explicit dependency already present - continue - } - - reason := moduledeps.ProviderDependencyImplicit - if _, inherited := inheritProviders[addr.String()]; inherited { - reason = moduledeps.ProviderDependencyInherited - } - - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: reason, - } - } - - ret.Providers = providers - } - - childInherit := make(map[string]*configs.Provider) - for k, v := range inheritProviders { - childInherit[k] = v - } - for k, v := range module.ProviderConfigs { - childInherit[k] = v - } - for _, c := range root.Children { - ret.Children = append(ret.Children, configTreeConfigDependencies(c, childInherit)) - } - - return ret -} - -func configTreeMergeStateDependencies(root *moduledeps.Module, state *states.State) { - if state == nil { - return - } - - findModule := func(path addrs.ModuleInstance) *moduledeps.Module { - module := root - for _, step := range path { - var next *moduledeps.Module - for _, cm := range module.Children { - if cm.Name == step.Name { - next = cm - break - } - } - - if next == nil { - // If we didn't find a next node, we'll need to make one - next = &moduledeps.Module{ - Name: step.Name, - Providers: make(moduledeps.Providers), - } - module.Children = append(module.Children, next) - } - - module = next - } - return module - 
} - - for _, ms := range state.Modules { - module := findModule(ms.Addr) - - for _, rs := range ms.Resources { - inst := moduledeps.ProviderInstance(rs.ProviderConfig.ProviderConfig.StringCompact()) - if _, exists := module.Providers[inst]; !exists { - module.Providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: moduledeps.ProviderDependencyFromState, - } - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go deleted file mode 100644 index acd8262b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go +++ /dev/null @@ -1,22 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// NodeCountBoundary fixes up any transitions between "each modes" in objects -// saved in state, such as switching from NoEach to EachInt. -type NodeCountBoundary struct { - Config *configs.Config -} - -func (n *NodeCountBoundary) Name() string { - return "meta.count-boundary (EachMode fixup)" -} - -// GraphNodeEvalable -func (n *NodeCountBoundary) EvalTree() EvalNode { - return &EvalCountFixZeroOneBoundaryGlobal{ - Config: n.Config, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go deleted file mode 100644 index 56a33bce..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go +++ /dev/null @@ -1,40 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// NodeDestroyableDataResourceInstance represents a resource that is "destroyable": -// it is ready to be destroyed. -type NodeDestroyableDataResourceInstance struct { - *NodeAbstractResourceInstance -} - -// GraphNodeEvalable -func (n *NodeDestroyableDataResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var providerSchema *ProviderSchema - // We don't need the provider, but we're calling EvalGetProvider to load the - // schema. - var provider providers.Interface - - // Just destroy it. 
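// Context for the nil state variable below: EvalWriteState persists
// whatever *state points at, so leaving it nil effectively removes the
// data source's entry, which is all "destroy" means for a data resource.
// A minimal sketch of that write-nil-to-delete idea with a hypothetical
// in-memory store (not the SDK's actual state API):
//
//	package main
//
//	import "fmt"
//
//	type object struct{ id string }
//
//	type store map[string]*object
//
//	// set mirrors a state writer: writing nil removes the entry.
//	func (s store) set(addr string, obj *object) {
//		if obj == nil {
//			delete(s, addr)
//			return
//		}
//		s[addr] = obj
//	}
//
//	func main() {
//		s := store{"data.example.foo": {id: "1"}}
//		s.set("data.example.foo", nil) // "just destroy it"
//		fmt.Println(len(s))            // 0
//	}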
- var state *states.ResourceInstanceObject - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalWriteState{ - Addr: addr.Resource, - State: &state, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go deleted file mode 100644 index 56283c0a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go +++ /dev/null @@ -1,229 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// NodeRefreshableDataResource represents a resource that is "refreshable". -type NodeRefreshableDataResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeSubPath = (*NodeRefreshableDataResource)(nil) - _ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil) - _ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil) - _ GraphNodeResource = (*NodeRefreshableDataResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil) -) - -// GraphNodeDynamicExpandable -func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - count, countKnown, countDiags := evaluateResourceCountExpressionKnown(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - if !countKnown { - // If the count isn't known yet, we'll skip refreshing and try expansion - // again during the plan walk. - return nil, nil - } - - forEachMap, forEachKnown, forEachDiags := evaluateResourceForEachExpressionKnown(n.Config.ForEach, ctx) - diags = diags.Append(forEachDiags) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - if !forEachKnown { - // If the for_each isn't known yet, we'll skip refreshing and try expansion - // again during the plan walk. - return nil, nil - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - - return &NodeRefreshableDataResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // We also need a destroyable resource for orphans that are a result of a - // scaled-in count. 
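// The closures here and just below follow the SDK's concrete-node factory
// pattern: graph transformers stay generic over abstract nodes, and the
// caller injects the function that picks the concrete type. A
// stripped-down sketch of the pattern with hypothetical node types:
//
//	package main
//
//	import "fmt"
//
//	type abstractNode struct{ name string }
//
//	// concreteFunc is all a generic expander needs to know.
//	type concreteFunc func(*abstractNode) interface{}
//
//	type refreshNode struct{ *abstractNode }
//	type destroyNode struct{ *abstractNode }
//
//	func expand(names []string, f concreteFunc) []interface{} {
//		out := make([]interface{}, 0, len(names))
//		for _, n := range names {
//			out = append(out, f(&abstractNode{name: n}))
//		}
//		return out
//	}
//
//	func main() {
//		refresh := expand([]string{"a[0]"}, func(a *abstractNode) interface{} { return &refreshNode{a} })
//		orphans := expand([]string{"a[1]"}, func(a *abstractNode) interface{} { return &destroyNode{a} })
//		fmt.Println(len(refresh), len(orphans)) // 1 1
//	}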
- concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and provider since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - - return &NodeDestroyableDataResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - }, - - // Add the count orphans. As these are orphaned refresh nodes, we add them - // directly as NodeDestroyableDataResource. - &OrphanResourceCountTransformer{ - Concrete: concreteResourceDestroyable, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeRefreshableDataResource", - } - - graph, diags := b.Build(ctx.Path()) - return graph, diags.ErrWithWarnings() -} - -// NodeRefreshableDataResourceInstance represents a single resource instance -// that is refreshable. -type NodeRefreshableDataResourceInstance struct { - *NodeAbstractResourceInstance -} - -// GraphNodeEvalable -func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // These variables are the state for the eval sequence below, and are - // updated through pointers. - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var configVal cty.Value - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Always destroy the existing state first, since we must - // make sure that values from a previous read will not - // get interpolated if we end up needing to defer our - // loading until apply time. - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, // a pointer to nil, here - ProviderSchema: &providerSchema, - }, - - // EvalReadData will _attempt_ to read the data source, but may - // generate an incomplete planned object if the configuration - // includes values that won't be known until apply. - &EvalReadData{ - Addr: addr.Resource, - Config: n.Config, - Dependencies: n.StateReferences(), - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - OutputChange: &change, - OutputConfigValue: &configVal, - OutputState: &state, - // If the config explicitly has a depends_on for this data - // source, assume the intention is to prevent refreshing ahead - // of that dependency, and therefore we need to deal with this - // resource during the apply phase. We do that by forcing this - // read to result in a plan. 
- ForcePlanRead: len(n.Config.DependsOn) > 0, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return (*state).Status != states.ObjectPlanned, nil - }, - Then: &EvalSequence{ - Nodes: []EvalNode{ - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - &EvalUpdateStateHook{}, - }, - }, - Else: &EvalSequence{ - // We can't deal with this yet, so we'll repeat this step - // during the plan walk to produce a planned change to read - // this during the apply walk. However, we do still need to - // save the generated change and partial state so that - // results from it can be included in other data resources - // or provider configurations during the refresh walk. - // (The planned object we save in the state here will be - // pruned out at the end of the refresh walk, returning - // it back to being unset again for subsequent walks.) - Nodes: []EvalNode{ - &EvalWriteDiff{ - Addr: addr.Resource, - Change: &change, - ProviderSchema: &providerSchema, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - }, - }, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go deleted file mode 100644 index 38681d83..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" -) - -// NodeLocal represents a named local value in a particular module. -// -// Local value nodes only have one operation, common to all walk types: -// evaluate the result and place it in state. -type NodeLocal struct { - Addr addrs.AbsLocalValue - Config *configs.Local -} - -var ( - _ GraphNodeSubPath = (*NodeLocal)(nil) - _ RemovableIfNotTargeted = (*NodeLocal)(nil) - _ GraphNodeReferenceable = (*NodeLocal)(nil) - _ GraphNodeReferencer = (*NodeLocal)(nil) - _ GraphNodeEvalable = (*NodeLocal)(nil) - _ dag.GraphNodeDotter = (*NodeLocal)(nil) -) - -func (n *NodeLocal) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeLocal) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeLocal) RemoveIfNotTargeted() bool { - return true -} - -// GraphNodeReferenceable -func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.LocalValue} -} - -// GraphNodeReferencer -func (n *NodeLocal) References() []*addrs.Reference { - refs, _ := lang.ReferencesInExpr(n.Config.Expr) - return appendResourceDestroyReferences(refs) -} - -// GraphNodeEvalable -func (n *NodeLocal) EvalTree() EvalNode { - return &EvalLocal{ - Addr: n.Addr.LocalValue, - Expr: n.Config.Expr, - } -} - -// dag.GraphNodeDotter impl. 
-func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go deleted file mode 100644 index 6e3cb41d..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go +++ /dev/null @@ -1,89 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// NodeModuleRemoved represents a module that is no longer in the -// config. -type NodeModuleRemoved struct { - Addr addrs.ModuleInstance -} - -var ( - _ GraphNodeSubPath = (*NodeModuleRemoved)(nil) - _ RemovableIfNotTargeted = (*NodeModuleRemoved)(nil) - _ GraphNodeEvalable = (*NodeModuleRemoved)(nil) - _ GraphNodeReferencer = (*NodeModuleRemoved)(nil) - _ GraphNodeReferenceOutside = (*NodeModuleRemoved)(nil) -) - -func (n *NodeModuleRemoved) Name() string { - return fmt.Sprintf("%s (removed)", n.Addr.String()) -} - -// GraphNodeSubPath -func (n *NodeModuleRemoved) Path() addrs.ModuleInstance { - return n.Addr -} - -// GraphNodeEvalable -func (n *NodeModuleRemoved) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, - Node: &EvalCheckModuleRemoved{ - Addr: n.Addr, - }, - } -} - -func (n *NodeModuleRemoved) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - // Our "References" implementation indicates that this node depends on - // the call to the module it represents, which implicitly depends on - // everything inside the module. That reference must therefore be - // interpreted in terms of our parent module. - return n.Addr, n.Addr.Parent() -} - -func (n *NodeModuleRemoved) References() []*addrs.Reference { - // We depend on the call to the module we represent, because that - // implicitly then depends on everything inside that module. - // Our ReferenceOutside implementation causes this to be interpreted - // within the parent module. - - _, call := n.Addr.CallInstance() - return []*addrs.Reference{ - { - Subject: call, - - // No source range here, because there's nothing reasonable for - // us to return. - }, - } -} - -// RemovableIfNotTargeted -func (n *NodeModuleRemoved) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// EvalCheckModuleRemoved is an EvalNode implementation that verifies that -// a module has been removed from the state as expected. -type EvalCheckModuleRemoved struct { - Addr addrs.ModuleInstance -} - -func (n *EvalCheckModuleRemoved) Eval(ctx EvalContext) (interface{}, error) { - mod := ctx.State().Module(n.Addr) - if mod != nil { - // If we get here then that indicates a bug either in the states - // module or in an earlier step of the graph walk, since we should've - // pruned out the module when the last resource was removed from it. 
- return nil, fmt.Errorf("leftover module %s in state that should have been removed; this is a bug in Terraform and should be reported", n.Addr) - } - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go deleted file mode 100644 index 76311a56..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go +++ /dev/null @@ -1,142 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/zclconf/go-cty/cty" -) - -// NodeApplyableModuleVariable represents a module variable input during -// the apply step. -type NodeApplyableModuleVariable struct { - Addr addrs.AbsInputVariableInstance - Config *configs.Variable // Config is the var in the config - Expr hcl.Expression // Expr is the value expression given in the call -} - -// Ensure that we are implementing all of the interfaces we think we are -// implementing. -var ( - _ GraphNodeSubPath = (*NodeApplyableModuleVariable)(nil) - _ RemovableIfNotTargeted = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeReferenceOutside = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeReferenceable = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeReferencer = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeEvalable = (*NodeApplyableModuleVariable)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) -) - -func (n *NodeApplyableModuleVariable) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeApplyableModuleVariable) Path() addrs.ModuleInstance { - // We execute in the parent scope (above our own module) because - // expressions in our value are resolved in that context. - return n.Addr.Module.Parent() -} - -// RemovableIfNotTargeted -func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeReferenceOutside implementation -func (n *NodeApplyableModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - - // Module input variables have their value expressions defined in the - // context of their calling (parent) module, and so references from - // a node of this type should be resolved in the parent module instance. - referencePath = n.Addr.Module.Parent() - - // Input variables are _referenced_ from their own module, though. - selfPath = n.Addr.Module - - return // uses named return values -} - -// GraphNodeReferenceable -func (n *NodeApplyableModuleVariable) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.Variable} -} - -// GraphNodeReferencer -func (n *NodeApplyableModuleVariable) References() []*addrs.Reference { - - // If we have no value expression, we cannot depend on anything. - if n.Expr == nil { - return nil - } - - // Variables in the root don't depend on anything, because their values - // are gathered prior to the graph walk and recorded in the context. - if len(n.Addr.Module) == 0 { - return nil - } - - // Otherwise, we depend on anything referenced by our value expression. 
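// A hedged illustration of what "anything referenced by our value
// expression" means mechanically. The SDK's internal lang package is not
// importable from provider code, but the public HCL API it builds on
// exposes the same idea; the expression text here is invented:
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/hashicorp/hcl/v2"
//		"github.com/hashicorp/hcl/v2/hclsyntax"
//	)
//
//	func main() {
//		// The value expression given in a module call, e.g.
//		//   vpc_id = aws_vpc.main.id
//		expr, _ := hclsyntax.ParseExpression(
//			[]byte(`aws_vpc.main.id`), "example.tf", hcl.Pos{Line: 1, Column: 1})
//
//		for _, tr := range expr.Variables() {
//			fmt.Println(tr.RootName()) // "aws_vpc", resolved in the parent module
//		}
//	}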
- // We ignore diagnostics here under the assumption that we'll re-eval - // all these things later and catch them then; for our purposes here, - // we only care about valid references. - // - // Due to our GraphNodeReferenceOutside implementation, the addresses - // returned by this function are interpreted in the _parent_ module from - // where our associated variable was declared, which is correct because - // our value expression is assigned within a "module" block in the parent - // module. - refs, _ := lang.ReferencesInExpr(n.Expr) - return refs -} - -// GraphNodeEvalable -func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { - // If we have no value, do nothing - if n.Expr == nil { - return &EvalNoop{} - } - - // Otherwise, interpolate the value of this variable and set it - // within the variables mapping. - vals := make(map[string]cty.Value) - - _, call := n.Addr.Module.CallInstance() - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, - walkDestroy, walkValidate}, - Node: &EvalModuleCallArgument{ - Addr: n.Addr.Variable, - Config: n.Config, - Expr: n.Expr, - Values: vals, - - IgnoreDiagnostics: false, - }, - }, - - &EvalSetModuleCallArguments{ - Module: call, - Values: vals, - }, - }, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeApplyableModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go deleted file mode 100644 index 75305712..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go +++ /dev/null @@ -1,200 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" -) - -// NodeApplyableOutput represents an output that is "applyable": -// it is ready to be applied. -type NodeApplyableOutput struct { - Addr addrs.AbsOutputValue - Config *configs.Output // Config is the output in the config -} - -var ( - _ GraphNodeSubPath = (*NodeApplyableOutput)(nil) - _ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil) - _ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil) - _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil) - _ GraphNodeReferencer = (*NodeApplyableOutput)(nil) - _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil) - _ GraphNodeEvalable = (*NodeApplyableOutput)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil) -) - -func (n *NodeApplyableOutput) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeApplyableOutput) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. 
- return true
-}
-
-// GraphNodeTargetDownstream
-func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool {
- // If any of the direct dependencies of an output are targeted then
- // the output must always be targeted as well, so its value will always
- // be up-to-date at the completion of an apply walk.
- return true
-}
-
-func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.ModuleInstance) {
-
- // Output values have their expressions resolved in the context of the
- // module where they are defined.
- referencePath = addr.Module
-
- // ...but they are referenced in the context of their calling module.
- selfPath = addr.Module.Parent()
-
- return // uses named return values
-
-}
-
-// GraphNodeReferenceOutside implementation
-func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
- return referenceOutsideForOutput(n.Addr)
-}
-
-func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable {
- // An output in the root module can't be referenced at all.
- if addr.Module.IsRoot() {
- return nil
- }
-
- // Otherwise, we can be referenced via a reference to our output name
- // on the parent module's call, or via a reference to the entire call.
- // e.g. module.foo.bar or just module.foo.
- // Note that our ReferenceOutside method causes these addresses to be
- // relative to the calling module, not the module where the output
- // was declared.
- _, outp := addr.ModuleCallOutput()
- _, call := addr.Module.CallInstance()
- return []addrs.Referenceable{outp, call}
-
-}
-
-// GraphNodeReferenceable
-func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable {
- return referenceableAddrsForOutput(n.Addr)
-}
-
-func referencesForOutput(c *configs.Output) []*addrs.Reference {
- impRefs, _ := lang.ReferencesInExpr(c.Expr)
- expRefs, _ := lang.References(c.DependsOn)
- l := len(impRefs) + len(expRefs)
- if l == 0 {
- return nil
- }
- refs := make([]*addrs.Reference, 0, l)
- refs = append(refs, impRefs...)
- refs = append(refs, expRefs...)
- return refs
-
-}
-
-// GraphNodeReferencer
-func (n *NodeApplyableOutput) References() []*addrs.Reference {
- return appendResourceDestroyReferences(referencesForOutput(n.Config))
-}
-
-// GraphNodeEvalable
-func (n *NodeApplyableOutput) EvalTree() EvalNode {
- return &EvalSequence{
- Nodes: []EvalNode{
- &EvalOpFilter{
- Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy},
- Node: &EvalWriteOutput{
- Addr: n.Addr.OutputValue,
- Sensitive: n.Config.Sensitive,
- Expr: n.Config.Expr,
- },
- },
- },
- }
-}
-
-// dag.GraphNodeDotter impl.
-func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
- return &dag.DotNode{
- Name: name,
- Attrs: map[string]string{
- "label": n.Name(),
- "shape": "note",
- },
- }
-}
-
-// NodeDestroyableOutput represents an output that is "destroyable":
-// its application will remove the output from the state.
-type NodeDestroyableOutput struct { - Addr addrs.AbsOutputValue - Config *configs.Output // Config is the output in the config -} - -var ( - _ GraphNodeSubPath = (*NodeDestroyableOutput)(nil) - _ RemovableIfNotTargeted = (*NodeDestroyableOutput)(nil) - _ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil) - _ GraphNodeReferencer = (*NodeDestroyableOutput)(nil) - _ GraphNodeEvalable = (*NodeDestroyableOutput)(nil) - _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil) -) - -func (n *NodeDestroyableOutput) Name() string { - return fmt.Sprintf("%s (destroy)", n.Addr.String()) -} - -// GraphNodeSubPath -func (n *NodeDestroyableOutput) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// This will keep the destroy node in the graph if its corresponding output -// node is also in the destroy graph. -func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { - return true -} - -// GraphNodeReferencer -func (n *NodeDestroyableOutput) References() []*addrs.Reference { - return referencesForOutput(n.Config) -} - -// GraphNodeEvalable -func (n *NodeDestroyableOutput) EvalTree() EvalNode { - return &EvalDeleteOutput{ - Addr: n.Addr.OutputValue, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go deleted file mode 100644 index a76d1742..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go +++ /dev/null @@ -1,48 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// NodeOutputOrphan represents an output that is an orphan. 
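// The EvalTree further below gates its delete behind EvalOpFilter so it
// only runs during the refresh, apply, and destroy walks. A minimal
// sketch of that gating pattern with hypothetical names:
//
//	package main
//
//	import "fmt"
//
//	type walkOp int
//
//	const (
//		opPlan walkOp = iota
//		opApply
//	)
//
//	// runIf executes fn only when current is one of ops.
//	func runIf(current walkOp, ops []walkOp, fn func()) {
//		for _, op := range ops {
//			if op == current {
//				fn()
//				return
//			}
//		}
//	}
//
//	func main() {
//		runIf(opApply, []walkOp{opApply}, func() {
//			fmt.Println("deleting orphaned output from state")
//		})
//	}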
-type NodeOutputOrphan struct { - Addr addrs.AbsOutputValue -} - -var ( - _ GraphNodeSubPath = (*NodeOutputOrphan)(nil) - _ GraphNodeReferenceable = (*NodeOutputOrphan)(nil) - _ GraphNodeReferenceOutside = (*NodeOutputOrphan)(nil) - _ GraphNodeEvalable = (*NodeOutputOrphan)(nil) -) - -func (n *NodeOutputOrphan) Name() string { - return fmt.Sprintf("%s (orphan)", n.Addr.String()) -} - -// GraphNodeReferenceOutside implementation -func (n *NodeOutputOrphan) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - return referenceOutsideForOutput(n.Addr) -} - -// GraphNodeReferenceable -func (n *NodeOutputOrphan) ReferenceableAddrs() []addrs.Referenceable { - return referenceableAddrsForOutput(n.Addr) -} - -// GraphNodeSubPath -func (n *NodeOutputOrphan) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeEvalable -func (n *NodeOutputOrphan) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, - Node: &EvalDeleteOutput{ - Addr: n.Addr.OutputValue, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go deleted file mode 100644 index 2071ab16..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go +++ /dev/null @@ -1,11 +0,0 @@ -package terraform - -// NodeApplyableProvider represents a provider during an apply. -type NodeApplyableProvider struct { - *NodeAbstractProvider -} - -// GraphNodeEvalable -func (n *NodeApplyableProvider) EvalTree() EvalNode { - return ProviderEvalTree(n, n.ProviderConfig()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go deleted file mode 100644 index afdd4741..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go +++ /dev/null @@ -1,96 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// ConcreteProviderNodeFunc is a callback type used to convert an -// abstract provider to a concrete one of some type. -type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex - -// NodeAbstractProvider represents a provider that has no associated operations. -// It registers all the common interfaces across operations for providers. -type NodeAbstractProvider struct { - Addr addrs.AbsProviderConfig - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. 
- - Config *configs.Provider - Schema *configschema.Block -} - -var ( - _ GraphNodeSubPath = (*NodeAbstractProvider)(nil) - _ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil) - _ GraphNodeReferencer = (*NodeAbstractProvider)(nil) - _ GraphNodeProvider = (*NodeAbstractProvider)(nil) - _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil) - _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil) -) - -func (n *NodeAbstractProvider) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeAbstractProvider) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeReferencer -func (n *NodeAbstractProvider) References() []*addrs.Reference { - if n.Config == nil || n.Schema == nil { - return nil - } - - return ReferencesFromConfig(n.Config.Config, n.Schema) -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig { - return n.Addr -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider { - if n.Config == nil { - return nil - } - - return n.Config -} - -// GraphNodeAttachProvider -func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) { - n.Config = c -} - -// GraphNodeAttachProviderConfigSchema impl. -func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) { - n.Schema = schema -} - -// GraphNodeDotter impl. -func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go deleted file mode 100644 index 51335654..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go +++ /dev/null @@ -1,27 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// NodeDisabledProvider represents a provider that is disabled. A disabled -// provider does nothing. It exists to properly set inheritance information -// for child providers. -type NodeDisabledProvider struct { - *NodeAbstractProvider -} - -var ( - _ GraphNodeSubPath = (*NodeDisabledProvider)(nil) - _ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil) - _ GraphNodeReferencer = (*NodeDisabledProvider)(nil) - _ GraphNodeProvider = (*NodeDisabledProvider)(nil) - _ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil) - _ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil) -) - -func (n *NodeDisabledProvider) Name() string { - return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go deleted file mode 100644 index 580e60cb..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// NodeEvalableProvider represents a provider during an "eval" walk. 
-// This special provider node type just initializes a provider and
-// fetches its schema, without configuring it or otherwise interacting
-// with it.
-type NodeEvalableProvider struct {
- *NodeAbstractProvider
-}
-
-// GraphNodeEvalable
-func (n *NodeEvalableProvider) EvalTree() EvalNode {
- addr := n.Addr
- relAddr := addr.ProviderConfig
-
- return &EvalInitProvider{
- TypeName: relAddr.Type,
- Addr: addr.ProviderConfig,
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go
deleted file mode 100644
index 573f030d..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package terraform
-
-import (
- "fmt"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-)
-
-// NodeProvisioner represents a provisioner that has no associated operations.
-// It registers all the common interfaces across operations for provisioners.
-type NodeProvisioner struct {
- NameValue string
- PathValue addrs.ModuleInstance
-}
-
-var (
- _ GraphNodeSubPath = (*NodeProvisioner)(nil)
- _ GraphNodeProvisioner = (*NodeProvisioner)(nil)
- _ GraphNodeEvalable = (*NodeProvisioner)(nil)
-)
-
-func (n *NodeProvisioner) Name() string {
- result := fmt.Sprintf("provisioner.%s", n.NameValue)
- if len(n.PathValue) > 0 {
- result = fmt.Sprintf("%s.%s", n.PathValue.String(), result)
- }
-
- return result
-}
-
-// GraphNodeSubPath
-func (n *NodeProvisioner) Path() addrs.ModuleInstance {
- return n.PathValue
-}
-
-// GraphNodeProvisioner
-func (n *NodeProvisioner) ProvisionerName() string {
- return n.NameValue
-}
-
-// GraphNodeEvalable impl.
-func (n *NodeProvisioner) EvalTree() EvalNode {
- return &EvalInitProvisioner{Name: n.NameValue}
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go
deleted file mode 100644
index c7b0e3c8..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go
+++ /dev/null
@@ -1,446 +0,0 @@
-package terraform
-
-import (
- "fmt"
- "log"
- "sort"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/configs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
- "github.com/hashicorp/terraform-plugin-sdk/internal/dag"
- "github.com/hashicorp/terraform-plugin-sdk/internal/lang"
- "github.com/hashicorp/terraform-plugin-sdk/internal/states"
- "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-// ConcreteResourceNodeFunc is a callback type used to convert an
-// abstract resource to a concrete one of some type.
-type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
-
-// GraphNodeResource is implemented by any nodes that represent a resource.
-// The type of operation cannot be assumed, only that this node represents
-// the given resource.
-type GraphNodeResource interface {
- ResourceAddr() addrs.AbsResource
-}
-
-// ConcreteResourceInstanceNodeFunc is a callback type used to convert an
-// abstract resource instance to a concrete one of some type.
-type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex
-
-// GraphNodeResourceInstance is implemented by any nodes that represent
-// a resource instance.
A single resource may have multiple instances if,
-// for example, the "count" or "for_each" argument is used for it in
-// configuration.
-type GraphNodeResourceInstance interface {
- ResourceInstanceAddr() addrs.AbsResourceInstance
-}
-
-// NodeAbstractResource represents a resource that has no associated
-// operations. It registers all the interfaces for a resource that are common
-// across multiple operation types.
-type NodeAbstractResource struct {
- Addr addrs.AbsResource // Addr is the address for this resource
-
- // The fields below will be automatically set using the Attach
- // interfaces if you're running those transforms, but also be explicitly
- // set if you already have that information.
-
- Schema *configschema.Block // Schema for processing the configuration body
- SchemaVersion uint64 // Schema version of "Schema", as decided by the provider
- Config *configs.Resource // Config is the resource in the config
-
- ProvisionerSchemas map[string]*configschema.Block
-
- Targets []addrs.Targetable // Set from GraphNodeTargetable
-
- // The address of the provider this resource will use
- ResolvedProvider addrs.AbsProviderConfig
-}
-
-var (
- _ GraphNodeSubPath = (*NodeAbstractResource)(nil)
- _ GraphNodeReferenceable = (*NodeAbstractResource)(nil)
- _ GraphNodeReferencer = (*NodeAbstractResource)(nil)
- _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil)
- _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil)
- _ GraphNodeResource = (*NodeAbstractResource)(nil)
- _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil)
- _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil)
- _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil)
- _ GraphNodeTargetable = (*NodeAbstractResource)(nil)
- _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil)
-)
-
-// NewNodeAbstractResource creates an abstract resource graph node for
-// the given absolute resource address.
-func NewNodeAbstractResource(addr addrs.AbsResource) *NodeAbstractResource {
- return &NodeAbstractResource{
- Addr: addr,
- }
-}
-
-// NodeAbstractResourceInstance represents a resource instance with no
-// associated operations. It embeds NodeAbstractResource but additionally
-// contains an instance key, used to identify one of potentially many
-// instances that were created from a resource in configuration, e.g. using
-// the "count" or "for_each" arguments.
-type NodeAbstractResourceInstance struct {
- NodeAbstractResource
- InstanceKey addrs.InstanceKey
-
- // The fields below will be automatically set using the Attach
- // interfaces if you're running those transforms, but also be explicitly
- // set if you already have that information.
-
- ResourceState *states.Resource
-}
-
-var (
- _ GraphNodeSubPath = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeResource = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil)
- _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil)
- _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil)
-)
-
-// NewNodeAbstractResourceInstance creates an abstract resource instance graph
-// node for the given absolute resource instance address.
-func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance {
- // Due to the fact that we embed NodeAbstractResource, the given address
- // actually ends up split between the resource address in the embedded
- // object and the InstanceKey field in our own struct. The
- // ResourceInstanceAddr method will stick these back together again on
- // request.
- return &NodeAbstractResourceInstance{
- NodeAbstractResource: NodeAbstractResource{
- Addr: addr.ContainingResource(),
- },
- InstanceKey: addr.Resource.Key,
- }
-}
-
-func (n *NodeAbstractResource) Name() string {
- return n.ResourceAddr().String()
-}
-
-func (n *NodeAbstractResourceInstance) Name() string {
- return n.ResourceInstanceAddr().String()
-}
-
-// GraphNodeSubPath
-func (n *NodeAbstractResource) Path() addrs.ModuleInstance {
- return n.Addr.Module
-}
-
-// GraphNodeReferenceable
-func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable {
- return []addrs.Referenceable{n.Addr.Resource}
-}
-
-// GraphNodeReferenceable
-func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable {
- addr := n.ResourceInstanceAddr()
- return []addrs.Referenceable{
- addr.Resource,
-
- // A resource instance can also be referenced by the address of its
- // containing resource, so that e.g. a reference to aws_instance.foo
- // would match both aws_instance.foo[0] and aws_instance.foo[1].
- addr.ContainingResource().Resource,
- }
-}
-
-// GraphNodeReferencer
-func (n *NodeAbstractResource) References() []*addrs.Reference {
- // If we have a config then we prefer to use that.
- if c := n.Config; c != nil {
- var result []*addrs.Reference
-
- for _, traversal := range c.DependsOn {
- ref, err := addrs.ParseRef(traversal)
- if err != nil {
- // We ignore this here, because this isn't a suitable place to return
- // errors. This situation should be caught and rejected during
- // validation.
- log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, err)
- continue
- }
-
- result = append(result, ref)
- }
-
- if n.Schema == nil {
- // Should never happen, but we'll log if it does so that we can
- // see this easily when debugging.
- log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name())
- }
-
- refs, _ := lang.ReferencesInExpr(c.Count)
- result = append(result, refs...)
- refs, _ = lang.ReferencesInExpr(c.ForEach)
- result = append(result, refs...)
- refs, _ = lang.ReferencesInBlock(c.Config, n.Schema) - result = append(result, refs...) - if c.Managed != nil { - for _, p := range c.Managed.Provisioners { - if p.When != configs.ProvisionerWhenCreate { - continue - } - if p.Connection != nil { - refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema) - result = append(result, refs...) - } - - schema := n.ProvisionerSchemas[p.Type] - if schema == nil { - log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name()) - } - refs, _ = lang.ReferencesInBlock(p.Config, schema) - result = append(result, refs...) - } - } - return result - } - - // Otherwise, we have no references. - return nil -} - -// GraphNodeReferencer -func (n *NodeAbstractResourceInstance) References() []*addrs.Reference { - // If we have a configuration attached then we'll delegate to our - // embedded abstract resource, which knows how to extract dependencies - // from configuration. - if n.Config != nil { - if n.Schema == nil { - // We'll produce a log message about this out here so that - // we can include the full instance address, since the equivalent - // message in NodeAbstractResource.References cannot see it. - log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) - return nil - } - return n.NodeAbstractResource.References() - } - - // Otherwise, if we have state then we'll use the values stored in state - // as a fallback. - if rs := n.ResourceState; rs != nil { - if s := rs.Instance(n.InstanceKey); s != nil { - // State is still storing dependencies as old-style strings, so we'll - // need to do a little work here to massage this to the form we now - // want. - var result []*addrs.Reference - - // It is (apparently) possible for s.Current to be nil. This proved - // difficult to reproduce, so we will fix the symptom here and hope - // to find the root cause another time. - // - // https://github.com/hashicorp/terraform-plugin-sdk/issues/21407 - if s.Current == nil { - log.Printf("[WARN] no current state found for %s", n.Name()) - } else { - for _, addr := range s.Current.Dependencies { - if addr == nil { - // Should never happen; indicates a bug in the state loader - panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr())) - } - - // This is a little weird: we need to manufacture an addrs.Reference - // with a fake range here because the state isn't something we can - // make source references into. - result = append(result, &addrs.Reference{ - Subject: addr, - SourceRange: tfdiags.SourceRange{ - Filename: "(state file)", - }, - }) - } - } - return result - } - } - - // If we have neither config nor state then we have no references. - return nil -} - -// StateReferences returns the dependencies to put into the state for -// this resource. -func (n *NodeAbstractResourceInstance) StateReferences() []addrs.Referenceable { - selfAddrs := n.ReferenceableAddrs() - - // Since we don't include the source location references in our - // results from this method, we'll also filter out duplicates: - // there's no point in listing the same object twice without - // that additional context. - seen := map[string]struct{}{} - - // Pretend that we've already "seen" all of our own addresses so that we - // won't record self-references in the state. 
This can arise if, for - // example, a provisioner for a resource refers to the resource itself, - // which is valid (since provisioners always run after apply) but should - // not create an explicit dependency edge. - for _, selfAddr := range selfAddrs { - seen[selfAddr.String()] = struct{}{} - if riAddr, ok := selfAddr.(addrs.ResourceInstance); ok { - seen[riAddr.ContainingResource().String()] = struct{}{} - } - } - - depsRaw := n.References() - deps := make([]addrs.Referenceable, 0, len(depsRaw)) - for _, d := range depsRaw { - subj := d.Subject - if mco, isOutput := subj.(addrs.ModuleCallOutput); isOutput { - // For state dependencies, we simplify outputs to just refer - // to the module as a whole. It's not really clear why we do this, - // but this logic is preserved from before the 0.12 rewrite of - // this function. - subj = mco.Call - } - - k := subj.String() - if _, exists := seen[k]; exists { - continue - } - seen[k] = struct{}{} - switch tr := subj.(type) { - case addrs.ResourceInstance: - deps = append(deps, tr) - case addrs.Resource: - deps = append(deps, tr) - case addrs.ModuleCallInstance: - deps = append(deps, tr) - default: - // No other reference types are recorded in the state. - } - } - - // We'll also sort them, since that'll avoid creating changes in the - // serialized state that make no semantic difference. - sort.Slice(deps, func(i, j int) bool { - // Simple string-based sort because we just care about consistency, - // not user-friendliness. - return deps[i].String() < deps[j].String() - }) - - return deps -} - -func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) { - n.ResolvedProvider = p -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResource) ProvidedBy() (addrs.AbsProviderConfig, bool) { - // If we have a config we prefer that above all else - if n.Config != nil { - relAddr := n.Config.ProviderConfigAddr() - return relAddr.Absolute(n.Path()), false - } - - // Use our type and containing module path to guess a provider configuration address - return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Addr.Module), false -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.AbsProviderConfig, bool) { - // If we have a config we prefer that above all else - if n.Config != nil { - relAddr := n.Config.ProviderConfigAddr() - return relAddr.Absolute(n.Path()), false - } - - // If we have state, then we will use the provider from there - if n.ResourceState != nil { - // An address from the state must match exactly, since we must ensure - // we refresh/destroy a resource with the same provider configuration - // that created it. - return n.ResourceState.ProviderConfig, true - } - - // Use our type and containing module path to guess a provider configuration address - return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Path()), false -} - -// GraphNodeProvisionerConsumer -func (n *NodeAbstractResource) ProvisionedBy() []string { - // If we have no configuration, then we have no provisioners - if n.Config == nil || n.Config.Managed == nil { - return nil - } - - // Build the list of provisioners we need based on the configuration. - // It is okay to have duplicates here. 
- result := make([]string, len(n.Config.Managed.Provisioners))
- for i, p := range n.Config.Managed.Provisioners {
- result[i] = p.Type
- }
-
- return result
-}
-
-// GraphNodeProvisionerConsumer
-func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) {
- if n.ProvisionerSchemas == nil {
- n.ProvisionerSchemas = make(map[string]*configschema.Block)
- }
- n.ProvisionerSchemas[name] = schema
-}
-
-// GraphNodeResource
-func (n *NodeAbstractResource) ResourceAddr() addrs.AbsResource {
- return n.Addr
-}
-
-// GraphNodeResourceInstance
-func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance {
- return n.NodeAbstractResource.Addr.Instance(n.InstanceKey)
-}
-
-// GraphNodeAddressable, TODO: remove, used by target, should unify
-func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
- return NewLegacyResourceAddress(n.Addr)
-}
-
-// GraphNodeTargetable
-func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) {
- n.Targets = targets
-}
-
-// GraphNodeAttachResourceState
-func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) {
- n.ResourceState = s
-}
-
-// GraphNodeAttachResourceConfig
-func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) {
- n.Config = c
-}
-
-// GraphNodeAttachResourceSchema impl
-func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) {
- n.Schema = schema
- n.SchemaVersion = version
-}
-
-// GraphNodeDotter impl.
-func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
- return &dag.DotNode{
- Name: name,
- Attrs: map[string]string{
- "label": n.Name(),
- "shape": "box",
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go
deleted file mode 100644
index 68d438d7..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package terraform
-
-import (
- "log"
-
- "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
- "github.com/hashicorp/terraform-plugin-sdk/internal/dag"
- "github.com/hashicorp/terraform-plugin-sdk/internal/lang"
-)
-
-// NodeApplyableResource represents a resource that is "applyable":
-// it may need to have its record in the state adjusted to match configuration.
-//
-// Unlike in the plan walk, this resource node does not DynamicExpand. Instead,
-// it should be inserted into the same graph as any instances of the nodes
-// with dependency edges ensuring that the resource is evaluated before any
-// of its instances, which will in turn ensure that the whole-resource record
-// in the state is suitably prepared to receive any updates to instances.
-type NodeApplyableResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeResource = (*NodeApplyableResource)(nil) - _ GraphNodeEvalable = (*NodeApplyableResource)(nil) - _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil) - _ GraphNodeReferencer = (*NodeApplyableResource)(nil) -) - -func (n *NodeApplyableResource) Name() string { - return n.NodeAbstractResource.Name() + " (prepare state)" -} - -func (n *NodeApplyableResource) References() []*addrs.Reference { - if n.Config == nil { - log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n)) - return nil - } - - var result []*addrs.Reference - - // Since this node type only updates resource-level metadata, we only - // need to worry about the parts of the configuration that affect - // our "each mode": the count and for_each meta-arguments. - refs, _ := lang.ReferencesInExpr(n.Config.Count) - result = append(result, refs...) - refs, _ = lang.ReferencesInExpr(n.Config.ForEach) - result = append(result, refs...) - - return result -} - -// GraphNodeEvalable -func (n *NodeApplyableResource) EvalTree() EvalNode { - addr := n.ResourceAddr() - config := n.Config - providerAddr := n.ResolvedProvider - - if config == nil { - // Nothing to do, then. - log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) - return &EvalNoop{} - } - - return &EvalWriteResourceState{ - Addr: addr.Resource, - Config: config, - ProviderAddr: providerAddr, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go deleted file mode 100644 index acdda45e..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go +++ /dev/null @@ -1,426 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// NodeApplyableResourceInstance represents a resource instance that is -// "applyable": it is ready to be applied and is represented by a diff. -// -// This node is for a specific instance of a resource. It will usually be -// accompanied in the graph by a NodeApplyableResource representing its -// containing resource, and should depend on that node to ensure that the -// state is properly prepared to receive changes to instances. 
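// A note on a pattern in the References override below: "newRef := *ref"
// copies the struct value, so mutating the copy's fields leaves the
// original reference untouched. The same idea in isolation, with a
// hypothetical ref type:
//
//	package main
//
//	import "fmt"
//
//	type ref struct {
//		subject   string
//		remaining []string
//	}
//
//	func main() {
//		orig := &ref{subject: "aws_instance.a", remaining: []string{"id"}}
//
//		dup := *orig        // shallow copy so we can mutate
//		dup.subject += " (destroy)"
//		dup.remaining = nil // the copy no longer reaches into attributes
//
//		fmt.Println(orig.subject, len(orig.remaining)) // aws_instance.a 1
//		fmt.Println(dup.subject, len(dup.remaining))   // aws_instance.a (destroy) 0
//	}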
-type NodeApplyableResourceInstance struct {
-    *NodeAbstractResourceInstance
-
-    destroyNode      GraphNodeDestroyerCBD
-    graphNodeDeposer // implementation of GraphNodeDeposer
-}
-
-var (
-    _ GraphNodeResource         = (*NodeApplyableResourceInstance)(nil)
-    _ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil)
-    _ GraphNodeCreator          = (*NodeApplyableResourceInstance)(nil)
-    _ GraphNodeReferencer       = (*NodeApplyableResourceInstance)(nil)
-    _ GraphNodeDeposer          = (*NodeApplyableResourceInstance)(nil)
-    _ GraphNodeEvalable         = (*NodeApplyableResourceInstance)(nil)
-)
-
-// GraphNodeAttachDestroyer
-func (n *NodeApplyableResourceInstance) AttachDestroyNode(d GraphNodeDestroyerCBD) {
-    n.destroyNode = d
-}
-
-// createBeforeDestroy checks this node's config status and the status of any
-// companion destroy node for CreateBeforeDestroy.
-func (n *NodeApplyableResourceInstance) createBeforeDestroy() bool {
-    cbd := false
-
-    if n.Config != nil && n.Config.Managed != nil {
-        cbd = n.Config.Managed.CreateBeforeDestroy
-    }
-
-    if n.destroyNode != nil {
-        cbd = cbd || n.destroyNode.CreateBeforeDestroy()
-    }
-
-    return cbd
-}
-
-// GraphNodeCreator
-func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance {
-    addr := n.ResourceInstanceAddr()
-    return &addr
-}
-
-// GraphNodeReferencer, overriding NodeAbstractResourceInstance
-func (n *NodeApplyableResourceInstance) References() []*addrs.Reference {
-    // Start with the usual resource instance implementation
-    ret := n.NodeAbstractResourceInstance.References()
-
-    // Applying a resource must also depend on the destruction of any of its
-    // dependencies, since this may for example affect the outcome of
-    // evaluating an entire list of resources with "count" set (by reducing
-    // the count).
-    //
-    // However, we can't do this in create_before_destroy mode because that
-    // would create a dependency cycle. We make a compromise here of requiring
-    // changes to be updated across two applies in this case, since the first
-    // plan will use the old values.
-    if !n.createBeforeDestroy() {
-        for _, ref := range ret {
-            switch tr := ref.Subject.(type) {
-            case addrs.ResourceInstance:
-                newRef := *ref // shallow copy so we can mutate
-                newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
-                newRef.Remaining = nil // can't access attributes of something being destroyed
-                ret = append(ret, &newRef)
-            case addrs.Resource:
-                newRef := *ref // shallow copy so we can mutate
-                newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
-                newRef.Remaining = nil // can't access attributes of something being destroyed
-                ret = append(ret, &newRef)
-            }
-        }
-    }
-
-    return ret
-}
-
-// GraphNodeEvalable
-func (n *NodeApplyableResourceInstance) EvalTree() EvalNode {
-    addr := n.ResourceInstanceAddr()
-
-    if n.Config == nil {
-        // This should not be possible, but we've got here in at least one
-        // case as discussed in the following issue:
-        // https://github.com/hashicorp/terraform-plugin-sdk/issues/21258
-        // To avoid an outright crash here, we'll instead return an explicit
-        // error.
-        var diags tfdiags.Diagnostics
-        diags = diags.Append(tfdiags.Sourceless(
-            tfdiags.Error,
-            "Resource node has no configuration attached",
-            fmt.Sprintf(
-                "The graph node for %s has no configuration attached to it. 
This suggests a bug in Terraform's apply graph builder; please report it!", - addr, - ), - )) - err := diags.Err() - return &EvalReturnError{ - Error: &err, - } - } - - // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case addrs.ManagedResourceMode: - return n.evalTreeManagedResource(addr) - case addrs.DataResourceMode: - return n.evalTreeDataResource(addr) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - - // Stop early if we don't actually have a diff - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if change == nil { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - // In this particular call to EvalReadData we include our planned - // change, which signals that we expect this read to complete fully - // with no unknown values; it'll produce an error if not. - &EvalReadData{ - Addr: addr.Resource, - Config: n.Config, - Dependencies: n.StateReferences(), - Planned: &change, // setting this indicates that the result must be complete - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - OutputState: &state, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - - // Clear the diff now that we've applied it, so - // later nodes won't see a diff that's now a no-op. - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: nil, - }, - - &EvalUpdateStateHook{}, - }, - } -} - -func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. 
- var provider providers.Interface - var providerSchema *ProviderSchema - var diff, diffApply *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var err error - var createNew bool - var createBeforeDestroyEnabled bool - var configVal cty.Value - var deposedKey states.DeposedKey - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &diffApply, - }, - - // We don't want to do any destroys - // (these are handled by NodeDestroyResourceInstance instead) - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil { - return true, EvalEarlyExitError{} - } - if diffApply.Action == plans.Delete { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - destroy := false - if diffApply != nil { - destroy = (diffApply.Action == plans.Delete || diffApply.Action.IsReplace()) - } - if destroy && n.createBeforeDestroy() { - createBeforeDestroyEnabled = true - } - return createBeforeDestroyEnabled, nil - }, - Then: &EvalDeposeState{ - Addr: addr.Resource, - ForceKey: n.PreallocatedDeposedKey, - OutputKey: &deposedKey, - }, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - // Get the saved diff - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &diff, - }, - - // Make a new diff, in case we've learned new values in the state - // during apply which we can now incorporate. - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - PreviousDiff: &diff, - OutputChange: &diffApply, - OutputValue: &configVal, - OutputState: &state, - }, - - // Compare the diffs - &EvalCheckPlannedChange{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Planned: &diff, - Actual: &diffApply, - }, - - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalReduceDiff{ - Addr: addr.Resource, - InChange: &diffApply, - Destroy: false, - OutChange: &diffApply, - }, - - // EvalReduceDiff may have simplified our planned change - // into a NoOp if it only requires destroying, since destroying - // is handled by NodeDestroyResourceInstance. 
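// Note on the idiom used here and throughout these eval trees (a sketch of
// the mechanism as inferred from this file, not text from the vendored
// source): an EvalIf condition that returns EvalEarlyExitError{} stops the
// enclosing EvalSequence without failing the walk, so "Then: EvalNoop{}" is
// deliberately a placeholder and the condition closure does the real work:
//
//    &EvalIf{
//        If: func(ctx EvalContext) (bool, error) {
//            if nothingLeftToDo { // hypothetical condition
//                return true, EvalEarlyExitError{} // stop the sequence; not a failure
//            }
//            return true, nil // fall through to the next node
//        },
//        Then: EvalNoop{},
//    }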
- &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil || diffApply.Action == plans.NoOp { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - }, - &EvalApply{ - Addr: addr.Resource, - Config: n.Config, - Dependencies: n.StateReferences(), - State: &state, - Change: &diffApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - CreateNew: &createNew, - }, - &EvalMaybeTainted{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - Error: &err, - StateOutput: &state, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalApplyProvisioners{ - Addr: addr.Resource, - State: &state, // EvalApplyProvisioners will skip if already tainted - ResourceConfig: n.Config, - CreateNew: &createNew, - Error: &err, - When: configs.ProvisionerWhenCreate, - }, - &EvalMaybeTainted{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - Error: &err, - StateOutput: &state, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return createBeforeDestroyEnabled && err != nil, nil - }, - Then: &EvalMaybeRestoreDeposedObject{ - Addr: addr.Resource, - Key: &deposedKey, - }, - }, - - // We clear the diff out here so that future nodes - // don't see a diff that is already complete. There - // is no longer a diff! - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if !diff.Action.IsReplace() { - return true, nil - } - if !n.createBeforeDestroy() { - return true, nil - } - return false, nil - }, - Then: &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: nil, - }, - }, - - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go deleted file mode 100644 index 049e5e99..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go +++ /dev/null @@ -1,321 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// NodeDestroyResourceInstance represents a resource instance that is to be -// destroyed. -type NodeDestroyResourceInstance struct { - *NodeAbstractResourceInstance - - // If DeposedKey is set to anything other than states.NotDeposed then - // this node destroys a deposed object of the associated instance - // rather than its current object. 
- DeposedKey states.DeposedKey - - CreateBeforeDestroyOverride *bool -} - -var ( - _ GraphNodeResource = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil) -) - -func (n *NodeDestroyResourceInstance) Name() string { - if n.DeposedKey != states.NotDeposed { - return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) - } - return n.ResourceInstanceAddr().String() + " (destroy)" -} - -// GraphNodeDestroyer -func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool { - if n.CreateBeforeDestroyOverride != nil { - return *n.CreateBeforeDestroyOverride - } - - // If we have no config, we just assume no - if n.Config == nil || n.Config.Managed == nil { - return false - } - - return n.Config.Managed.CreateBeforeDestroy -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error { - n.CreateBeforeDestroyOverride = &v - return nil -} - -// GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable { - normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs() - destroyAddrs := make([]addrs.Referenceable, len(normalAddrs)) - - phaseType := addrs.ResourceInstancePhaseDestroy - if n.CreateBeforeDestroy() { - phaseType = addrs.ResourceInstancePhaseDestroyCBD - } - - for i, normalAddr := range normalAddrs { - switch ta := normalAddr.(type) { - case addrs.Resource: - destroyAddrs[i] = ta.Phase(phaseType) - case addrs.ResourceInstance: - destroyAddrs[i] = ta.Phase(phaseType) - default: - destroyAddrs[i] = normalAddr - } - } - - return destroyAddrs -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResourceInstance) References() []*addrs.Reference { - // If we have a config, then we need to include destroy-time dependencies - if c := n.Config; c != nil && c.Managed != nil { - var result []*addrs.Reference - - // We include conn info and config for destroy time provisioners - // as dependencies that we have. - for _, p := range c.Managed.Provisioners { - schema := n.ProvisionerSchemas[p.Type] - - if p.When == configs.ProvisionerWhenDestroy { - if p.Connection != nil { - result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...) - } - result = append(result, ReferencesFromConfig(p.Config, schema)...) 
- } - } - - return result - } - - return nil -} - -// GraphNodeEvalable -func (n *NodeDestroyResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Get our state - rs := n.ResourceState - var is *states.ResourceInstance - if rs != nil { - is = rs.Instance(n.InstanceKey) - } - if is == nil { - log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr) - } - - var changeApply *plans.ResourceInstanceChange - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - var err error - return &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &changeApply, - }, - - &EvalReduceDiff{ - Addr: addr.Resource, - InChange: &changeApply, - Destroy: true, - OutChange: &changeApply, - }, - - // EvalReduceDiff may have simplified our planned change - // into a NoOp if it does not require destroying. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if changeApply == nil || changeApply.Action == plans.NoOp { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalReadState{ - Addr: addr.Resource, - Output: &state, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalRequireState{ - State: &state, - }, - - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &changeApply, - }, - - // Run destroy provisioners if not tainted - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if state != nil && state.Status == states.ObjectTainted { - return false, nil - } - - return true, nil - }, - - Then: &EvalApplyProvisioners{ - Addr: addr.Resource, - State: &state, - ResourceConfig: n.Config, - Error: &err, - When: configs.ProvisionerWhenDestroy, - }, - }, - - // If we have a provisioning error, then we just call - // the post-apply hook now. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return err != nil, nil - }, - - Then: &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - }, - - // Make sure we handle data sources properly. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return addr.Resource.Resource.Mode == addrs.DataResourceMode, nil - }, - - Then: &EvalReadDataApply{ - Addr: addr.Resource, - Config: n.Config, - Change: &changeApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - }, - Else: &EvalApply{ - Addr: addr.Resource, - Config: nil, // No configuration because we are destroying - State: &state, - Change: &changeApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - }, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - }, - } -} - -// NodeDestroyResourceInstance represents a resource that is to be destroyed. -// -// Destroying a resource is a state-only operation: it is the individual -// instances being destroyed that affects remote objects. 
During graph -// construction, NodeDestroyResource should always depend on any other node -// related to the given resource, since it's just a final cleanup to avoid -// leaving skeleton resource objects in state after their instances have -// all been destroyed. -type NodeDestroyResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeResource = (*NodeDestroyResource)(nil) - _ GraphNodeReferenceable = (*NodeDestroyResource)(nil) - _ GraphNodeReferencer = (*NodeDestroyResource)(nil) - _ GraphNodeEvalable = (*NodeDestroyResource)(nil) -) - -func (n *NodeDestroyResource) Name() string { - return n.ResourceAddr().String() + " (clean up state)" -} - -// GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResource) ReferenceableAddrs() []addrs.Referenceable { - // NodeDestroyResource doesn't participate in references: the graph - // builder that created it should ensure directly that it already depends - // on every other node related to its resource, without relying on - // references. - return nil -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResource) References() []*addrs.Reference { - // NodeDestroyResource doesn't participate in references: the graph - // builder that created it should ensure directly that it already depends - // on every other node related to its resource, without relying on - // references. - return nil -} - -// GraphNodeEvalable -func (n *NodeDestroyResource) EvalTree() EvalNode { - // This EvalNode will produce an error if the resource isn't already - // empty by the time it is called, since it should just be pruning the - // leftover husk of a resource in state after all of the child instances - // and their objects were destroyed. - return &EvalForgetResourceState{ - Addr: n.ResourceAddr().Resource, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go deleted file mode 100644 index 269c7980..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go +++ /dev/null @@ -1,313 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert -// an abstract resource instance to a concrete one of some type that has -// an associated deposed object key. -type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex - -type GraphNodeDeposedResourceInstanceObject interface { - DeposedInstanceObjectKey() states.DeposedKey -} - -// NodePlanDeposedResourceInstanceObject represents deposed resource -// instance objects during plan. These are distinct from the primary object -// for each resource instance since the only valid operation to do with them -// is to destroy them. -// -// This node type is also used during the refresh walk to ensure that the -// record of a deposed object is up-to-date before we plan to destroy it. 
-type NodePlanDeposedResourceInstanceObject struct { - *NodeAbstractResourceInstance - DeposedKey states.DeposedKey -} - -var ( - _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeResource = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeEvalable = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) -) - -func (n *NodePlanDeposedResourceInstanceObject) Name() string { - return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey) -} - -func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { - return n.DeposedKey -} - -// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { - // Deposed objects don't participate in references. - return nil -} - -// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference { - // We don't evaluate configuration for deposed objects, so they effectively - // make no references. - return nil -} - -// GraphNodeEvalable impl. -func (n *NodePlanDeposedResourceInstanceObject) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - - seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} - - // During the refresh walk we will ensure that our record of the deposed - // object is up-to-date. If it was already deleted outside of Terraform - // then this will remove it from state and thus avoid us planning a - // destroy for it during the subsequent plan walk. - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - Key: n.DeposedKey, - Output: &state, - }, - &EvalRefresh{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - &EvalWriteStateDeposed{ - Addr: addr.Resource, - Key: n.DeposedKey, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - }, - }) - - // During the plan walk we always produce a planned destroy change, because - // destroying is the only supported action for deposed objects. 
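// (For context: EvalOpFilter evaluates its wrapped node only during the walk
// operations listed in Ops, so the refresh-only sequence above and the
// plan-only sequence below are never active during the same walk.)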
- var change *plans.ResourceInstanceChange - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkPlan, walkPlanDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Output: &state, - Key: n.DeposedKey, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - DeposedKey: n.DeposedKey, - State: &state, - Output: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - DeposedKey: n.DeposedKey, - ProviderSchema: &providerSchema, - Change: &change, - }, - // Since deposed objects cannot be referenced by expressions - // elsewhere, we don't need to also record the planned new - // state in this case. - }, - }, - }) - - return seq -} - -// NodeDestroyDeposedResourceInstanceObject represents deposed resource -// instance objects during apply. Nodes of this type are inserted by -// DiffTransformer when the planned changeset contains "delete" changes for -// deposed instance objects, and its only supported operation is to destroy -// and then forget the associated object. -type NodeDestroyDeposedResourceInstanceObject struct { - *NodeAbstractResourceInstance - DeposedKey states.DeposedKey -} - -var ( - _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeResource = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeEvalable = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) -) - -func (n *NodeDestroyDeposedResourceInstanceObject) Name() string { - return fmt.Sprintf("%s (destroy deposed %s)", n.Addr.String(), n.DeposedKey) -} - -func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { - return n.DeposedKey -} - -// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { - // Deposed objects don't participate in references. - return nil -} - -// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference { - // We don't evaluate configuration for deposed objects, so they effectively - // make no references. - return nil -} - -// GraphNodeDestroyer -func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool { - // A deposed instance is always CreateBeforeDestroy by definition, since - // we use deposed only to handle create-before-destroy. 
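// (An object only becomes deposed after its replacement has already been
// created, so the create-before-destroy ordering has, by construction,
// already taken place.)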
- return true -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error { - if !v { - // Should never happen: deposed instances are _always_ create_before_destroy. - return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance") - } - return nil -} - -// GraphNodeEvalable impl. -func (n *NodeDestroyDeposedResourceInstanceObject) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - var change *plans.ResourceInstanceChange - var err error - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Output: &state, - Key: n.DeposedKey, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - Output: &change, - }, - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &change, - }, - &EvalApply{ - Addr: addr.Resource, - Config: nil, // No configuration because we are destroying - State: &state, - Change: &change, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - }, - // Always write the resource back to the state deposed... if it - // was successfully destroyed it will be pruned. If it was not, it will - // be caught on the next run. - &EvalWriteStateDeposed{ - Addr: addr.Resource, - Key: n.DeposedKey, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalReturnError{ - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - } -} - -// GraphNodeDeposer is an optional interface implemented by graph nodes that -// might create a single new deposed object for a specific associated resource -// instance, allowing a caller to optionally pre-allocate a DeposedKey for -// it. -type GraphNodeDeposer interface { - // SetPreallocatedDeposedKey will be called during graph construction - // if a particular node must use a pre-allocated deposed key if/when it - // "deposes" the current object of its associated resource instance. - SetPreallocatedDeposedKey(key states.DeposedKey) -} - -// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer. -// Embed it in a node type to get automatic support for it, and then access -// the field PreallocatedDeposedKey to access any pre-allocated key. 
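// A minimal sketch of the intended embedding (the node type below is
// hypothetical, for illustration; compare NodeApplyableResourceInstance
// earlier in this changeset, which embeds it in exactly this way):
//
//    type someNode struct {
//        *NodeAbstractResourceInstance
//        graphNodeDeposer // satisfies GraphNodeDeposer via embedding
//    }
//
//    var _ GraphNodeDeposer = (*someNode)(nil)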
-type graphNodeDeposer struct { - PreallocatedDeposedKey states.DeposedKey -} - -func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) { - n.PreallocatedDeposedKey = key -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go deleted file mode 100644 index 2dc0df90..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go +++ /dev/null @@ -1,166 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// NodePlannableResource represents a resource that is "plannable": -// it is ready to be planned in order to create a diff. -type NodePlannableResource struct { - *NodeAbstractResource - - // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD - // during graph construction, if dependencies require us to force this - // on regardless of what the configuration says. - ForceCreateBeforeDestroy *bool -} - -var ( - _ GraphNodeSubPath = (*NodePlannableResource)(nil) - _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil) - _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil) - _ GraphNodeReferenceable = (*NodePlannableResource)(nil) - _ GraphNodeReferencer = (*NodePlannableResource)(nil) - _ GraphNodeResource = (*NodePlannableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil) -) - -// GraphNodeEvalable -func (n *NodePlannableResource) EvalTree() EvalNode { - addr := n.ResourceAddr() - config := n.Config - - if config == nil { - // Nothing to do, then. - log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) - return &EvalNoop{} - } - - // this ensures we can reference the resource even if the count is 0 - return &EvalWriteResourceState{ - Addr: addr.Resource, - Config: config, - ProviderAddr: n.ResolvedProvider, - } -} - -// GraphNodeDestroyerCBD -func (n *NodePlannableResource) CreateBeforeDestroy() bool { - if n.ForceCreateBeforeDestroy != nil { - return *n.ForceCreateBeforeDestroy - } - - // If we have no config, we just assume no - if n.Config == nil || n.Config.Managed == nil { - return false - } - - return n.Config.Managed.CreateBeforeDestroy -} - -// GraphNodeDestroyerCBD -func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error { - n.ForceCreateBeforeDestroy = &v - return nil -} - -// GraphNodeDynamicExpandable -func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - - forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. 
- state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - - return &NodePlannableResourceInstance{ - NodeAbstractResourceInstance: a, - - // By the time we're walking, we've figured out whether we need - // to force on CreateBeforeDestroy due to dependencies on other - // nodes that have it. - ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), - } - } - - // The concrete resource factory we'll use for orphans - concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count or for_each (if present) - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - }, - - // Add the count/for_each orphans - &OrphanResourceCountTransformer{ - Concrete: concreteResourceOrphan, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodePlannableResource", - } - graph, diags := b.Build(ctx.Path()) - return graph, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go deleted file mode 100644 index 2c3a7012..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go +++ /dev/null @@ -1,88 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// NodePlanDestroyableResourceInstance represents a resource that is ready -// to be planned for destruction. 
-type NodePlanDestroyableResourceInstance struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeSubPath = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeResource = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil) -) - -// GraphNodeDestroyer -func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeEvalable -func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. These are written to by address in the EvalNodes we - // declare below. - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - if n.ResolvedProvider.ProviderConfig.Type == "" { - // Should never happen; indicates that the graph was not constructed - // correctly since we didn't get our provider attached. - panic(fmt.Sprintf("%T %q was not assigned a resolved provider", n, dag.VertexName(n))) - } - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - Output: &change, - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go deleted file mode 100644 index ac4b24cf..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go +++ /dev/null @@ -1,201 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// NodePlannableResourceInstance represents a _single_ resource -// instance that is plannable. This means this represents a single -// count index, for example. 
-type NodePlannableResourceInstance struct {
-    *NodeAbstractResourceInstance
-    ForceCreateBeforeDestroy bool
-}
-
-var (
-    _ GraphNodeSubPath              = (*NodePlannableResourceInstance)(nil)
-    _ GraphNodeReferenceable        = (*NodePlannableResourceInstance)(nil)
-    _ GraphNodeReferencer           = (*NodePlannableResourceInstance)(nil)
-    _ GraphNodeResource             = (*NodePlannableResourceInstance)(nil)
-    _ GraphNodeResourceInstance     = (*NodePlannableResourceInstance)(nil)
-    _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
-    _ GraphNodeAttachResourceState  = (*NodePlannableResourceInstance)(nil)
-    _ GraphNodeEvalable             = (*NodePlannableResourceInstance)(nil)
-)
-
-// GraphNodeEvalable
-func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
-    addr := n.ResourceInstanceAddr()
-
-    // Eval info is different depending on what kind of resource this is
-    switch addr.Resource.Resource.Mode {
-    case addrs.ManagedResourceMode:
-        return n.evalTreeManagedResource(addr)
-    case addrs.DataResourceMode:
-        return n.evalTreeDataResource(addr)
-    default:
-        panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
-    }
-}
-
-func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode {
-    config := n.Config
-    var provider providers.Interface
-    var providerSchema *ProviderSchema
-    var change *plans.ResourceInstanceChange
-    var state *states.ResourceInstanceObject
-    var configVal cty.Value
-
-    return &EvalSequence{
-        Nodes: []EvalNode{
-            &EvalGetProvider{
-                Addr:   n.ResolvedProvider,
-                Output: &provider,
-                Schema: &providerSchema,
-            },
-
-            &EvalReadState{
-                Addr:           addr.Resource,
-                Provider:       &provider,
-                ProviderSchema: &providerSchema,
-
-                Output: &state,
-            },
-
-            // If we already have a non-planned state then we already dealt
-            // with this during the refresh walk and so we have nothing to do
-            // here.
-            &EvalIf{
-                If: func(ctx EvalContext) (bool, error) {
-                    depChanges := false
-
-                    // Check and see if any of our dependencies have changes.
-                    changes := ctx.Changes()
-                    for _, d := range n.StateReferences() {
-                        ri, ok := d.(addrs.ResourceInstance)
-                        if !ok {
-                            continue
-                        }
-                        change := changes.GetResourceInstanceChange(ri.Absolute(ctx.Path()), states.CurrentGen)
-                        if change != nil && change.Action != plans.NoOp {
-                            depChanges = true
-                            break
-                        }
-                    }
-
-                    refreshed := state != nil && state.Status != states.ObjectPlanned
-
-                    // If there are no dependency changes, and it's not a forced
-                    // read because there was no Refresh, then we don't need
-                    // to re-read. If any dependencies have changes, it means
-                    // our config may also have changes and we need to Read the
-                    // data source again.
- if !depChanges && refreshed { - return false, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalValidateSelfRef{ - Addr: addr.Resource, - Config: config.Config, - ProviderSchema: &providerSchema, - }, - - &EvalReadData{ - Addr: addr.Resource, - Config: n.Config, - Dependencies: n.StateReferences(), - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - ForcePlanRead: true, // _always_ produce a Read change, even if the config seems ready - OutputChange: &change, - OutputValue: &configVal, - OutputState: &state, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} - -func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { - config := n.Config - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalValidateSelfRef{ - Addr: addr.Resource, - Config: config.Config, - ProviderSchema: &providerSchema, - }, - - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - CreateBeforeDestroy: n.ForceCreateBeforeDestroy, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - OutputChange: &change, - OutputState: &state, - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go deleted file mode 100644 index 8e4f7148..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go +++ /dev/null @@ -1,84 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// NodePlannableResourceInstanceOrphan represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. 
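// (Here an "orphan" is an instance that still exists in state but no longer
// has a matching configuration, for example after count was reduced; the only
// plannable action for it is a destroy, as the eval tree below shows.)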
-type NodePlannableResourceInstanceOrphan struct {
-    *NodeAbstractResourceInstance
-}
-
-var (
-    _ GraphNodeSubPath              = (*NodePlannableResourceInstanceOrphan)(nil)
-    _ GraphNodeReferenceable        = (*NodePlannableResourceInstanceOrphan)(nil)
-    _ GraphNodeReferencer           = (*NodePlannableResourceInstanceOrphan)(nil)
-    _ GraphNodeResource             = (*NodePlannableResourceInstanceOrphan)(nil)
-    _ GraphNodeResourceInstance     = (*NodePlannableResourceInstanceOrphan)(nil)
-    _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil)
-    _ GraphNodeAttachResourceState  = (*NodePlannableResourceInstanceOrphan)(nil)
-    _ GraphNodeEvalable             = (*NodePlannableResourceInstanceOrphan)(nil)
-)
-
-var (
-    _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil)
-)
-
-func (n *NodePlannableResourceInstanceOrphan) Name() string {
-    return n.ResourceInstanceAddr().String() + " (orphan)"
-}
-
-// GraphNodeEvalable
-func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode {
-    addr := n.ResourceInstanceAddr()
-
-    // Declare a bunch of variables that are used for state during
-    // evaluation. Most of these are written to by address below.
-    var change *plans.ResourceInstanceChange
-    var state *states.ResourceInstanceObject
-    var provider providers.Interface
-    var providerSchema *ProviderSchema
-
-    return &EvalSequence{
-        Nodes: []EvalNode{
-            &EvalGetProvider{
-                Addr:   n.ResolvedProvider,
-                Output: &provider,
-                Schema: &providerSchema,
-            },
-            &EvalReadState{
-                Addr:           addr.Resource,
-                Provider:       &provider,
-                ProviderSchema: &providerSchema,
-
-                Output: &state,
-            },
-            &EvalDiffDestroy{
-                Addr:         addr.Resource,
-                State:        &state,
-                ProviderAddr: n.ResolvedProvider,
-                Output:       &change,
-                OutputState:  &state, // Will point to a nil state after this completes, signalling destroyed
-            },
-            &EvalCheckPreventDestroy{
-                Addr:   addr.Resource,
-                Config: n.Config,
-                Change: &change,
-            },
-            &EvalWriteDiff{
-                Addr:           addr.Resource,
-                ProviderSchema: &providerSchema,
-                Change:         &change,
-            },
-            &EvalWriteState{
-                Addr:           addr.Resource,
-                ProviderAddr:   n.ResolvedProvider,
-                ProviderSchema: &providerSchema,
-                State:          &state,
-            },
-        },
-    }
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go
deleted file mode 100644
index dcab3727..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go
+++ /dev/null
@@ -1,296 +0,0 @@
-package terraform
-
-import (
-    "fmt"
-    "log"
-
-    "github.com/hashicorp/terraform-plugin-sdk/internal/plans"
-    "github.com/hashicorp/terraform-plugin-sdk/internal/providers"
-
-    "github.com/hashicorp/terraform-plugin-sdk/internal/states"
-
-    "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-    "github.com/hashicorp/terraform-plugin-sdk/internal/dag"
-    "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-// NodeRefreshableManagedResource represents a resource that is expandable into
-// NodeRefreshableManagedResourceInstance. Resource count orphans are also added.
-type NodeRefreshableManagedResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeSubPath = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeDynamicExpandable = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeReferencer = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeResource = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil) -) - -// GraphNodeDynamicExpandable -func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - - forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - - return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - }, - - // Add the count orphans to make sure these resources are accounted for - // during a scale in. - &OrphanResourceCountTransformer{ - Concrete: concreteResource, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeRefreshableManagedResource", - } - - graph, diags := b.Build(ctx.Path()) - return graph, diags.ErrWithWarnings() -} - -// NodeRefreshableManagedResourceInstance represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. 
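// (In practice this node drives the refresh walk for a single managed
// resource instance: it re-reads the remote object and writes the result back
// to state, delegating to the data-source node types for data resources, as
// the EvalTree below shows.)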
-type NodeRefreshableManagedResourceInstance struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeSubPath = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeResource = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil) -) - -// GraphNodeDestroyer -func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeEvalable -func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Eval info is different depending on what kind of resource this is - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - if n.ResourceState == nil { - log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s has no existing state to refresh", addr) - return n.evalTreeManagedResourceNoState() - } - log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s will be refreshed", addr) - return n.evalTreeManagedResource() - - case addrs.DataResourceMode: - // Get the data source node. If we don't have a configuration - // then it is an orphan so we destroy it (remove it from the state). - var dn GraphNodeEvalable - if n.Config != nil { - dn = &NodeRefreshableDataResourceInstance{ - NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, - } - } else { - dn = &NodeDestroyableDataResourceInstance{ - NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, - } - } - - return dn.EvalTree() - default: - panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode)) - } -} - -func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - - // This happened during initial development. All known cases were - // fixed and tested but as a sanity check let's assert here. - if n.ResourceState == nil { - err := fmt.Errorf( - "No resource state attached for addr: %s\n\n"+ - "This is a bug. Please report this to Terraform with your configuration\n"+ - "and state attached. 
Please be careful to scrub any sensitive information.", - addr) - return &EvalReturnError{Error: &err} - } - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalRefresh{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - } -} - -// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource -// nodes that don't have state attached. An example of where this functionality -// is useful is when a resource that already exists in state is being scaled -// out, ie: has its resource count increased. In this case, the scaled out node -// needs to be available to other nodes (namely data sources) that may depend -// on it for proper interpolation, or confusing "index out of range" errors can -// occur. -// -// The steps in this sequence are very similar to the steps carried out in -// plan, but nothing is done with the diff after it is created - it is dropped, -// and its changes are not counted in the UI. -func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - OutputChange: &change, - OutputState: &state, - Stub: true, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - - // We must also save the planned change, so that expressions in - // other nodes, such as provider configurations and data resources, - // can work with the planned new value. - // - // This depends on the fact that Context.Refresh creates a - // temporary new empty changeset for the duration of its graph - // walk, and so this recorded change will be discarded immediately - // after the refresh walk completes. 
-			&EvalWriteDiff{
-				Addr:           addr.Resource,
-				Change:         &change,
-				ProviderSchema: &providerSchema,
-			},
-		},
-	}
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go
deleted file mode 100644
index f0eb18a0..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package terraform
-
-import (
-	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/provisioners"
-	"github.com/zclconf/go-cty/cty"
-)
-
-// NodeValidatableResource represents a resource that is used for validation
-// only.
-type NodeValidatableResource struct {
-	*NodeAbstractResource
-}
-
-var (
-	_ GraphNodeSubPath              = (*NodeValidatableResource)(nil)
-	_ GraphNodeEvalable             = (*NodeValidatableResource)(nil)
-	_ GraphNodeReferenceable        = (*NodeValidatableResource)(nil)
-	_ GraphNodeReferencer           = (*NodeValidatableResource)(nil)
-	_ GraphNodeResource             = (*NodeValidatableResource)(nil)
-	_ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil)
-)
-
-// GraphNodeEvalable
-func (n *NodeValidatableResource) EvalTree() EvalNode {
-	addr := n.ResourceAddr()
-	config := n.Config
-
-	// Declare the variables that will be used to pass values along
-	// the evaluation sequence below. These are written to via pointers
-	// passed to the EvalNodes.
-	var provider providers.Interface
-	var providerSchema *ProviderSchema
-	var configVal cty.Value
-
-	seq := &EvalSequence{
-		Nodes: []EvalNode{
-			&EvalGetProvider{
-				Addr:   n.ResolvedProvider,
-				Output: &provider,
-				Schema: &providerSchema,
-			},
-			&EvalValidateResource{
-				Addr:           addr.Resource,
-				Provider:       &provider,
-				ProviderSchema: &providerSchema,
-				Config:         config,
-				ConfigVal:      &configVal,
-			},
-		},
-	}
-
-	if managed := n.Config.Managed; managed != nil {
-		hasCount := n.Config.Count != nil
-		hasForEach := n.Config.ForEach != nil
-
-		// Validate all the provisioners
-		for _, p := range managed.Provisioners {
-			var provisioner provisioners.Interface
-			var provisionerSchema *configschema.Block
-
-			if p.Connection == nil {
-				p.Connection = config.Managed.Connection
-			} else if config.Managed.Connection != nil {
-				p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config)
-			}
-
-			seq.Nodes = append(
-				seq.Nodes,
-				&EvalGetProvisioner{
-					Name:   p.Type,
-					Output: &provisioner,
-					Schema: &provisionerSchema,
-				},
-				&EvalValidateProvisioner{
-					ResourceAddr:       addr.Resource,
-					Provisioner:        &provisioner,
-					Schema:             &provisionerSchema,
-					Config:             p,
-					ResourceHasCount:   hasCount,
-					ResourceHasForEach: hasForEach,
-				},
-			)
-		}
-	}
-
-	return seq
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go
deleted file mode 100644
index 844d060c..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package terraform
-
-import (
-	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
-)
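// A minimal usage sketch for the NodeRootVariable type below, assuming the
// internal addrs API from the imports above (hypothetical values):
//
//	n := &NodeRootVariable{Addr: addrs.InputVariable{Name: "image_id"}}
//	_ = n.Name()               // "var.image_id"
//	_ = n.Path()               // addrs.RootModuleInstance
//	_ = n.ReferenceableAddrs() // what expressions like var.image_id resolve to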
- -// NodeRootVariable represents a root variable input. -type NodeRootVariable struct { - Addr addrs.InputVariable - Config *configs.Variable -} - -var ( - _ GraphNodeSubPath = (*NodeRootVariable)(nil) - _ GraphNodeReferenceable = (*NodeRootVariable)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) -) - -func (n *NodeRootVariable) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeRootVariable) Path() addrs.ModuleInstance { - return addrs.RootModuleInstance -} - -// GraphNodeReferenceable -func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr} -} - -// dag.GraphNodeDotter impl. -func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go deleted file mode 100644 index 19e3469c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go +++ /dev/null @@ -1,17 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// PathObjectCacheKey is like PathCacheKey but includes an additional name -// to be included in the key, for module-namespaced objects. -// -// The result of this function is guaranteed unique for any distinct pair -// of path and name, but is not guaranteed to be in any particular format -// and in particular should never be shown to end-users. -func PathObjectCacheKey(path addrs.ModuleInstance, objectName string) string { - return fmt.Sprintf("%s|%s", path.String(), objectName) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go deleted file mode 100644 index 5c19f6e7..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go +++ /dev/null @@ -1,94 +0,0 @@ -package terraform - -import ( - "bytes" - "encoding/gob" - "fmt" - "io" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/zclconf/go-cty/cty" -) - -func init() { - gob.Register(make([]interface{}, 0)) - gob.Register(make([]map[string]interface{}, 0)) - gob.Register(make(map[string]interface{})) - gob.Register(make(map[string]string)) -} - -// Plan represents a single Terraform execution plan, which contains -// all the information necessary to make an infrastructure change. -// -// A plan has to contain basically the entire state of the world -// necessary to make a change: the state, diff, config, backend config, etc. -// This is so that it can run alone without any other data. -type Plan struct { - // Diff describes the resource actions that must be taken when this - // plan is applied. - Diff *Diff - - // Config represents the entire configuration that was present when this - // plan was created. - Config *configs.Config - - // State is the Terraform state that was current when this plan was - // created. - // - // It is not allowed to apply a plan that has a stale state, since its - // diff could be outdated. - State *State - - // Vars retains the variables that were set when creating the plan, so - // that the same variables can be applied during apply. 
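	// (Hypothetical example: map[string]cty.Value{"region": cty.StringVal("us-east-1")}.)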
- Vars map[string]cty.Value - - // Targets, if non-empty, contains a set of resource address strings that - // identify graph nodes that were selected as targets for plan. - // - // When targets are set, any graph node that is not directly targeted or - // indirectly targeted via dependencies is excluded from the graph. - Targets []string - - // TerraformVersion is the version of Terraform that was used to create - // this plan. - // - // It is not allowed to apply a plan created with a different version of - // Terraform, since the other fields of this structure may be interpreted - // in different ways between versions. - TerraformVersion string - - // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries - // used as plugins for each provider during plan. - // - // These must match between plan and apply to ensure that the diff is - // correctly interpreted, since different provider versions may have - // different attributes or attribute value constraints. - ProviderSHA256s map[string][]byte - - // Backend is the backend that this plan should use and store data with. - Backend *BackendState - - // Destroy indicates that this plan was created for a full destroy operation - Destroy bool -} - -func (p *Plan) String() string { - buf := new(bytes.Buffer) - buf.WriteString("DIFF:\n\n") - buf.WriteString(p.Diff.String()) - buf.WriteString("\n\nSTATE:\n\n") - buf.WriteString(p.State.String()) - return buf.String() -} - -// ReadPlan reads a plan structure out of a reader in the format that -// was written by WritePlan. -func ReadPlan(src io.Reader) (*Plan, error) { - return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead") -} - -// WritePlan writes a plan somewhere in a binary format. -func WritePlan(d *Plan, dst io.Writer) error { - return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go deleted file mode 100644 index 7e401f33..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go +++ /dev/null @@ -1,521 +0,0 @@ -package terraform - -import ( - "encoding/json" - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -var _ providers.Interface = (*MockProvider)(nil) - -// MockProvider implements providers.Interface but mocks out all the -// calls for testing purposes. -type MockProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. 
- Meta interface{} - - GetSchemaCalled bool - GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests - - PrepareProviderConfigCalled bool - PrepareProviderConfigResponse providers.PrepareProviderConfigResponse - PrepareProviderConfigRequest providers.PrepareProviderConfigRequest - PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse - - ValidateResourceTypeConfigCalled bool - ValidateResourceTypeConfigTypeName string - ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse - ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest - ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse - - ValidateDataSourceConfigCalled bool - ValidateDataSourceConfigTypeName string - ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse - ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest - ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse - - UpgradeResourceStateCalled bool - UpgradeResourceStateTypeName string - UpgradeResourceStateResponse providers.UpgradeResourceStateResponse - UpgradeResourceStateRequest providers.UpgradeResourceStateRequest - UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse - - ConfigureCalled bool - ConfigureResponse providers.ConfigureResponse - ConfigureRequest providers.ConfigureRequest - ConfigureNewFn func(providers.ConfigureRequest) providers.ConfigureResponse // Named ConfigureNewFn so we can still have the legacy ConfigureFn declared below - - StopCalled bool - StopFn func() error - StopResponse error - - ReadResourceCalled bool - ReadResourceResponse providers.ReadResourceResponse - ReadResourceRequest providers.ReadResourceRequest - ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse - - PlanResourceChangeCalled bool - PlanResourceChangeResponse providers.PlanResourceChangeResponse - PlanResourceChangeRequest providers.PlanResourceChangeRequest - PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse - - ApplyResourceChangeCalled bool - ApplyResourceChangeResponse providers.ApplyResourceChangeResponse - ApplyResourceChangeRequest providers.ApplyResourceChangeRequest - ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse - - ImportResourceStateCalled bool - ImportResourceStateResponse providers.ImportResourceStateResponse - ImportResourceStateRequest providers.ImportResourceStateRequest - ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse - // Legacy return type for existing tests, which will be shimmed into an - // ImportResourceStateResponse if set - ImportStateReturn []*InstanceState - - ReadDataSourceCalled bool - ReadDataSourceResponse providers.ReadDataSourceResponse - ReadDataSourceRequest providers.ReadDataSourceRequest - ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse - - CloseCalled bool - CloseError error - - // Legacy callbacks: if these are set, we will shim incoming calls for - // new-style methods to these old-fashioned terraform.ResourceProvider - // mock callbacks, for the benefit of older tests that were written 
against - // the old mock API. - ValidateFn func(c *ResourceConfig) (ws []string, es []error) - ConfigureFn func(c *ResourceConfig) error - DiffFn func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) - ApplyFn func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) -} - -func (p *MockProvider) GetSchema() providers.GetSchemaResponse { - p.Lock() - defer p.Unlock() - p.GetSchemaCalled = true - return p.getSchema() -} - -func (p *MockProvider) getSchema() providers.GetSchemaResponse { - // This version of getSchema doesn't do any locking, so it's suitable to - // call from other methods of this mock as long as they are already - // holding the lock. - - ret := providers.GetSchemaResponse{ - Provider: providers.Schema{}, - DataSources: map[string]providers.Schema{}, - ResourceTypes: map[string]providers.Schema{}, - } - if p.GetSchemaReturn != nil { - ret.Provider.Block = p.GetSchemaReturn.Provider - for n, s := range p.GetSchemaReturn.DataSources { - ret.DataSources[n] = providers.Schema{ - Block: s, - } - } - for n, s := range p.GetSchemaReturn.ResourceTypes { - ret.ResourceTypes[n] = providers.Schema{ - Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), - Block: s, - } - } - } - - return ret -} - -func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { - p.Lock() - defer p.Unlock() - - p.PrepareProviderConfigCalled = true - p.PrepareProviderConfigRequest = r - if p.PrepareProviderConfigFn != nil { - return p.PrepareProviderConfigFn(r) - } - return p.PrepareProviderConfigResponse -} - -func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateResourceTypeConfigCalled = true - p.ValidateResourceTypeConfigRequest = r - - if p.ValidateFn != nil { - resp := p.getSchema() - schema := resp.Provider.Block - rc := NewResourceConfigShimmed(r.Config, schema) - warns, errs := p.ValidateFn(rc) - ret := providers.ValidateResourceTypeConfigResponse{} - for _, warn := range warns { - ret.Diagnostics = ret.Diagnostics.Append(tfdiags.SimpleWarning(warn)) - } - for _, err := range errs { - ret.Diagnostics = ret.Diagnostics.Append(err) - } - } - if p.ValidateResourceTypeConfigFn != nil { - return p.ValidateResourceTypeConfigFn(r) - } - - return p.ValidateResourceTypeConfigResponse -} - -func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateDataSourceConfigCalled = true - p.ValidateDataSourceConfigRequest = r - - if p.ValidateDataSourceConfigFn != nil { - return p.ValidateDataSourceConfigFn(r) - } - - return p.ValidateDataSourceConfigResponse -} - -func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { - p.Lock() - defer p.Unlock() - - schemas := p.getSchema() - schema := schemas.ResourceTypes[r.TypeName] - schemaType := schema.Block.ImpliedType() - - p.UpgradeResourceStateCalled = true - p.UpgradeResourceStateRequest = r - - if p.UpgradeResourceStateFn != nil { - return p.UpgradeResourceStateFn(r) - } - - resp := p.UpgradeResourceStateResponse - - if resp.UpgradedState == cty.NilVal { - switch { - case r.RawStateFlatmap != nil: - v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) - if err != nil { - resp.Diagnostics = 
resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - case len(r.RawStateJSON) > 0: - v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) - - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - } - } - return resp -} - -func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse { - p.Lock() - defer p.Unlock() - - p.ConfigureCalled = true - p.ConfigureRequest = r - - if p.ConfigureFn != nil { - resp := p.getSchema() - schema := resp.Provider.Block - rc := NewResourceConfigShimmed(r.Config, schema) - ret := providers.ConfigureResponse{} - - err := p.ConfigureFn(rc) - if err != nil { - ret.Diagnostics = ret.Diagnostics.Append(err) - } - return ret - } - if p.ConfigureNewFn != nil { - return p.ConfigureNewFn(r) - } - - return p.ConfigureResponse -} - -func (p *MockProvider) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provider itself is responsible for handling - // any concurrency concerns in this case. - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { - p.Lock() - defer p.Unlock() - - p.ReadResourceCalled = true - p.ReadResourceRequest = r - - if p.ReadResourceFn != nil { - return p.ReadResourceFn(r) - } - - // make sure the NewState fits the schema - newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(p.ReadResourceResponse.NewState) - if err != nil { - panic(err) - } - resp := p.ReadResourceResponse - resp.NewState = newState - - return resp -} - -func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - p.Lock() - defer p.Unlock() - - p.PlanResourceChangeCalled = true - p.PlanResourceChangeRequest = r - - if p.DiffFn != nil { - ps := p.getSchema() - if ps.ResourceTypes == nil || ps.ResourceTypes[r.TypeName].Block == nil { - return providers.PlanResourceChangeResponse{ - Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Printf("mock provider has no schema for resource type %s", r.TypeName)), - } - } - schema := ps.ResourceTypes[r.TypeName].Block - info := &InstanceInfo{ - Type: r.TypeName, - } - priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0) - cfg := NewResourceConfigShimmed(r.Config, schema) - - legacyDiff, err := p.DiffFn(info, priorState, cfg) - - var res providers.PlanResourceChangeResponse - res.PlannedState = r.ProposedNewState - if err != nil { - res.Diagnostics = res.Diagnostics.Append(err) - } - if legacyDiff != nil { - newVal, err := legacyDiff.ApplyToValue(r.PriorState, schema) - if err != nil { - res.Diagnostics = res.Diagnostics.Append(err) - } - - res.PlannedState = newVal - - var requiresNew []string - for attr, d := range legacyDiff.Attributes { - if d.RequiresNew { - requiresNew = append(requiresNew, attr) - } - } - requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schema.ImpliedType()) - if err != nil { - res.Diagnostics = res.Diagnostics.Append(err) - } - res.RequiresReplace = requiresReplace - } - return res - } - if p.PlanResourceChangeFn != nil { - return p.PlanResourceChangeFn(r) - } - - return p.PlanResourceChangeResponse -} - -func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - 
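	// Hypothetical illustration of the legacy shim below: given prior flatmap
	// state {"id": "i-abc", "ami": "ami-1"} and a planned value in which id is
	// unknown and ami is "ami-2", the approximated InstanceDiff contains
	//
	//	"ami": &ResourceAttrDiff{Old: "ami-1", New: "ami-2"}
	//	"id":  &ResourceAttrDiff{Old: "i-abc", New: "", NewComputed: true}
	//
	// which is close enough for most legacy ApplyFn callbacks in older tests.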
p.Lock() - p.ApplyResourceChangeCalled = true - p.ApplyResourceChangeRequest = r - p.Unlock() - - if p.ApplyFn != nil { - // ApplyFn is a special callback fashioned after our old provider - // interface, which expected to be given an actual diff rather than - // separate old/new values to apply. Therefore we need to approximate - // a diff here well enough that _most_ of our legacy ApplyFns in old - // tests still see the behavior they are expecting. New tests should - // not use this, and should instead use ApplyResourceChangeFn directly. - providerSchema := p.getSchema() - schema, ok := providerSchema.ResourceTypes[r.TypeName] - if !ok { - return providers.ApplyResourceChangeResponse{ - Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("no mocked schema available for resource type %s", r.TypeName)), - } - } - - info := &InstanceInfo{ - Type: r.TypeName, - } - - priorVal := r.PriorState - plannedVal := r.PlannedState - priorMap := hcl2shim.FlatmapValueFromHCL2(priorVal) - plannedMap := hcl2shim.FlatmapValueFromHCL2(plannedVal) - s := NewInstanceStateShimmedFromValue(priorVal, 0) - d := &InstanceDiff{ - Attributes: make(map[string]*ResourceAttrDiff), - } - if plannedMap == nil { // destroying, then - d.Destroy = true - // Destroy diffs don't have any attribute diffs - } else { - if priorMap == nil { // creating, then - // We'll just make an empty prior map to make things easier below. - priorMap = make(map[string]string) - } - - for k, new := range plannedMap { - old := priorMap[k] - newComputed := false - if new == hcl2shim.UnknownVariableValue { - new = "" - newComputed = true - } - d.Attributes[k] = &ResourceAttrDiff{ - Old: old, - New: new, - NewComputed: newComputed, - Type: DiffAttrInput, // not generally used in tests, so just hard-coded - } - } - // Also need any attributes that were removed in "planned" - for k, old := range priorMap { - if _, ok := plannedMap[k]; ok { - continue - } - d.Attributes[k] = &ResourceAttrDiff{ - Old: old, - NewRemoved: true, - Type: DiffAttrInput, - } - } - } - newState, err := p.ApplyFn(info, s, d) - resp := providers.ApplyResourceChangeResponse{} - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - if newState != nil { - var newVal cty.Value - if newState != nil { - var err error - newVal, err = newState.AttrsAsObjectValue(schema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - } else { - // If apply returned a nil new state then that's the old way to - // indicate that the object was destroyed. Our new interface calls - // for that to be signalled as a null value. - newVal = cty.NullVal(schema.Block.ImpliedType()) - } - resp.NewState = newVal - } - - return resp - } - if p.ApplyResourceChangeFn != nil { - return p.ApplyResourceChangeFn(r) - } - - return p.ApplyResourceChangeResponse -} - -func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { - p.Lock() - defer p.Unlock() - - if p.ImportStateReturn != nil { - for _, is := range p.ImportStateReturn { - if is.Attributes == nil { - is.Attributes = make(map[string]string) - } - is.Attributes["id"] = is.ID - - typeName := is.Ephemeral.Type - // Use the requested type if the resource has no type of it's own. - // We still return the empty type, which will error, but this prevents a panic. 
- if typeName == "" { - typeName = r.TypeName - } - - schema := p.GetSchemaReturn.ResourceTypes[typeName] - if schema == nil { - panic("no schema found for " + typeName) - } - - private, err := json.Marshal(is.Meta) - if err != nil { - panic(err) - } - - state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) - if err != nil { - panic(err) - } - - state, err = schema.CoerceValue(state) - if err != nil { - panic(err) - } - - p.ImportResourceStateResponse.ImportedResources = append( - p.ImportResourceStateResponse.ImportedResources, - providers.ImportedResource{ - TypeName: is.Ephemeral.Type, - State: state, - Private: private, - }) - } - } - - p.ImportResourceStateCalled = true - p.ImportResourceStateRequest = r - if p.ImportResourceStateFn != nil { - return p.ImportResourceStateFn(r) - } - - return p.ImportResourceStateResponse -} - -func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - p.Lock() - defer p.Unlock() - - p.ReadDataSourceCalled = true - p.ReadDataSourceRequest = r - - if p.ReadDataSourceFn != nil { - return p.ReadDataSourceFn(r) - } - - return p.ReadDataSourceResponse -} - -func (p *MockProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go deleted file mode 100644 index 93b19be5..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go +++ /dev/null @@ -1,154 +0,0 @@ -package terraform - -import ( - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" -) - -var _ provisioners.Interface = (*MockProvisioner)(nil) - -// MockProvisioner implements provisioners.Interface but mocks out all the -// calls for testing purposes. -type MockProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - GetSchemaCalled bool - GetSchemaResponse provisioners.GetSchemaResponse - - ValidateProvisionerConfigCalled bool - ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest - ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse - ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse - - ProvisionResourceCalled bool - ProvisionResourceRequest provisioners.ProvisionResourceRequest - ProvisionResourceResponse provisioners.ProvisionResourceResponse - ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse - - StopCalled bool - StopResponse error - StopFn func() error - - CloseCalled bool - CloseResponse error - CloseFn func() error - - // Legacy callbacks: if these are set, we will shim incoming calls for - // new-style methods to these old-fashioned terraform.ResourceProvider - // mock callbacks, for the benefit of older tests that were written against - // the old mock API. 
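	// As a hypothetical example: with ApplyFn set, a connection value such as
	//
	//	cty.ObjectVal(map[string]cty.Value{
	//		"type": cty.StringVal("ssh"),
	//		"host": cty.StringVal("10.0.0.1"),
	//	})
	//
	// is flattened to map[string]string{"type": "ssh", "host": "10.0.0.1"} and
	// handed to ApplyFn via is.Ephemeral.ConnInfo (see ProvisionResource below).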
- ApplyFn func(rs *InstanceState, c *ResourceConfig) error -} - -func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { - p.Lock() - defer p.Unlock() - - p.GetSchemaCalled = true - return p.getSchema() -} - -// getSchema is the implementation of GetSchema, which can be called from other -// methods on MockProvisioner that may already be holding the lock. -func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { - return p.GetSchemaResponse -} - -func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateProvisionerConfigCalled = true - p.ValidateProvisionerConfigRequest = r - if p.ValidateProvisionerConfigFn != nil { - return p.ValidateProvisionerConfigFn(r) - } - return p.ValidateProvisionerConfigResponse -} - -func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { - p.Lock() - defer p.Unlock() - - p.ProvisionResourceCalled = true - p.ProvisionResourceRequest = r - if p.ApplyFn != nil { - if !r.Config.IsKnown() { - panic(fmt.Sprintf("cannot provision with unknown value: %#v", r.Config)) - } - - schema := p.getSchema() - rc := NewResourceConfigShimmed(r.Config, schema.Provisioner) - connVal := r.Connection - connMap := map[string]string{} - - if !connVal.IsNull() && connVal.IsKnown() { - for it := connVal.ElementIterator(); it.Next(); { - ak, av := it.Element() - name := ak.AsString() - - if !av.IsKnown() || av.IsNull() { - continue - } - - av, _ = convert.Convert(av, cty.String) - connMap[name] = av.AsString() - } - } - - // We no longer pass the full instance state to a provisioner, so we'll - // construct a partial one that should be good enough for what existing - // test mocks need. - is := &InstanceState{ - Ephemeral: EphemeralState{ - ConnInfo: connMap, - }, - } - var resp provisioners.ProvisionResourceResponse - err := p.ApplyFn(is, rc) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - return resp - } - if p.ProvisionResourceFn != nil { - fn := p.ProvisionResourceFn - p.Unlock() - return fn(r) - } - - return p.ProvisionResourceResponse -} - -func (p *MockProvisioner) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provisioner itself is responsible for handling - // any concurrency concerns in this case. - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvisioner) Close() error { - p.Lock() - defer p.Unlock() - - p.CloseCalled = true - if p.CloseFn != nil { - return p.CloseFn() - } - - return p.CloseResponse -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go deleted file mode 100644 index 8a683012..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go +++ /dev/null @@ -1,618 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// ResourceAddress is a way of identifying an individual resource (or, -// eventually, a subset of resources) within the state. 
It is used for Targets. -type ResourceAddress struct { - // Addresses a resource falling somewhere in the module path - // When specified alone, addresses all resources within a module path - Path []string - - // Addresses a specific resource that occurs in a list - Index int - - InstanceType InstanceType - InstanceTypeSet bool - Name string - Type string - Mode ResourceMode // significant only if InstanceTypeSet -} - -// Copy returns a copy of this ResourceAddress -func (r *ResourceAddress) Copy() *ResourceAddress { - if r == nil { - return nil - } - - n := &ResourceAddress{ - Path: make([]string, 0, len(r.Path)), - Index: r.Index, - InstanceType: r.InstanceType, - Name: r.Name, - Type: r.Type, - Mode: r.Mode, - } - - n.Path = append(n.Path, r.Path...) - - return n -} - -// String outputs the address that parses into this address. -func (r *ResourceAddress) String() string { - var result []string - for _, p := range r.Path { - result = append(result, "module", p) - } - - switch r.Mode { - case ManagedResourceMode: - // nothing to do - case DataResourceMode: - result = append(result, "data") - default: - panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) - } - - if r.Type != "" { - result = append(result, r.Type) - } - - if r.Name != "" { - name := r.Name - if r.InstanceTypeSet { - switch r.InstanceType { - case TypePrimary: - name += ".primary" - case TypeDeposed: - name += ".deposed" - case TypeTainted: - name += ".tainted" - } - } - - if r.Index >= 0 { - name += fmt.Sprintf("[%d]", r.Index) - } - result = append(result, name) - } - - return strings.Join(result, ".") -} - -// HasResourceSpec returns true if the address has a resource spec, as -// defined in the documentation: -// https://www.terraform.io/docs/internals/resource-addressing.html -// In particular, this returns false if the address contains only -// a module path, thus addressing the entire module. -func (r *ResourceAddress) HasResourceSpec() bool { - return r.Type != "" && r.Name != "" -} - -// WholeModuleAddress returns the resource address that refers to all -// resources in the same module as the receiver address. -func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { - return &ResourceAddress{ - Path: r.Path, - Index: -1, - InstanceTypeSet: false, - } -} - -// MatchesResourceConfig returns true if the receiver matches the given -// configuration resource within the given _static_ module path. Note that -// the module path in a resource address is a _dynamic_ module path, and -// multiple dynamic resource paths may map to a single static path if -// count and for_each are in use on module calls. -// -// Since resource configuration blocks represent all of the instances of -// a multi-instance resource, the index of the address (if any) is not -// considered. -func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool { - if r.HasResourceSpec() { - // FIXME: Some ugliness while we are between worlds. Functionality - // in "addrs" should eventually replace this ResourceAddress idea - // completely, but for now we'll need to translate to the old - // way of representing resource modes. 
- switch r.Mode { - case ManagedResourceMode: - if rc.Mode != addrs.ManagedResourceMode { - return false - } - case DataResourceMode: - if rc.Mode != addrs.DataResourceMode { - return false - } - } - if r.Type != rc.Type || r.Name != rc.Name { - return false - } - } - - addrPath := r.Path - - // normalize - if len(addrPath) == 0 { - addrPath = nil - } - if len(path) == 0 { - path = nil - } - rawPath := []string(path) - return reflect.DeepEqual(addrPath, rawPath) -} - -// stateId returns the ID that this resource should be entered with -// in the state. This is also used for diffs. In the future, we'd like to -// move away from this string field so I don't export this. -func (r *ResourceAddress) stateId() string { - result := fmt.Sprintf("%s.%s", r.Type, r.Name) - switch r.Mode { - case ManagedResourceMode: - // Done - case DataResourceMode: - result = fmt.Sprintf("data.%s", result) - default: - panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) - } - if r.Index >= 0 { - result += fmt.Sprintf(".%d", r.Index) - } - - return result -} - -// parseResourceAddressInternal parses the somewhat bespoke resource -// identifier used in states and diffs, such as "instance.name.0". -func parseResourceAddressInternal(s string) (*ResourceAddress, error) { - // Split based on ".". Every resource address should have at least two - // elements (type and name). - parts := strings.Split(s, ".") - if len(parts) < 2 || len(parts) > 4 { - return nil, fmt.Errorf("Invalid internal resource address format: %s", s) - } - - // Data resource if we have at least 3 parts and the first one is data - mode := ManagedResourceMode - if len(parts) > 2 && parts[0] == "data" { - mode = DataResourceMode - parts = parts[1:] - } - - // If we're not a data resource and we have more than 3, then it is an error - if len(parts) > 3 && mode != DataResourceMode { - return nil, fmt.Errorf("Invalid internal resource address format: %s", s) - } - - // Build the parts of the resource address that are guaranteed to exist - addr := &ResourceAddress{ - Type: parts[0], - Name: parts[1], - Index: -1, - InstanceType: TypePrimary, - Mode: mode, - } - - // If we have more parts, then we have an index. Parse that. - if len(parts) > 2 { - idx, err := strconv.ParseInt(parts[2], 0, 0) - if err != nil { - return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) - } - - addr.Index = int(idx) - } - - return addr, nil -} - -func ParseResourceAddress(s string) (*ResourceAddress, error) { - matches, err := tokenizeResourceAddress(s) - if err != nil { - return nil, err - } - mode := ManagedResourceMode - if matches["data_prefix"] != "" { - mode = DataResourceMode - } - resourceIndex, err := ParseResourceIndex(matches["index"]) - if err != nil { - return nil, err - } - instanceType, err := ParseInstanceType(matches["instance_type"]) - if err != nil { - return nil, err - } - path := ParseResourcePath(matches["path"]) - - // not allowed to say "data." without a type following - if mode == DataResourceMode && matches["type"] == "" { - return nil, fmt.Errorf( - "invalid resource address %q: must target specific data instance", - s, - ) - } - - return &ResourceAddress{ - Path: path, - Index: resourceIndex, - InstanceType: instanceType, - InstanceTypeSet: matches["instance_type"] != "", - Name: matches["name"], - Type: matches["type"], - Mode: mode, - }, nil -} - -// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a -// resource name as described in a module diff. 
-//
-// For historical reasons a different addressing format is used in this
-// context. The internal format should not be shown in the UI and instead
-// this function should be used to translate to a ResourceAddress and
-// then, where appropriate, use the String method to produce a canonical
-// resource address string for display in the UI.
-//
-// The given path slice must be empty (or nil) for the root module, and
-// otherwise consist of a sequence of module names traversing down into
-// the module tree. If a non-nil path is provided, the caller must not
-// modify its underlying array after passing it to this function.
-func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) {
-	addr, err := parseResourceAddressInternal(key)
-	if err != nil {
-		return nil, err
-	}
-	addr.Path = path
-	return addr, nil
-}
-
-// NewLegacyResourceAddress creates a ResourceAddress from a new-style
-// addrs.AbsResource value.
-//
-// This is provided for shimming purposes so that we can still easily call into
-// older functions that expect the ResourceAddress type.
-func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress {
-	ret := &ResourceAddress{
-		Type: addr.Resource.Type,
-		Name: addr.Resource.Name,
-	}
-
-	switch addr.Resource.Mode {
-	case addrs.ManagedResourceMode:
-		ret.Mode = ManagedResourceMode
-	case addrs.DataResourceMode:
-		ret.Mode = DataResourceMode
-	default:
-		panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Mode))
-	}
-
-	path := make([]string, len(addr.Module))
-	for i, step := range addr.Module {
-		if step.InstanceKey != addrs.NoKey {
-			// At the time of writing this can't happen because we don't
-			// yet generate keyed module instances. This legacy codepath must
-			// be removed before we can support "count" and "for_each" for
-			// modules.
-			panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
-		}
-
-		path[i] = step.Name
-	}
-	ret.Path = path
-	ret.Index = -1
-
-	return ret
-}
-
-// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style
-// addrs.AbsResourceInstance value.
-//
-// This is provided for shimming purposes so that we can still easily call into
-// older functions that expect the ResourceAddress type.
-func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress {
-	ret := &ResourceAddress{
-		Type: addr.Resource.Resource.Type,
-		Name: addr.Resource.Resource.Name,
-	}
-
-	switch addr.Resource.Resource.Mode {
-	case addrs.ManagedResourceMode:
-		ret.Mode = ManagedResourceMode
-	case addrs.DataResourceMode:
-		ret.Mode = DataResourceMode
-	default:
-		panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Resource.Mode))
-	}
-
-	path := make([]string, len(addr.Module))
-	for i, step := range addr.Module {
-		if step.InstanceKey != addrs.NoKey {
-			// At the time of writing this can't happen because we don't
-			// yet generate keyed module instances. This legacy codepath must
-			// be removed before we can support "count" and "for_each" for
-			// modules.
-			panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
-		}
-
-		path[i] = step.Name
-	}
-	ret.Path = path
-
-	if addr.Resource.Key == addrs.NoKey {
-		ret.Index = -1
-	} else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok {
-		ret.Index = int(ik)
-	} else if _, ok := addr.Resource.Key.(addrs.StringKey); ok {
-		ret.Index = -1
-	} else {
-		panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key))
-	}
-
-	return ret
-}
-
-// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to
-// the new resource address type addrs.AbsResourceInstance.
-//
-// This method can be used only on an address that has a resource specification.
-// It will panic if called on a module-path-only ResourceAddress. Use
-// method HasResourceSpec to check before calling, in contexts where it is
-// unclear.
-//
-// addrs.AbsResourceInstance does not represent the "tainted" and "deposed"
-// states, and so if these are present on the receiver then they are discarded.
-//
-// This is provided for shimming purposes so that we can easily adapt functions
-// that are returning the legacy ResourceAddress type, for situations where
-// the new type is required.
-func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance {
-	if !addr.HasResourceSpec() {
-		panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec")
-	}
-
-	ret := addrs.AbsResourceInstance{
-		Module: addr.ModuleInstanceAddr(),
-		Resource: addrs.ResourceInstance{
-			Resource: addrs.Resource{
-				Type: addr.Type,
-				Name: addr.Name,
-			},
-		},
-	}
-
-	switch addr.Mode {
-	case ManagedResourceMode:
-		ret.Resource.Resource.Mode = addrs.ManagedResourceMode
-	case DataResourceMode:
-		ret.Resource.Resource.Mode = addrs.DataResourceMode
-	default:
-		panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode))
-	}
-
-	if addr.Index != -1 {
-		ret.Resource.Key = addrs.IntKey(addr.Index)
-	}
-
-	return ret
-}
-
-// ModuleInstanceAddr returns the module path portion of the receiver as a
-// addrs.ModuleInstance value.
-func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance {
-	path := make(addrs.ModuleInstance, len(addr.Path))
-	for i, name := range addr.Path {
-		path[i] = addrs.ModuleInstanceStep{Name: name}
-	}
-	return path
-}
-
-// Contains returns true if and only if the given node is contained within
-// the receiver.
-//
-// Containment is defined in terms of the module and resource hierarchy:
-// a resource is contained within its module and any ancestor modules,
-// an indexed resource instance is contained within the unindexed resource, etc.
-func (addr *ResourceAddress) Contains(other *ResourceAddress) bool {
-	ourPath := addr.Path
-	givenPath := other.Path
-	if len(givenPath) < len(ourPath) {
-		return false
-	}
-	for i := range ourPath {
-		if ourPath[i] != givenPath[i] {
-			return false
-		}
-	}
-
-	// If the receiver is a whole-module address then the path prefix
-	// matching is all we need.
-	if !addr.HasResourceSpec() {
-		return true
-	}
-
-	if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode {
-		return false
-	}
-
-	if addr.Index != -1 && addr.Index != other.Index {
-		return false
-	}
-
-	if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) {
-		return false
-	}
-
-	return true
-}
-
-// Equals returns true if the receiver matches the given address.
-//
-// The name of this method is a misnomer, since it doesn't test for exact
-// equality. Instead, it tests that the _specified_ parts of each
-// address match, treating any unspecified parts as wildcards.
-//
-// See also Contains, which takes a more hierarchical approach to comparing
-// addresses.
-func (addr *ResourceAddress) Equals(raw interface{}) bool {
-	other, ok := raw.(*ResourceAddress)
-	if !ok {
-		return false
-	}
-
-	pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
-		reflect.DeepEqual(addr.Path, other.Path)
-
-	indexMatch := addr.Index == -1 ||
-		other.Index == -1 ||
-		addr.Index == other.Index
-
-	nameMatch := addr.Name == "" ||
-		other.Name == "" ||
-		addr.Name == other.Name
-
-	typeMatch := addr.Type == "" ||
-		other.Type == "" ||
-		addr.Type == other.Type
-
-	// mode is significant only when type is set
-	modeMatch := addr.Type == "" ||
-		other.Type == "" ||
-		addr.Mode == other.Mode
-
-	return pathMatch &&
-		indexMatch &&
-		addr.InstanceType == other.InstanceType &&
-		nameMatch &&
-		typeMatch &&
-		modeMatch
-}
-
-// Less returns true if and only if the receiver should be sorted before
-// the given address when presenting a list of resource addresses to
-// an end-user.
-//
-// This sort uses lexicographic sorting for most components, but uses
-// numeric sort for indices, thus causing index 10 to sort after
-// index 9, rather than after index 1.
-func (addr *ResourceAddress) Less(other *ResourceAddress) bool {
-
-	switch {
-
-	case len(addr.Path) != len(other.Path):
-		return len(addr.Path) < len(other.Path)
-
-	case !reflect.DeepEqual(addr.Path, other.Path):
-		// If the two paths are the same length but don't match, we'll just
-		// cheat and compare the string forms since it's easier than
-		// comparing all of the path segments in turn, and lexicographic
-		// comparison is correct for the module path portion.
-		addrStr := addr.String()
-		otherStr := other.String()
-		return addrStr < otherStr
-
-	case addr.Mode != other.Mode:
-		return addr.Mode == DataResourceMode
-
-	case addr.Type != other.Type:
-		return addr.Type < other.Type
-
-	case addr.Name != other.Name:
-		return addr.Name < other.Name
-
-	case addr.Index != other.Index:
-		// Since "Index" is -1 for an un-indexed address, this also conveniently
-		// sorts unindexed addresses before indexed ones, should they both
-		// appear for some reason.
-		return addr.Index < other.Index
-
-	case addr.InstanceTypeSet != other.InstanceTypeSet:
-		return !addr.InstanceTypeSet
-
-	case addr.InstanceType != other.InstanceType:
-		// InstanceType is actually an enum, so this is just an arbitrary
-		// sort based on the enum numeric values, and thus not particularly
-		// meaningful.
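		// Taken together, the cases above yield orderings like this
		// (hypothetical addresses):
		//
		//	aws_instance.a
		//	aws_instance.b[9]
		//	aws_instance.b[10]
		//	module.foo.aws_instance.b
		//
		// shorter module paths sort first, and the index comparison is
		// numeric, so [10] follows [9] rather than [1].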
-		return addr.InstanceType < other.InstanceType
-
-	default:
-		return false
-
-	}
-}
-
-func ParseResourceIndex(s string) (int, error) {
-	if s == "" {
-		return -1, nil
-	}
-	return strconv.Atoi(s)
-}
-
-func ParseResourcePath(s string) []string {
-	if s == "" {
-		return nil
-	}
-	parts := strings.Split(s, ".")
-	path := make([]string, 0, len(parts))
-	for _, s := range parts {
-		// Due to the limitations of the regexp match below, the path match has
-		// some noise in it we have to filter out :|
-		if s == "" || s == "module" {
-			continue
-		}
-		path = append(path, s)
-	}
-	return path
-}
-
-func ParseInstanceType(s string) (InstanceType, error) {
-	switch s {
-	case "", "primary":
-		return TypePrimary, nil
-	case "deposed":
-		return TypeDeposed, nil
-	case "tainted":
-		return TypeTainted, nil
-	default:
-		return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
-	}
-}
-
-func tokenizeResourceAddress(s string) (map[string]string, error) {
-	// Example of portions of the regexp below using the
-	// string "aws_instance.web.tainted[1]"
-	re := regexp.MustCompile(`\A` +
-		// "module.foo.module.bar" (optional)
-		`(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
-		// possibly "data.", if targeting is a data resource
-		`(?P<data_prefix>(?:data\.)?)` +
-		// "aws_instance.web" (optional when module path specified)
-		`(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
-		// "tainted" (optional, omission implies: "primary")
-		`(?:\.(?P<instance_type>\w+))?` +
-		// "1" (optional, omission implies: "0")
-		`(?:\[(?P<index>\d+)\])?` +
-		`\z`)
-
-	groupNames := re.SubexpNames()
-	rawMatches := re.FindAllStringSubmatch(s, -1)
-	if len(rawMatches) != 1 {
-		return nil, fmt.Errorf("invalid resource address %q", s)
-	}
-
-	matches := make(map[string]string)
-	for i, m := range rawMatches[0] {
-		matches[groupNames[i]] = m
-	}
-
-	return matches, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go
deleted file mode 100644
index fec45967..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go
+++ /dev/null
@@ -1,319 +0,0 @@
-package terraform
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
-)
-
-// ResourceProvider is an interface that must be implemented by any
-// resource provider: the thing that creates and manages the resources in
-// a Terraform configuration.
-//
-// Important implementation note: All returned pointers, such as
-// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to
-// shared data. Terraform is highly parallel and assumes that this data is safe
-// to read/write in parallel so it must be unique references. Note that it is
-// safe to return arguments as results, however.
-type ResourceProvider interface {
-	/*********************************************************************
-	 * Functions related to the provider
-	 *********************************************************************/
-
-	// GetSchema returns the config schema for the main provider
-	// configuration, as would appear in a "provider" block in the
-	// configuration files.
-	//
-	// Currently not all providers support schema. Callers must therefore
-	// first call Resources and DataSources and ensure that at least one
-	// resource or data source has the SchemaAvailable flag set.
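	// A hypothetical call, assuming ProviderSchemaRequest carries lists of
	// resource type and data source names:
	//
	//	schema, err := p.GetSchema(&ProviderSchemaRequest{
	//		ResourceTypes: []string{"null_resource"},
	//	})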
- GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) - - // Input was used prior to v0.12 to ask the provider to prompt the user - // for input to complete the configuration. - // - // From v0.12 onwards this method is never called because Terraform Core - // is able to handle the necessary input logic itself based on the - // schema returned from GetSchema. - Input(UIInput, *ResourceConfig) (*ResourceConfig, error) - - // Validate is called once at the beginning with the raw configuration - // (no interpolation done) and can return a list of warnings and/or - // errors. - // - // This is called once with the provider configuration only. It may not - // be called at all if no provider configuration is given. - // - // This should not assume that any values of the configurations are valid. - // The primary use case of this call is to check that required keys are - // set. - Validate(*ResourceConfig) ([]string, []error) - - // Configure configures the provider itself with the configuration - // given. This is useful for setting things like access keys. - // - // This won't be called at all if no provider configuration is given. - // - // Configure returns an error if it occurred. - Configure(*ResourceConfig) error - - // Resources returns all the available resource types that this provider - // knows how to manage. - Resources() []ResourceType - - // Stop is called when the provider should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - /********************************************************************* - * Functions related to individual resources - *********************************************************************/ - - // ValidateResource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateResource(string, *ResourceConfig) ([]string, []error) - - // Apply applies a diff to a specific resource and returns the new - // resource state along with an error. - // - // If the resource state given has an empty ID, then a new resource - // is expected to be created. - Apply( - *InstanceInfo, - *InstanceState, - *InstanceDiff) (*InstanceState, error) - - // Diff diffs a resource versus a desired state and returns - // a diff. 
- Diff( - *InstanceInfo, - *InstanceState, - *ResourceConfig) (*InstanceDiff, error) - - // Refresh refreshes a resource and updates all of its attributes - // with the latest information. - Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) - - /********************************************************************* - * Functions related to importing - *********************************************************************/ - - // ImportState requests that the given resource be imported. - // - // The returned InstanceState only requires ID be set. Importing - // will always call Refresh after the state to complete it. - // - // IMPORTANT: InstanceState doesn't have the resource type attached - // to it. A type must be specified on the state via the Ephemeral - // field on the state. - // - // This function can return multiple states. Normally, an import - // will map 1:1 to a physical resource. However, some resources map - // to multiple. For example, an AWS security group may contain many rules. - // Each rule is represented by a separate resource in Terraform, - // therefore multiple states are returned. - ImportState(*InstanceInfo, string) ([]*InstanceState, error) - - /********************************************************************* - * Functions related to data resources - *********************************************************************/ - - // ValidateDataSource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per data source instance. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateDataSource(string, *ResourceConfig) ([]string, []error) - - // DataSources returns all of the available data sources that this - // provider implements. - DataSources() []DataSource - - // ReadDataDiff produces a diff that represents the state that will - // be produced when the given data source is read using a later call - // to ReadDataApply. - ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - - // ReadDataApply initializes a data instance using the configuration - // in a diff produced by ReadDataDiff. - ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) -} - -// ResourceProviderCloser is an interface that providers that can close -// connections that aren't needed anymore must implement. -type ResourceProviderCloser interface { - Close() error -} - -// ResourceType is a type of resource that a resource provider can manage. -type ResourceType struct { - Name string // Name of the resource, example "instance" (no provider prefix) - Importable bool // Whether this resource supports importing - - // SchemaAvailable is set if the provider supports the ProviderSchema, - // ResourceTypeSchema and DataSourceSchema methods. Although it is - // included on each resource type, it's actually a provider-wide setting - // that's smuggled here only because that avoids a breaking change to - // the plugin protocol. - SchemaAvailable bool -} - -// DataSource is a data source that a resource provider implements. -type DataSource struct { - Name string - - // SchemaAvailable is set if the provider supports the ProviderSchema, - // ResourceTypeSchema and DataSourceSchema methods. 
Although it is - // included on each resource type, it's actually a provider-wide setting - // that's smuggled here only because that avoids a breaking change to - // the plugin protocol. - SchemaAvailable bool -} - -// ResourceProviderResolver is an interface implemented by objects that are -// able to resolve a given set of resource provider version constraints -// into ResourceProviderFactory callbacks. -type ResourceProviderResolver interface { - // Given a constraint map, return a ResourceProviderFactory for each - // requested provider. If some or all of the constraints cannot be - // satisfied, return a non-nil slice of errors describing the problems. - ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) -} - -// ResourceProviderResolverFunc wraps a callback function and turns it into -// a ResourceProviderResolver implementation, for convenience in situations -// where a function and its associated closure are sufficient as a resolver -// implementation. -type ResourceProviderResolverFunc func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) - -// ResolveProviders implements ResourceProviderResolver by calling the -// wrapped function. -func (f ResourceProviderResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { - return f(reqd) -} - -// ResourceProviderResolverFixed returns a ResourceProviderResolver that -// has a fixed set of provider factories provided by the caller. The returned -// resolver ignores version constraints entirely and just returns the given -// factory for each requested provider name. -// -// This function is primarily used in tests, to provide mock providers or -// in-process providers under test. -func ResourceProviderResolverFixed(factories map[string]ResourceProviderFactory) ResourceProviderResolver { - return ResourceProviderResolverFunc(func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { - ret := make(map[string]ResourceProviderFactory, len(reqd)) - var errs []error - for name := range reqd { - if factory, exists := factories[name]; exists { - ret[name] = factory - } else { - errs = append(errs, fmt.Errorf("provider %q is not available", name)) - } - } - return ret, errs - }) -} - -// ResourceProviderFactory is a function type that creates a new instance -// of a resource provider. -type ResourceProviderFactory func() (ResourceProvider, error) - -// ResourceProviderFactoryFixed is a helper that creates a -// ResourceProviderFactory that just returns some fixed provider. -func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { - return func() (ResourceProvider, error) { - return p, nil - } -} - -func ProviderHasResource(p ResourceProvider, n string) bool { - for _, rt := range p.Resources() { - if rt.Name == n { - return true - } - } - - return false -} - -func ProviderHasDataSource(p ResourceProvider, n string) bool { - for _, rt := range p.DataSources() { - if rt.Name == n { - return true - } - } - - return false -} - -// resourceProviderFactories matches available plugins to the given version -// requirements to produce a map of compatible provider plugins if possible, -// or an error if the currently-available plugins are insufficient. -// -// This should be called only with configurations that have passed calls -// to config.Validate(), which ensures that all of the given version -// constraints are valid. 
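// (For reference, a hedged sketch that is not part of this file: the fixed
// resolver defined above is typically wired into tests like so, where the
// provider name "test" is hypothetical:
//
//	resolver := ResourceProviderResolverFixed(map[string]ResourceProviderFactory{
//		"test": ResourceProviderFactoryFixed(new(MockResourceProvider)),
//	})
//	factories, errs := resolver.ResolveProviders(reqd)
//
// Names that were not requested simply never appear in the returned map,
// while requested names with no factory yield "provider %q is not available"
// errors.)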
It will panic if any invalid constraints are present. -func resourceProviderFactories(resolver providers.Resolver, reqd discovery.PluginRequirements) (map[string]providers.Factory, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret, errs := resolver.ResolveProviders(reqd) - if errs != nil { - diags = diags.Append( - tfdiags.Sourceless(tfdiags.Error, - "Could not satisfy plugin requirements", - errPluginInit, - ), - ) - - for _, err := range errs { - diags = diags.Append(err) - } - - return nil, diags - } - - return ret, nil -} - -const errPluginInit = ` -Plugin reinitialization required. Please run "terraform init". - -Plugins are external binaries that Terraform uses to access and manipulate -resources. The configuration provided requires plugins which can't be located, -don't satisfy the version constraints, or are otherwise incompatible. - -Terraform automatically discovers provider requirements from your -configuration, including providers used in child modules. To see the -requirements and constraints from each module, run "terraform providers". -` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go deleted file mode 100644 index 4000e3d2..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go +++ /dev/null @@ -1,315 +0,0 @@ -package terraform - -import ( - "sync" -) - -// MockResourceProvider implements ResourceProvider but mocks out all the -// calls for testing purposes. -type MockResourceProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - CloseCalled bool - CloseError error - GetSchemaCalled bool - GetSchemaRequest *ProviderSchemaRequest - GetSchemaReturn *ProviderSchema - GetSchemaReturnError error - InputCalled bool - InputInput UIInput - InputConfig *ResourceConfig - InputReturnConfig *ResourceConfig - InputReturnError error - InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) - ApplyCalled bool - ApplyInfo *InstanceInfo - ApplyState *InstanceState - ApplyDiff *InstanceDiff - ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) - ApplyReturn *InstanceState - ApplyReturnError error - ConfigureCalled bool - ConfigureConfig *ResourceConfig - ConfigureFn func(*ResourceConfig) error - ConfigureReturnError error - DiffCalled bool - DiffInfo *InstanceInfo - DiffState *InstanceState - DiffDesired *ResourceConfig - DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) - DiffReturn *InstanceDiff - DiffReturnError error - RefreshCalled bool - RefreshInfo *InstanceInfo - RefreshState *InstanceState - RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) - RefreshReturn *InstanceState - RefreshReturnError error - ResourcesCalled bool - ResourcesReturn []ResourceType - ReadDataApplyCalled bool - ReadDataApplyInfo *InstanceInfo - ReadDataApplyDiff *InstanceDiff - ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) - ReadDataApplyReturn *InstanceState - ReadDataApplyReturnError error - ReadDataDiffCalled bool - ReadDataDiffInfo *InstanceInfo - ReadDataDiffDesired *ResourceConfig - ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - ReadDataDiffReturn *InstanceDiff - ReadDataDiffReturnError error - StopCalled bool - StopFn func() error - StopReturnError error - DataSourcesCalled bool 
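	// (Descriptive note: every mocked method below follows one convention.
	// A Called flag records the invocation, the captured arguments are
	// stored next to it, the canned Return values are handed back, and a
	// non-nil Fn callback, when set, overrides the canned behavior.)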
- DataSourcesReturn []DataSource - ValidateCalled bool - ValidateConfig *ResourceConfig - ValidateFn func(*ResourceConfig) ([]string, []error) - ValidateReturnWarns []string - ValidateReturnErrors []error - ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) - ValidateResourceCalled bool - ValidateResourceType string - ValidateResourceConfig *ResourceConfig - ValidateResourceReturnWarns []string - ValidateResourceReturnErrors []error - ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) - ValidateDataSourceCalled bool - ValidateDataSourceType string - ValidateDataSourceConfig *ResourceConfig - ValidateDataSourceReturnWarns []string - ValidateDataSourceReturnErrors []error - - ImportStateCalled bool - ImportStateInfo *InstanceInfo - ImportStateID string - ImportStateReturn []*InstanceState - ImportStateReturnError error - ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error) -} - -func (p *MockResourceProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} - -func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) { - p.Lock() - defer p.Unlock() - - p.GetSchemaCalled = true - p.GetSchemaRequest = req - return p.GetSchemaReturn, p.GetSchemaReturnError -} - -func (p *MockResourceProvider) Input( - input UIInput, c *ResourceConfig) (*ResourceConfig, error) { - p.Lock() - defer p.Unlock() - p.InputCalled = true - p.InputInput = input - p.InputConfig = c - if p.InputFn != nil { - return p.InputFn(input, c) - } - return p.InputReturnConfig, p.InputReturnError -} - -func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateCalled = true - p.ValidateConfig = c - if p.ValidateFn != nil { - return p.ValidateFn(c) - } - return p.ValidateReturnWarns, p.ValidateReturnErrors -} - -func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateResourceCalled = true - p.ValidateResourceType = t - p.ValidateResourceConfig = c - - if p.ValidateResourceFn != nil { - return p.ValidateResourceFn(t, c) - } - - return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors -} - -func (p *MockResourceProvider) Configure(c *ResourceConfig) error { - p.Lock() - defer p.Unlock() - - p.ConfigureCalled = true - p.ConfigureConfig = c - - if p.ConfigureFn != nil { - return p.ConfigureFn(c) - } - - return p.ConfigureReturnError -} - -func (p *MockResourceProvider) Stop() error { - p.Lock() - defer p.Unlock() - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopReturnError -} - -func (p *MockResourceProvider) Apply( - info *InstanceInfo, - state *InstanceState, - diff *InstanceDiff) (*InstanceState, error) { - // We only lock while writing data. 
Reading is fine - p.Lock() - p.ApplyCalled = true - p.ApplyInfo = info - p.ApplyState = state - p.ApplyDiff = diff - p.Unlock() - - if p.ApplyFn != nil { - return p.ApplyFn(info, state, diff) - } - - return p.ApplyReturn.DeepCopy(), p.ApplyReturnError -} - -func (p *MockResourceProvider) Diff( - info *InstanceInfo, - state *InstanceState, - desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() - - p.DiffCalled = true - p.DiffInfo = info - p.DiffState = state - p.DiffDesired = desired - - if p.DiffFn != nil { - return p.DiffFn(info, state, desired) - } - - return p.DiffReturn.DeepCopy(), p.DiffReturnError -} - -func (p *MockResourceProvider) Refresh( - info *InstanceInfo, - s *InstanceState) (*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.RefreshCalled = true - p.RefreshInfo = info - p.RefreshState = s - - if p.RefreshFn != nil { - return p.RefreshFn(info, s) - } - - return p.RefreshReturn.DeepCopy(), p.RefreshReturnError -} - -func (p *MockResourceProvider) Resources() []ResourceType { - p.Lock() - defer p.Unlock() - - p.ResourcesCalled = true - return p.ResourcesReturn -} - -func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.ImportStateCalled = true - p.ImportStateInfo = info - p.ImportStateID = id - if p.ImportStateFn != nil { - return p.ImportStateFn(info, id) - } - - var result []*InstanceState - if p.ImportStateReturn != nil { - result = make([]*InstanceState, len(p.ImportStateReturn)) - for i, v := range p.ImportStateReturn { - result[i] = v.DeepCopy() - } - } - - return result, p.ImportStateReturnError -} - -func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateDataSourceCalled = true - p.ValidateDataSourceType = t - p.ValidateDataSourceConfig = c - - if p.ValidateDataSourceFn != nil { - return p.ValidateDataSourceFn(t, c) - } - - return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors -} - -func (p *MockResourceProvider) ReadDataDiff( - info *InstanceInfo, - desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() - - p.ReadDataDiffCalled = true - p.ReadDataDiffInfo = info - p.ReadDataDiffDesired = desired - if p.ReadDataDiffFn != nil { - return p.ReadDataDiffFn(info, desired) - } - - return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError -} - -func (p *MockResourceProvider) ReadDataApply( - info *InstanceInfo, - d *InstanceDiff) (*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.ReadDataApplyCalled = true - p.ReadDataApplyInfo = info - p.ReadDataApplyDiff = d - - if p.ReadDataApplyFn != nil { - return p.ReadDataApplyFn(info, d) - } - - return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError -} - -func (p *MockResourceProvider) DataSources() []DataSource { - p.Lock() - defer p.Unlock() - - p.DataSourcesCalled = true - return p.DataSourcesReturn -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go deleted file mode 100644 index 74ee2a94..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" -) - 
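// (A minimal sketch of exercising the MockResourceProvider from the file
// above in a test; the instance values are hypothetical:
//
//	p := new(MockResourceProvider)
//	p.DiffReturn = &InstanceDiff{}
//	d, err := p.Diff(&InstanceInfo{Id: "test"}, &InstanceState{}, &ResourceConfig{})
//
// Afterwards p.DiffCalled is true, p.DiffInfo/p.DiffState/p.DiffDesired hold
// the arguments, and d is a deep copy of p.DiffReturn with err == nil.)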
-// ResourceProvisioner is an interface that must be implemented by any -// resource provisioner: the thing that initializes resources in -// a Terraform configuration. -type ResourceProvisioner interface { - // GetConfigSchema returns the schema for the provisioner type's main - // configuration block. This is called prior to Validate to enable some - // basic structural validation to be performed automatically and to allow - // the configuration to be properly extracted from potentially-ambiguous - // configuration file formats. - GetConfigSchema() (*configschema.Block, error) - - // Validate is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - Validate(*ResourceConfig) ([]string, []error) - - // Apply runs the provisioner on a specific resource and returns the new - // resource state along with an error. Instead of a diff, the ResourceConfig - // is provided since provisioners only run after a resource has been - // newly created. - Apply(UIOutput, *InstanceState, *ResourceConfig) error - - // Stop is called when the provisioner should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error -} - -// ResourceProvisionerCloser is an interface that provisioners that can close -// connections that aren't needed anymore must implement. -type ResourceProvisionerCloser interface { - Close() error -} - -// ResourceProvisionerFactory is a function type that creates a new instance -// of a resource provisioner. -type ResourceProvisionerFactory func() (ResourceProvisioner, error) - -// ProvisionerFactory is a function type that creates a new instance -// of a provisioners.Interface. 
-type ProvisionerFactory = provisioners.Factory diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go deleted file mode 100644 index ed6f241b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go +++ /dev/null @@ -1,87 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" -) - -// MockResourceProvisioner implements ResourceProvisioner but mocks out all the -// calls for testing purposes. -type MockResourceProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - GetConfigSchemaCalled bool - GetConfigSchemaReturnSchema *configschema.Block - GetConfigSchemaReturnError error - - ApplyCalled bool - ApplyOutput UIOutput - ApplyState *InstanceState - ApplyConfig *ResourceConfig - ApplyFn func(*InstanceState, *ResourceConfig) error - ApplyReturnError error - - ValidateCalled bool - ValidateConfig *ResourceConfig - ValidateFn func(c *ResourceConfig) ([]string, []error) - ValidateReturnWarns []string - ValidateReturnErrors []error - - StopCalled bool - StopFn func() error - StopReturnError error -} - -var _ ResourceProvisioner = (*MockResourceProvisioner)(nil) - -func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { - p.GetConfigSchemaCalled = true - return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError -} - -func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateCalled = true - p.ValidateConfig = c - if p.ValidateFn != nil { - return p.ValidateFn(c) - } - return p.ValidateReturnWarns, p.ValidateReturnErrors -} - -func (p *MockResourceProvisioner) Apply( - output UIOutput, - state *InstanceState, - c *ResourceConfig) error { - p.Lock() - - p.ApplyCalled = true - p.ApplyOutput = output - p.ApplyState = state - p.ApplyConfig = c - if p.ApplyFn != nil { - fn := p.ApplyFn - p.Unlock() - return fn(state, c) - } - - defer p.Unlock() - return p.ApplyReturnError -} - -func (p *MockResourceProvisioner) Stop() error { - p.Lock() - defer p.Unlock() - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopReturnError -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go deleted file mode 100644 index 8bc3b017..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go +++ /dev/null @@ -1,278 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Schemas is a container for various kinds of schema that Terraform needs -// during processing. 
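// (A sketch of typical use, with hypothetical names, once a Schemas value
// has been assembled by LoadSchemas further below:
//
//	block, ver := schemas.ResourceTypeConfig("aws", addrs.ManagedResourceMode, "aws_instance")
//	if block == nil {
//		// no schema is known for this resource type
//	}
//
// A nil block, rather than an error, is the miss signal throughout this type.)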
-type Schemas struct { - Providers map[string]*ProviderSchema - Provisioners map[string]*configschema.Block -} - -// ProviderSchema returns the entire ProviderSchema object that was produced -// by the plugin for the given provider, or nil if no such schema is available. -// -// It's usually better to use the more precise methods offered by type -// Schemas to handle this detail automatically. -func (ss *Schemas) ProviderSchema(typeName string) *ProviderSchema { - if ss.Providers == nil { - return nil - } - return ss.Providers[typeName] -} - -// ProviderConfig returns the schema for the provider configuration of the -// given provider type, or nil if no such schema is available. -func (ss *Schemas) ProviderConfig(typeName string) *configschema.Block { - ps := ss.ProviderSchema(typeName) - if ps == nil { - return nil - } - return ps.Provider -} - -// ResourceTypeConfig returns the schema for the configuration of a given -// resource type belonging to a given provider type, or nil if no such -// schema is available. -// -// In many cases the provider type is inferrable from the resource type name, -// but this is not always true because users can override the provider for -// a resource using the "provider" meta-argument. Therefore it's important to -// always pass the correct provider name, even though in many cases it feels -// redundant. -func (ss *Schemas) ResourceTypeConfig(providerType string, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { - ps := ss.ProviderSchema(providerType) - if ps == nil || ps.ResourceTypes == nil { - return nil, 0 - } - return ps.SchemaForResourceType(resourceMode, resourceType) -} - -// ProvisionerConfig returns the schema for the configuration of a given -// provisioner, or nil if no such schema is available. -func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { - return ss.Provisioners[name] -} - -// LoadSchemas searches the given configuration, state and plan (any of which -// may be nil) for constructs that have an associated schema, requests the -// necessary schemas from the given component factory (which must _not_ be nil), -// and returns a single object representing all of the necessary schemas. -// -// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing -// errors across multiple separate objects. Errors here will usually indicate -// either misbehavior on the part of one of the providers or of the provider -// protocol itself. When returned with errors, the returned schemas object is -// still valid but may be incomplete. 
-func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { - schemas := &Schemas{ - Providers: map[string]*ProviderSchema{}, - Provisioners: map[string]*configschema.Block{}, - } - var diags tfdiags.Diagnostics - - newDiags := loadProviderSchemas(schemas.Providers, config, state, components) - diags = diags.Append(newDiags) - newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) - diags = diags.Append(newDiags) - - return schemas, diags.Err() -} - -func loadProviderSchemas(schemas map[string]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(typeName string) { - if _, exists := schemas[typeName]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", typeName) - provider, err := components.ResourceProvider(typeName, "early/"+typeName) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[typeName] = &ProviderSchema{} - diags = diags.Append( - fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", typeName, err), - ) - return - } - defer func() { - provider.Close() - }() - - resp := provider.GetSchema() - if resp.Diagnostics.HasErrors() { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[typeName] = &ProviderSchema{} - diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provider %q: %s", typeName, resp.Diagnostics.Err()), - ) - return - } - - s := &ProviderSchema{ - Provider: resp.Provider.Block, - ResourceTypes: make(map[string]*configschema.Block), - DataSources: make(map[string]*configschema.Block), - - ResourceTypeSchemaVersions: make(map[string]uint64), - } - - if resp.Provider.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. - diags = diags.Append( - fmt.Errorf("invalid negative schema version provider configuration for provider %q", typeName), - ) - } - - for t, r := range resp.ResourceTypes { - s.ResourceTypes[t] = r.Block - s.ResourceTypeSchemaVersions[t] = uint64(r.Version) - if r.Version < 0 { - diags = diags.Append( - fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, typeName), - ) - } - } - - for t, d := range resp.DataSources { - s.DataSources[t] = d.Block - if d.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. 
- diags = diags.Append( - fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, typeName), - ) - } - } - - schemas[typeName] = s - } - - if config != nil { - for _, typeName := range config.ProviderTypes() { - ensure(typeName) - } - } - - if state != nil { - needed := providers.AddressedTypesAbs(state.ProviderAddrs()) - for _, typeName := range needed { - ensure(typeName) - } - } - - return diags -} - -func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(name string) { - if _, exists := schemas[name]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) - provisioner, err := components.ResourceProvisioner(name, "early/"+name) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[name] = &configschema.Block{} - diags = diags.Append( - fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), - ) - return - } - defer func() { - if closer, ok := provisioner.(ResourceProvisionerCloser); ok { - closer.Close() - } - }() - - resp := provisioner.GetSchema() - if resp.Diagnostics.HasErrors() { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[name] = &configschema.Block{} - diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), - ) - return - } - - schemas[name] = resp.Provisioner - } - - if config != nil { - for _, rc := range config.Module.ManagedResources { - for _, pc := range rc.Managed.Provisioners { - ensure(pc.Type) - } - } - - // Must also visit our child modules, recursively. - for _, cc := range config.Children { - childDiags := loadProvisionerSchemas(schemas, cc, components) - diags = diags.Append(childDiags) - } - } - - return diags -} - -// ProviderSchema represents the schema for a provider's own configuration -// and the configuration for some or all of its resources and data sources. -// -// The completeness of this structure depends on how it was constructed. -// When constructed for a configuration, it will generally include only -// resource types and data sources used by that configuration. -type ProviderSchema struct { - Provider *configschema.Block - ResourceTypes map[string]*configschema.Block - DataSources map[string]*configschema.Block - - ResourceTypeSchemaVersions map[string]uint64 -} - -// SchemaForResourceType attempts to find a schema for the given mode and type. -// Returns nil if no such schema is available. -func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { - switch mode { - case addrs.ManagedResourceMode: - return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] - case addrs.DataResourceMode: - // Data resources don't have schema versions right now, since state is discarded for each refresh - return ps.DataSources[typeName], 0 - default: - // Shouldn't happen, because the above cases are comprehensive. - return nil, 0 - } -} - -// SchemaForResourceAddr attempts to find a schema for the mode and type from -// the given resource address. Returns nil if no such schema is available. 
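// (SchemaForResourceAddr below is just shorthand for SchemaForResourceType
// with the mode and type pulled out of the given address.)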
-func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { - return ps.SchemaForResourceType(addr.Mode, addr.Type) -} - -// ProviderSchemaRequest is used to describe to a ResourceProvider which -// aspects of schema are required, when calling the GetSchema method. -type ProviderSchemaRequest struct { - ResourceTypes []string - DataSources []string -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go deleted file mode 100644 index aa13cce8..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go +++ /dev/null @@ -1,189 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/mitchellh/copystructure" -) - -// upgradeStateV1ToV2 is used to upgrade a V1 state representation -// into a V2 state representation -func upgradeStateV1ToV2(old *stateV1) (*State, error) { - if old == nil { - return nil, nil - } - - remote, err := old.Remote.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - - modules := make([]*ModuleState, len(old.Modules)) - for i, module := range old.Modules { - upgraded, err := module.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - modules[i] = upgraded - } - if len(modules) == 0 { - modules = nil - } - - newState := &State{ - Version: 2, - Serial: old.Serial, - Remote: remote, - Modules: modules, - } - - newState.sort() - newState.init() - - return newState, nil -} - -func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) { - if old == nil { - return nil, nil - } - - config, err := copystructure.Copy(old.Config) - if err != nil { - return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) - } - - return &RemoteState{ - Type: old.Type, - Config: config.(map[string]string), - }, nil -} - -func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) { - if old == nil { - return nil, nil - } - - pathRaw, err := copystructure.Copy(old.Path) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - path, ok := pathRaw.([]string) - if !ok { - return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") - } - if len(path) == 0 { - // We found some V1 states with a nil path. Assume root and catch - // duplicate path errors later (as part of Validate). 
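		// (rootModulePath is this package's canonical root module path,
		// effectively []string{"root"}.)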
- path = rootModulePath - } - - // Outputs needs upgrading to use the new structure - outputs := make(map[string]*OutputState) - for key, output := range old.Outputs { - outputs[key] = &OutputState{ - Type: "string", - Value: output, - Sensitive: false, - } - } - - resources := make(map[string]*ResourceState) - for key, oldResource := range old.Resources { - upgraded, err := oldResource.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - resources[key] = upgraded - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - - return &ModuleState{ - Path: path, - Outputs: outputs, - Resources: resources, - Dependencies: dependencies.([]string), - }, nil -} - -func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { - if old == nil { - return nil, nil - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - primary, err := old.Primary.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - deposed := make([]*InstanceState, len(old.Deposed)) - for i, v := range old.Deposed { - upgraded, err := v.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - deposed[i] = upgraded - } - if len(deposed) == 0 { - deposed = nil - } - - return &ResourceState{ - Type: old.Type, - Dependencies: dependencies.([]string), - Primary: primary, - Deposed: deposed, - Provider: old.Provider, - }, nil -} - -func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { - if old == nil { - return nil, nil - } - - attributes, err := copystructure.Copy(old.Attributes) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - ephemeral, err := old.Ephemeral.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - meta, err := copystructure.Copy(old.Meta) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - newMeta := make(map[string]interface{}) - for k, v := range meta.(map[string]string) { - newMeta[k] = v - } - - return &InstanceState{ - ID: old.ID, - Attributes: attributes.(map[string]string), - Ephemeral: *ephemeral, - Meta: newMeta, - }, nil -} - -func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { - connInfo, err := copystructure.Copy(old.ConnInfo) - if err != nil { - return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err) - } - return &EphemeralState{ - ConnInfo: connInfo.(map[string]string), - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go deleted file mode 100644 index e52d35fc..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go +++ /dev/null @@ -1,142 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "regexp" - "sort" - "strconv" - "strings" -) - -// The upgrade process from V2 to V3 state does not affect the structure, -// so we do not need to redeclare all of the structs involved - we just -// take a deep copy of the old structure and assert the version number is -// as we expect. 
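// (A concrete illustration of the rewrite performed below, with hypothetical
// attribute keys: a V2 map attribute stored as "tags.#" = "2", "tags.a",
// "tags.b" becomes "tags.%" = "2" in V3, since "#" counts list elements
// while "%" counts map entries; counts for empty collections are removed
// here and recreated later.)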
-func upgradeStateV2ToV3(old *State) (*State, error) { - new := old.DeepCopy() - - // Ensure the copied version is v2 before attempting to upgrade - if new.Version != 2 { - return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + - "a state which is not version 2.") - } - - // Set the new version number - new.Version = 3 - - // Change the counts for things which look like maps to use the % - // syntax. Remove counts for empty collections - they will be added - // back in later. - for _, module := range new.Modules { - for _, resource := range module.Resources { - // Upgrade Primary - if resource.Primary != nil { - upgradeAttributesV2ToV3(resource.Primary) - } - - // Upgrade Deposed - if resource.Deposed != nil { - for _, deposed := range resource.Deposed { - upgradeAttributesV2ToV3(deposed) - } - } - } - } - - return new, nil -} - -func upgradeAttributesV2ToV3(instanceState *InstanceState) error { - collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) - collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) - - // Identify the key prefix of anything which is a collection - var collectionKeyPrefixes []string - for key := range instanceState.Attributes { - if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) - } - } - sort.Strings(collectionKeyPrefixes) - - log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) - - // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not - // run very often. - for _, prefix := range collectionKeyPrefixes { - // First get the actual keys that belong to this prefix - var potentialKeysMatching []string - for key := range instanceState.Attributes { - if strings.HasPrefix(key, prefix) { - potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) - } - } - sort.Strings(potentialKeysMatching) - - var actualKeysMatching []string - for _, key := range potentialKeysMatching { - if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - actualKeysMatching = append(actualKeysMatching, submatches[0][1]) - } else { - if key != "#" { - actualKeysMatching = append(actualKeysMatching, key) - } - } - } - actualKeysMatching = uniqueSortedStrings(actualKeysMatching) - - // Now inspect the keys in order to determine whether this is most likely to be - // a map, list or set. There is room for error here, so we log in each case. If - // there is no method of telling, we remove the key from the InstanceState in - // order that it will be recreated. Again, this could be rolled into fewer loops - // but we prefer clarity. - - oldCountKey := fmt.Sprintf("%s#", prefix) - - // First, detect "obvious" maps - which have non-numeric keys (mostly). - hasNonNumericKeys := false - for _, key := range actualKeysMatching { - if _, err := strconv.Atoi(key); err != nil { - hasNonNumericKeys = true - } - } - if hasNonNumericKeys { - newCountKey := fmt.Sprintf("%s%%", prefix) - - instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", - strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) - } - - // Now detect empty collections and remove them from state. 
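		// (If no element keys survived the filtering above, only the
		// count key itself existed, i.e. the collection is empty.)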
- if len(actualKeysMatching) == 0 { - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", - strings.TrimSuffix(prefix, ".")) - } - } - - return nil -} - -// uniqueSortedStrings removes duplicates from a slice of strings and returns -// a sorted slice of the unique strings. -func uniqueSortedStrings(input []string) []string { - uniquemap := make(map[string]struct{}) - for _, str := range input { - uniquemap[str] = struct{}{} - } - - output := make([]string, len(uniquemap)) - - i := 0 - for key := range uniquemap { - output[i] = key - i = i + 1 - } - - sort.Strings(output) - return output -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go deleted file mode 100644 index 68cffb41..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go +++ /dev/null @@ -1,145 +0,0 @@ -package terraform - -// stateV1 keeps track of a snapshot state-of-the-world that Terraform -// can use to keep track of what real world resources it is actually -// managing. -// -// stateV1 is only used for the purposes of backwards compatibility -// and is no longer used in Terraform. -// -// For the upgrade process, see state_upgrade_v1_to_v2.go -type stateV1 struct { - // Version is the protocol version. "1" for a StateV1. - Version int `json:"version"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV1 `json:"remote,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV1 `json:"modules"` -} - -type remoteStateV1 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type moduleStateV1 struct { - // Path is the import path from the root module. Module imports are - // always disjoint, so the path represents a module tree - Path []string `json:"path"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]string `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV1 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: a module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. 
- Dependencies []string `json:"depends_on,omitempty"` -} - -type resourceStateV1 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instance on which providers will act. - Primary *instanceStateV1 `json:"primary"` - - // Tainted is used to track any underlying instances that - // have been created but are in a bad or unknown state and - // need to be cleaned up subsequently. In the - // standard case, there is at most a single instance. - // However, in pathological cases, it is possible for the number - // of instances to accumulate. - Tainted []*instanceStateV1 `json:"tainted,omitempty"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. If there were problems creating the - // replacement, the instance remains in the Deposed list so it can be - // destroyed in a future run. Functionally, Deposed instances are very - // similar to Tainted instances in that Terraform is only tracking them in - // order to remember to destroy them. - Deposed []*instanceStateV1 `json:"deposed,omitempty"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider,omitempty"` -} - -type instanceStateV1 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes,omitempty"` - - // Ephemeral is used to store any state associated with this instance - // that is necessary for the Terraform run to complete, but is not - // persisted to a state file. - Ephemeral ephemeralStateV1 `json:"-"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. 
- Meta map[string]string `json:"meta,omitempty"` -} - -type ephemeralStateV1 struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go deleted file mode 100644 index 3f0418d9..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "os" - "testing" -) - -// TestStateFile writes the given state to the path. -func TestStateFile(t *testing.T, path string, state *State) { - f, err := os.Create(path) - if err != nil { - t.Fatalf("err: %s", err) - } - defer f.Close() - - if err := WriteState(state, f); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go deleted file mode 100644 index f9559f41..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go +++ /dev/null @@ -1,62 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphTransformer is the interface that transformers implement. This -// interface is only for transforms that need entire graph visibility. -type GraphTransformer interface { - Transform(*Graph) error } - -// GraphVertexTransformer is an interface that transforms a single -// Vertex within the graph. This is a specialization of GraphTransformer -// that makes it easy to do vertex replacement. -// -// The GraphTransformer that runs through the GraphVertexTransformers is -// VertexTransformer. -type GraphVertexTransformer interface { - Transform(dag.Vertex) (dag.Vertex, error) -} - -// GraphTransformIf is a helper function that conditionally returns the -// given GraphTransformer. This is useful for inlining a sequence -// of transforms without having to split it up into multiple append() calls. -func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer { - if f() { - return then - } - - return nil -} - -type graphTransformerMulti struct { - Transforms []GraphTransformer -} - -func (t *graphTransformerMulti) Transform(g *Graph) error { - var lastStepStr string - for _, t := range t.Transforms { - log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t) - if err := t.Transform(g); err != nil { - return err - } - if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { - log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s------", t, thisStepStr) - lastStepStr = thisStepStr - } else { - log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t) - } - } - - return nil -} - -// GraphTransformMulti combines multiple graph transformers into a single -// GraphTransformer that runs all the individual graph transformers. 
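// (A typical composition, sketched with two transformers that appear later
// in this section; the variable names are placeholders:
//
//	t := GraphTransformMulti(
//		&AttachResourceConfigTransformer{Config: rootConfig},
//		&AttachStateTransformer{State: currentState},
//	)
//	err := t.Transform(g)
//
// Each transformer runs in order against the same *Graph.)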
-func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer { - return &graphTransformerMulti{Transforms: ts} -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go deleted file mode 100644 index cbac1338..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// GraphNodeAttachProvider is an interface that must be implemented by nodes -// that want provider configurations attached. -type GraphNodeAttachProvider interface { - // Must be implemented to determine the path for the configuration - GraphNodeSubPath - - // ProviderName with no module prefix. Example: "aws". - ProviderAddr() addrs.AbsProviderConfig - - // Sets the configuration - AttachProvider(*configs.Provider) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go deleted file mode 100644 index 23578c78..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes -// that want resource configurations attached. -type GraphNodeAttachResourceConfig interface { - GraphNodeResource - - // Sets the configuration - AttachResourceConfig(*configs.Resource) -} - -// AttachResourceConfigTransformer goes through the graph and attaches -// resource configuration structures to nodes that implement -// GraphNodeAttachManagedResourceConfig or GraphNodeAttachDataResourceConfig. -// -// The attached configuration structures are directly from the configuration. -// If they're going to be modified, a copy should be made. -type AttachResourceConfigTransformer struct { - Config *configs.Config // Config is the root node in the config tree -} - -func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { - - // Go through and find GraphNodeAttachResource - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachResource implementations - arn, ok := v.(GraphNodeAttachResourceConfig) - if !ok { - continue - } - - // Determine what we're looking for - addr := arn.ResourceAddr() - - // Get the configuration. 
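	// (DescendentForInstance walks the configuration tree down to the
	// module instance that contains this resource; a nil result means the
	// address no longer has configuration, e.g. the block was removed.)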
- config := t.Config.DescendentForInstance(addr.Module) - if config == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v) - continue - } - - for _, r := range config.Module.ManagedResources { - rAddr := r.Addr() - - if rAddr != addr.Resource { - // Not the same resource - continue - } - - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange) - arn.AttachResourceConfig(r) - } - for _, r := range config.Module.DataResources { - rAddr := r.Addr() - - if rAddr != addr.Resource { - // Not the same resource - continue - } - - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange) - arn.AttachResourceConfig(r) - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go deleted file mode 100644 index fee220b5..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go +++ /dev/null @@ -1,99 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphNodeAttachResourceSchema is an interface implemented by node types -// that need a resource schema attached. -type GraphNodeAttachResourceSchema interface { - GraphNodeResource - GraphNodeProviderConsumer - - AttachResourceSchema(schema *configschema.Block, version uint64) -} - -// GraphNodeAttachProviderConfigSchema is an interface implemented by node types -// that need a provider configuration schema attached. -type GraphNodeAttachProviderConfigSchema interface { - GraphNodeProvider - - AttachProviderConfigSchema(*configschema.Block) -} - -// GraphNodeAttachProvisionerSchema is an interface implemented by node types -// that need one or more provisioner schemas attached. -type GraphNodeAttachProvisionerSchema interface { - ProvisionedBy() []string - - // SetProvisionerSchema is called during transform for each provisioner - // type returned from ProvisionedBy, providing the configuration schema - // for each provisioner in turn. The implementer should save these for - // later use in evaluating provisioner configuration blocks. - AttachProvisionerSchema(name string, schema *configschema.Block) -} - -// AttachSchemaTransformer finds nodes that implement -// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or -// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each -// and then passes them to a method implemented by the node. -type AttachSchemaTransformer struct { - Schemas *Schemas -} - -func (t *AttachSchemaTransformer) Transform(g *Graph) error { - if t.Schemas == nil { - // Should never happen with a reasonable caller, but we'll return a - // proper error here anyway so that we'll fail gracefully. 
- return fmt.Errorf("AttachSchemaTransformer used with nil Schemas") - } - - for _, v := range g.Vertices() { - - if tv, ok := v.(GraphNodeAttachResourceSchema); ok { - addr := tv.ResourceAddr() - mode := addr.Resource.Mode - typeName := addr.Resource.Type - providerAddr, _ := tv.ProvidedBy() - providerType := providerAddr.ProviderConfig.Type - - schema, version := t.Schemas.ResourceTypeConfig(providerType, mode, typeName) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v)) - tv.AttachResourceSchema(schema, version) - } - - if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok { - providerAddr := tv.ProviderAddr() - schema := t.Schemas.ProviderConfig(providerAddr.ProviderConfig.Type) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v)) - tv.AttachProviderConfigSchema(schema) - } - - if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok { - names := tv.ProvisionedBy() - for _, name := range names { - schema := t.Schemas.ProvisionerConfig(name) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v)) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v)) - tv.AttachProvisionerSchema(name, schema) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go deleted file mode 100644 index f8749487..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go +++ /dev/null @@ -1,68 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// GraphNodeAttachResourceState is an interface that can be implemented -// to request that a ResourceState is attached to the node. -// -// Due to a historical naming inconsistency, the type ResourceState actually -// represents the state for a particular _instance_, while InstanceState -// represents the values for that instance during a particular phase -// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState -// is supported only for nodes that represent resource instances, even though -// the name might suggest it is for containing resources. -type GraphNodeAttachResourceState interface { - GraphNodeResourceInstance - - // Sets the state - AttachResourceState(*states.Resource) -} - -// AttachStateTransformer goes through the graph and attaches -// state to nodes that implement the interfaces above. -type AttachStateTransformer struct { - State *states.State // State is the root state -} - -func (t *AttachStateTransformer) Transform(g *Graph) error { - // If no state, then nothing to do - if t.State == nil { - log.Printf("[DEBUG] Not attaching any node states: overall state is nil") - return nil - } - - for _, v := range g.Vertices() { - // Nodes implement this interface to request state attachment. 
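		// (The two-result type assertion below lets vertices that do not
		// implement the interface be skipped rather than causing a panic.)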
-		an, ok := v.(GraphNodeAttachResourceState)
-		if !ok {
-			continue
-		}
-		addr := an.ResourceInstanceAddr()
-
-		rs := t.State.Resource(addr.ContainingResource())
-		if rs == nil {
-			log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr)
-			continue
-		}
-
-		is := rs.Instance(addr.Resource.Key)
-		if is == nil {
-			// We don't actually need this here, since we'll attach the whole
-			// resource state, but we still check because it'd be weird
-			// for the specific instance we're attaching to not to exist.
-			log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr)
-			continue
-		}
-
-		// make sure to attach a copy of the state, so that instances don't
-		// all modify the same shared ResourceState.
-		an.AttachResourceState(rs.DeepCopy())
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go
deleted file mode 100644
index 8920761e..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package terraform
-
-import (
-	"log"
-	"sync"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
-)
-
-// ConfigTransformer is a GraphTransformer that adds all the resources
-// from the configuration to the graph.
-//
-// The module used to configure this transformer must be the root module.
-//
-// Only resources are added to the graph. Variables, outputs, and
-// providers must be added via other transforms.
-//
-// Unlike ConfigTransformerOld, this transformer creates a graph with
-// all resources including module resources, rather than creating module
-// nodes that are then "flattened".
-type ConfigTransformer struct {
-	Concrete ConcreteResourceNodeFunc
-
-	// Module is the module to add resources from.
-	Config *configs.Config
-
-	// Unique will only add resources that aren't already present in the graph.
-	Unique bool
-
-	// Mode will only add resources that match the given mode
-	ModeFilter bool
-	Mode       addrs.ResourceMode
-
-	l         sync.Mutex
-	uniqueMap map[string]struct{}
-}
-
-func (t *ConfigTransformer) Transform(g *Graph) error {
-	// Lock since we use some internal state
-	t.l.Lock()
-	defer t.l.Unlock()
-
-	// If no configuration is available, we don't do anything
-	if t.Config == nil {
-		return nil
-	}
-
-	// Reset the uniqueness map. If we're tracking uniques, then populate
-	// it with addresses.
-	t.uniqueMap = make(map[string]struct{})
-	defer func() { t.uniqueMap = nil }()
-	if t.Unique {
-		for _, v := range g.Vertices() {
-			if rn, ok := v.(GraphNodeResource); ok {
-				t.uniqueMap[rn.ResourceAddr().String()] = struct{}{}
-			}
-		}
-	}
-
-	// Start the transformation process
-	return t.transform(g, t.Config)
-}
-
-func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error {
-	// If no config, do nothing
-	if config == nil {
-		return nil
-	}
-
-	// Add our resources
-	if err := t.transformSingle(g, config); err != nil {
-		return err
-	}
-
-	// Transform all the children.
- for _, c := range config.Children { - if err := t.transform(g, c); err != nil { - return err - } - } - - return nil -} - -func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error { - path := config.Path - module := config.Module - log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path) - - // For now we assume that each module call produces only one module - // instance with no key, since we don't yet support "count" and "for_each" - // on modules. - // FIXME: As part of supporting "count" and "for_each" on modules, rework - // this so that we'll "expand" the module call first and then create graph - // nodes for each module instance separately. - instPath := path.UnkeyedInstanceShim() - - allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources)) - for _, r := range module.ManagedResources { - allResources = append(allResources, r) - } - for _, r := range module.DataResources { - allResources = append(allResources, r) - } - - for _, r := range allResources { - relAddr := r.Addr() - - if t.ModeFilter && relAddr.Mode != t.Mode { - // Skip non-matching modes - continue - } - - addr := relAddr.Absolute(instPath) - if _, ok := t.uniqueMap[addr.String()]; ok { - // We've already seen a resource with this address. This should - // never happen, because we enforce uniqueness in the config loader. - continue - } - - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go deleted file mode 100644 index 892f75ec..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go +++ /dev/null @@ -1,33 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// CountBoundaryTransformer adds a node that depends on everything else -// so that it runs last in order to clean up the state for nodes that -// are on the "count boundary": "foo.0" when only one exists becomes "foo" -type CountBoundaryTransformer struct { - Config *configs.Config -} - -func (t *CountBoundaryTransformer) Transform(g *Graph) error { - node := &NodeCountBoundary{ - Config: t.Config, - } - g.Add(node) - - // Depends on everything - for _, v := range g.Vertices() { - // Don't connect to ourselves - if v == node { - continue - } - - // Connect! - g.Connect(dag.BasicEdge(node, v)) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go deleted file mode 100644 index 98e088ee..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go +++ /dev/null @@ -1,297 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// GraphNodeDestroyerCBD must be implemented by nodes that might be -// create-before-destroy destroyers, or might plan a create-before-destroy -// action. 
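-//
-// As an illustrative sketch only (a hypothetical type, not from this file),
-// a minimal implementer just stores and reports a boolean:
-//
-//	type cbdNode struct{ cbd bool }
-//
-//	func (n *cbdNode) CreateBeforeDestroy() bool { return n.cbd }
-//
-//	func (n *cbdNode) ModifyCreateBeforeDestroy(v bool) error {
-//		n.cbd = v
-//		return nil
-//	}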
-type GraphNodeDestroyerCBD interface {
-	// CreateBeforeDestroy returns true if this node represents a node
-	// that is doing a CBD.
-	CreateBeforeDestroy() bool
-
-	// ModifyCreateBeforeDestroy is called when the CBD state of a node
-	// is changed dynamically. This can return an error if this isn't
-	// allowed.
-	ModifyCreateBeforeDestroy(bool) error
-}
-
-// GraphNodeAttachDestroyer is implemented by applyable nodes that have a
-// companion destroy node. This allows the creation node to look up the status
-// of the destroy node and determine if it needs to depose the existing state,
-// or replace it.
-// If a node is not marked as create-before-destroy in the configuration, but a
-// dependency forces that status, only the destroy node will be aware of that
-// status.
-type GraphNodeAttachDestroyer interface {
-	// AttachDestroyNode takes a destroy node and saves a reference to that
-	// node in the receiver, so it can later check the status of
-	// CreateBeforeDestroy().
-	AttachDestroyNode(n GraphNodeDestroyerCBD)
-}
-
-// ForcedCBDTransformer detects when a particular CBD-able graph node is
-// depended on by another node that has create_before_destroy set, which
-// requires create_before_destroy to be forced on for this node too, and
-// forces it on.
-//
-// This must be used in the plan graph builder to ensure that
-// create_before_destroy settings are properly propagated before constructing
-// the planned changes. This requires that the plannable resource nodes
-// implement GraphNodeDestroyerCBD.
-type ForcedCBDTransformer struct {
-}
-
-func (t *ForcedCBDTransformer) Transform(g *Graph) error {
-	for _, v := range g.Vertices() {
-		dn, ok := v.(GraphNodeDestroyerCBD)
-		if !ok {
-			continue
-		}
-
-		if !dn.CreateBeforeDestroy() {
-			// If there are no CBD descendants (nodes that depend on this
-			// one), then we do nothing here.
-			if !t.hasCBDDescendent(g, v) {
-				log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v)
-				continue
-			}
-
-			// If this isn't naturally a CBD node, this means that a descendant is,
-			// and we need to auto-upgrade this node to CBD. We do this because
-			// a CBD node depending on non-CBD will result in cycles. To avoid this,
-			// we always attempt to upgrade it.
-			log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v)
-			if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
-				return fmt.Errorf(
-					"%s: must have create before destroy enabled because "+
-						"a dependent resource has CBD enabled. However, when "+
-						"attempting to automatically do this, an error occurred: %s",
-					dag.VertexName(v), err)
-			}
-		} else {
-			log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v)
-		}
-	}
-	return nil
-}
-
-// hasCBDDescendent returns true if any descendant (a node that depends on
-// this one) has CBD set.
-func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool {
-	s, _ := g.Descendents(v)
-	if s == nil {
-		return true
-	}
-
-	for _, ov := range s.List() {
-		dn, ok := ov.(GraphNodeDestroyerCBD)
-		if !ok {
-			continue
-		}
-
-		if dn.CreateBeforeDestroy() {
-			// some descendant is CreateBeforeDestroy, so we need to follow suit
-			log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov))
-			return true
-		}
-	}
-
-	return false
-}
-
-// CBDEdgeTransformer modifies the edges of CBD nodes that went through
-// the DestroyEdgeTransformer to have the right dependencies. There are
-// two real tasks here:
-//
-//   1. With CBD, the destroy edge is inverted: the destroy depends on
-//      the creation.
-//
-//   2. A_d must depend on resources that depend on A. This is to enable
-//      the destroy to only happen once nodes that depend on A successfully
-//      update to A. Example: adding a web server updates the load balancer
-//      before deleting the old web server.
-//
-// This transformer requires that a previous transformer has already forced
-// create_before_destroy on for nodes that are depended on by explicit CBD
-// nodes. This is the logic in ForcedCBDTransformer, though in practice we
-// will get here by recording the CBD-ness of each change in the plan during
-// the plan walk and then forcing the nodes into the appropriate setting during
-// DiffTransformer when building the apply graph.
-type CBDEdgeTransformer struct {
-	// Config and State are only needed to look up dependencies in
-	// any way possible. Either can be nil if not available.
-	Config *configs.Config
-	State  *states.State
-
-	// If configuration is present then Schemas is required in order to
-	// obtain schema information from providers and provisioners so we can
-	// properly resolve implicit dependencies.
-	Schemas *Schemas
-
-	// If the operation is a simple destroy, no transformation is done.
-	Destroy bool
-}
-
-func (t *CBDEdgeTransformer) Transform(g *Graph) error {
-	if t.Destroy {
-		return nil
-	}
-
-	// Go through and reverse any destroy edges
-	destroyMap := make(map[string][]dag.Vertex)
-	for _, v := range g.Vertices() {
-		dn, ok := v.(GraphNodeDestroyerCBD)
-		if !ok {
-			continue
-		}
-		dern, ok := v.(GraphNodeDestroyer)
-		if !ok {
-			continue
-		}
-
-		if !dn.CreateBeforeDestroy() {
-			continue
-		}
-
-		// Find the destroy edge. There should only be one.
-		for _, e := range g.EdgesTo(v) {
-			// Not a destroy edge, ignore it
-			de, ok := e.(*DestroyEdge)
-			if !ok {
-				continue
-			}
-
-			log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
-				dag.VertexName(de.Source()), dag.VertexName(de.Target()))
-
-			// Found it! Invert.
-			g.RemoveEdge(de)
-			applyNode := de.Source()
-			destroyNode := de.Target()
-			g.Connect(&DestroyEdge{S: destroyNode, T: applyNode})
-			break
-		}
-
-		// If the address has an index, we strip that. Our depMap creation
-		// graph doesn't expand counts so we don't currently get _exact_
-		// dependencies. One day when we limit dependencies more exactly
-		// this will have to change. We have a test case covering this
-		// (depNonCBDCountBoth) so it'll be caught.
-		addr := dern.DestroyAddr()
-		key := addr.ContainingResource().String()
-
-		// Add this to the list of nodes that we need to fix up
-		// the edges for (step 2 above in the docs).
-		destroyMap[key] = append(destroyMap[key], v)
-	}
-
-	// If we have no CBD nodes, then our work here is done
-	if len(destroyMap) == 0 {
-		return nil
-	}
-
-	// We have CBD nodes. We now have to move on to the much more difficult
-	// task of connecting dependencies of the creation side of the destroy
-	// to the destruction node. The easiest way to explain this is an example:
-	//
-	// Given a pre-destroy dependence of: A => B
-	// And A has CBD set.
-	//
-	// The resulting graph should be: A => B => A_d
-	//
-	// The key here is that B happens before A is destroyed. This is to
-	// facilitate the primary purpose for CBD: making sure that downstreams
-	// are properly updated to avoid downtime before the resource is destroyed.
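-	// As a sketch (hypothetical vertices aD and b standing in for A_d and B
-	// above), the connection made for each such pair further below is:
-	//
-	//	g.Connect(dag.BasicEdge(aD, b)) // A_d waits for B
-	//
-	// which, together with the inverted destroy edge created earlier,
-	// yields the ordering A => B => A_d.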
-	depMap, err := t.depMap(g, destroyMap)
-	if err != nil {
-		return err
-	}
-
-	// We now have the mapping of resource addresses to the destroy
-	// nodes they need to depend on. We now go through our own vertices to
-	// find any matching these addresses and make the connection.
-	for _, v := range g.Vertices() {
-		// We're looking for creators
-		rn, ok := v.(GraphNodeCreator)
-		if !ok {
-			continue
-		}
-
-		// Get the address
-		addr := rn.CreateAddr()
-
-		// If the address has an index, we strip that. Our depMap creation
-		// graph doesn't expand counts so we don't currently get _exact_
-		// dependencies. One day when we limit dependencies more exactly
-		// this will have to change. We have a test case covering this
-		// (depNonCBDCount) so it'll be caught.
-		key := addr.ContainingResource().String()
-
-		// If there is nothing this resource should depend on, ignore it
-		dns, ok := depMap[key]
-		if !ok {
-			continue
-		}
-
-		// We have nodes! Make the connection
-		for _, dn := range dns {
-			log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s",
-				dag.VertexName(dn), dag.VertexName(v))
-			g.Connect(dag.BasicEdge(dn, v))
-		}
-	}
-
-	return nil
-}
-
-func (t *CBDEdgeTransformer) depMap(g *Graph, destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
-	// Build the list of destroy nodes that each resource address should depend
-	// on. For example, when we find B, we map the address of B to A_d in the
-	// "depMap" variable below.
-	depMap := make(map[string][]dag.Vertex)
-	for _, v := range g.Vertices() {
-		// We're looking for resources.
-		rn, ok := v.(GraphNodeResource)
-		if !ok {
-			continue
-		}
-
-		// Get the address
-		addr := rn.ResourceAddr()
-		key := addr.String()
-
-		// Get the destroy nodes that are destroying this resource.
-		// If there aren't any, then we don't need to worry about
-		// any connections.
-		dns, ok := destroyMap[key]
-		if !ok {
-			continue
-		}
-
-		// Get the nodes that depend on this one. In the example above:
-		// finding B in A => B. Since dependencies can span modules, walk all
-		// descendants of the resource.
-		des, _ := g.Descendents(v)
-		for _, v := range des.List() {
-			// We're looking for resources.
-			rn, ok := v.(GraphNodeResource)
-			if !ok {
-				continue
-			}
-
-			// Keep track of the destroy nodes that this address
-			// needs to depend on.
-			key := rn.ResourceAddr().String()
-			depMap[key] = append(depMap[key], dns...)
-		}
-	}
-
-	return depMap, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go
deleted file mode 100644
index 1d211570..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go
+++ /dev/null
@@ -1,323 +0,0 @@
-package terraform
-
-import (
-	"log"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/states"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
-)
-
-// GraphNodeDestroyer must be implemented by nodes that destroy resources.
-type GraphNodeDestroyer interface {
-	dag.Vertex
-
-	// DestroyAddr is the address of the resource that is being
-	// destroyed by this node. If this returns nil, then this node
-	// is not destroying anything.
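-	// (Illustrative example only: a node destroying the first instance of
-	// resource "aws_instance" "web" would return the absolute instance
-	// address aws_instance.web[0] here.)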
-	DestroyAddr() *addrs.AbsResourceInstance
-}
-
-// GraphNodeCreator must be implemented by nodes that create OR update resources.
-type GraphNodeCreator interface {
-	// CreateAddr is the address of the resource being created or updated
-	CreateAddr() *addrs.AbsResourceInstance
-}
-
-// DestroyEdgeTransformer is a GraphTransformer that creates the proper
-// references for destroy resources. Destroy resources are more complex
-// in that they must depend on the destruction of resources that
-// in turn depend on the CREATION of the node being destroyed.
-//
-// That is complicated. Visually:
-//
-//   B_d -> A_d -> A -> B
-//
-// Notice that A destroy depends on B destroy, while B create depends on
-// A create. They're inverted. This must be done for example because often
-// dependent resources will block parent resources from deleting. Concrete
-// example: VPC with subnets, the VPC can't be deleted while there are
-// still subnets.
-type DestroyEdgeTransformer struct {
-	// These are needed to properly build the graph of dependencies
-	// to determine what a destroy node depends on. Any of these can be nil.
-	Config *configs.Config
-	State  *states.State
-
-	// If configuration is present then Schemas is required in order to
-	// obtain schema information from providers and provisioners in order
-	// to properly resolve implicit dependencies.
-	Schemas *Schemas
-}
-
-func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
-	// Build a map of what is being destroyed (by address string) to
-	// the list of destroyers. Usually there will be at most one destroyer
-	// per node, but we allow multiple if present for completeness.
-	destroyers := make(map[string][]GraphNodeDestroyer)
-	destroyerAddrs := make(map[string]addrs.AbsResourceInstance)
-	for _, v := range g.Vertices() {
-		dn, ok := v.(GraphNodeDestroyer)
-		if !ok {
-			continue
-		}
-
-		addrP := dn.DestroyAddr()
-		if addrP == nil {
-			continue
-		}
-		addr := *addrP
-
-		key := addr.String()
-		log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(dn), v, key)
-		destroyers[key] = append(destroyers[key], dn)
-		destroyerAddrs[key] = addr
-	}
-
-	// If we aren't destroying anything, there will be no edges to make
-	// so just exit early and avoid future work.
-	if len(destroyers) == 0 {
-		return nil
-	}
-
-	// Go through and connect creators to destroyers. Going along with
-	// our example, this makes: A_d => A
-	for _, v := range g.Vertices() {
-		cn, ok := v.(GraphNodeCreator)
-		if !ok {
-			continue
-		}
-
-		addr := cn.CreateAddr()
-		if addr == nil {
-			continue
-		}
-
-		key := addr.String()
-		ds := destroyers[key]
-		if len(ds) == 0 {
-			continue
-		}
-
-		for _, d := range ds {
-			// For illustrating our example
-			a_d := d.(dag.Vertex)
-			a := v
-
-			log.Printf(
-				"[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q",
-				dag.VertexName(a), dag.VertexName(a_d))
-
-			g.Connect(&DestroyEdge{S: a, T: a_d})
-
-			// Attach the destroy node to the creator
-			// There really shouldn't be more than one destroyer, but even if
-			// there are, any of them will represent the correct
-			// CreateBeforeDestroy status.
-			if n, ok := cn.(GraphNodeAttachDestroyer); ok {
-				if d, ok := d.(GraphNodeDestroyerCBD); ok {
-					n.AttachDestroyNode(d)
-				}
-			}
-		}
-	}
-
-	// This is strange but is the easiest way to get the dependencies
-	// of a node that is being destroyed. We use another graph to make sure
-	// the resource is in the graph and ask for references.
We have to do this - // because the node that is being destroyed may NOT be in the graph. - // - // Example: resource A is force new, then destroy A AND create A are - // in the graph. BUT if resource A is just pure destroy, then only - // destroy A is in the graph, and create A is not. - providerFn := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{NodeAbstractProvider: a} - } - steps := []GraphTransformer{ - // Add the local values - &LocalTransformer{Config: t.Config}, - - // Add outputs and metadata - &OutputTransformer{Config: t.Config}, - &AttachResourceConfigTransformer{Config: t.Config}, - &AttachStateTransformer{State: t.State}, - - // Add all the variables. We can depend on resources through - // variables due to module parameters, and we need to properly - // determine that. - &RootVariableTransformer{Config: t.Config}, - &ModuleVariableTransformer{Config: t.Config}, - - TransformProviders(nil, providerFn, t.Config), - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: t.Schemas}, - - &ReferenceTransformer{}, - } - - // Go through all the nodes being destroyed and create a graph. - // The resulting graph is only of things being CREATED. For example, - // following our example, the resulting graph would be: - // - // A, B (with no edges) - // - var tempG Graph - var tempDestroyed []dag.Vertex - for d := range destroyers { - // d is the string key for the resource being destroyed. We actually - // want the address value, which we stashed earlier. - addr := destroyerAddrs[d] - - // This part is a little bit weird but is the best way to - // find the dependencies we need to: build a graph and use the - // attach config and state transformers then ask for references. - abstract := NewNodeAbstractResourceInstance(addr) - tempG.Add(abstract) - tempDestroyed = append(tempDestroyed, abstract) - - // We also add the destroy version here since the destroy can - // depend on things that the creation doesn't (destroy provisioners). - destroy := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: abstract} - tempG.Add(destroy) - tempDestroyed = append(tempDestroyed, destroy) - } - - // Run the graph transforms so we have the information we need to - // build references. - log.Printf("[TRACE] DestroyEdgeTransformer: constructing temporary graph for analysis of references, starting from:\n%s", tempG.StringWithNodeTypes()) - for _, s := range steps { - log.Printf("[TRACE] DestroyEdgeTransformer: running %T on temporary graph", s) - if err := s.Transform(&tempG); err != nil { - log.Printf("[TRACE] DestroyEdgeTransformer: %T failed: %s", s, err) - return err - } - } - log.Printf("[TRACE] DestroyEdgeTransformer: temporary reference graph:\n%s", tempG.String()) - - // Go through all the nodes in the graph and determine what they - // depend on. 
- for _, v := range tempDestroyed { - // Find all ancestors of this to determine the edges we'll depend on - vs, err := tempG.Ancestors(v) - if err != nil { - return err - } - - refs := make([]dag.Vertex, 0, vs.Len()) - for _, raw := range vs.List() { - refs = append(refs, raw.(dag.Vertex)) - } - - refNames := make([]string, len(refs)) - for i, ref := range refs { - refNames[i] = dag.VertexName(ref) - } - log.Printf( - "[TRACE] DestroyEdgeTransformer: creation node %q references %s", - dag.VertexName(v), refNames) - - // If we have no references, then we won't need to do anything - if len(refs) == 0 { - continue - } - - // Get the destroy node for this. In the example of our struct, - // we are currently at B and we're looking for B_d. - rn, ok := v.(GraphNodeResourceInstance) - if !ok { - log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s, since it's not a resource", dag.VertexName(v)) - continue - } - - addr := rn.ResourceInstanceAddr() - dns := destroyers[addr.String()] - - // We have dependencies, check if any are being destroyed - // to build the list of things that we must depend on! - // - // In the example of the struct, if we have: - // - // B_d => A_d => A => B - // - // Then at this point in the algorithm we started with B_d, - // we built B (to get dependencies), and we found A. We're now looking - // to see if A_d exists. - var depDestroyers []dag.Vertex - for _, v := range refs { - rn, ok := v.(GraphNodeResourceInstance) - if !ok { - continue - } - - addr := rn.ResourceInstanceAddr() - key := addr.String() - if ds, ok := destroyers[key]; ok { - for _, d := range ds { - depDestroyers = append(depDestroyers, d.(dag.Vertex)) - log.Printf( - "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s", - key, dag.VertexName(d)) - } - } - } - - // Go through and make the connections. Use the variable - // names "a_d" and "b_d" to reference our example. - for _, a_d := range dns { - for _, b_d := range depDestroyers { - if b_d != a_d { - log.Printf("[TRACE] DestroyEdgeTransformer: %q depends on %q", dag.VertexName(b_d), dag.VertexName(a_d)) - g.Connect(dag.BasicEdge(b_d, a_d)) - } - } - } - } - - return t.pruneResources(g) -} - -// If there are only destroy instances for a particular resource, there's no -// reason for the resource node to prepare the state. Remove Resource nodes so -// that they don't fail by trying to evaluate a resource that is only being -// destroyed along with its dependencies. 
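-//
-// (Illustrative sketch: if resource "null_resource" "a" is removed from the
-// configuration entirely, only destroy nodes for its instances remain, so
-// the whole-resource node is removed and its incoming and outgoing edges
-// are bridged to keep the graph connected.)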
-func (t *DestroyEdgeTransformer) pruneResources(g *Graph) error { - for _, v := range g.Vertices() { - n, ok := v.(*NodeApplyableResource) - if !ok { - continue - } - - // if there are only destroy dependencies, we don't need this node - des, err := g.Descendents(n) - if err != nil { - return err - } - - descendents := des.List() - nonDestroyInstanceFound := false - for _, v := range descendents { - if _, ok := v.(*NodeApplyableResourceInstance); ok { - nonDestroyInstanceFound = true - break - } - } - - if nonDestroyInstanceFound { - continue - } - - // connect all the through-edges, then delete the node - for _, d := range g.DownEdges(n).List() { - for _, u := range g.UpEdges(n).List() { - g.Connect(dag.BasicEdge(u, d)) - } - } - log.Printf("DestroyEdgeTransformer: pruning unused resource node %s", dag.VertexName(n)) - g.Remove(n) - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go deleted file mode 100644 index b7a237fc..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go +++ /dev/null @@ -1,184 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// DiffTransformer is a GraphTransformer that adds graph nodes representing -// each of the resource changes described in the given Changes object. -type DiffTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - State *states.State - Changes *plans.Changes -} - -func (t *DiffTransformer) Transform(g *Graph) error { - if t.Changes == nil || len(t.Changes.Resources) == 0 { - // Nothing to do! - return nil - } - - // Go through all the modules in the diff. - log.Printf("[TRACE] DiffTransformer starting") - - var diags tfdiags.Diagnostics - state := t.State - changes := t.Changes - - // DiffTransformer creates resource _instance_ nodes. If there are any - // whole-resource nodes already in the graph, we must ensure that they - // get evaluated before any of the corresponding instances by creating - // dependency edges, so we'll do some prep work here to ensure we'll only - // create connections to nodes that existed before we started here. - resourceNodes := map[string][]GraphNodeResource{} - for _, node := range g.Vertices() { - rn, ok := node.(GraphNodeResource) - if !ok { - continue - } - // We ignore any instances that _also_ implement - // GraphNodeResourceInstance, since in the unlikely event that they - // do exist we'd probably end up creating cycles by connecting them. - if _, ok := node.(GraphNodeResourceInstance); ok { - continue - } - - addr := rn.ResourceAddr().String() - resourceNodes[addr] = append(resourceNodes[addr], rn) - } - - for _, rc := range changes.Resources { - addr := rc.Addr - dk := rc.DeposedKey - - log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk) - - // Depending on the action we'll need some different combinations of - // nodes, because destroying uses a special node type separate from - // other actions. 
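-		// As a quick summary of the switch below:
-		//
-		//	NoOp             => skip entirely
-		//	Delete           => delete only
-		//	DeleteThenCreate => update + delete
-		//	CreateThenDelete => update + delete, with create_before_destroy
-		//	anything else    => update only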
-		var update, delete, createBeforeDestroy bool
-		switch rc.Action {
-		case plans.NoOp:
-			continue
-		case plans.Delete:
-			delete = true
-		case plans.DeleteThenCreate, plans.CreateThenDelete:
-			update = true
-			delete = true
-			createBeforeDestroy = (rc.Action == plans.CreateThenDelete)
-		default:
-			update = true
-		}
-
-		if dk != states.NotDeposed && update {
-			diags = diags.Append(tfdiags.Sourceless(
-				tfdiags.Error,
-				"Invalid planned change for deposed object",
-				fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk),
-			))
-			continue
-		}
-
-		// If we're going to do a create_before_destroy Replace operation then
-		// we need to allocate a DeposedKey to use to retain the
-		// not-yet-destroyed prior object, so that the delete node can destroy
-		// _that_ rather than the newly-created node, which will be current
-		// by the time the delete node is visited.
-		if update && delete && createBeforeDestroy {
-			// In this case, variable dk will be the _pre-assigned_ DeposedKey
-			// that must be used if the update graph node deposes the current
-			// instance, which will then align with the same key we pass
-			// into the destroy node to ensure we destroy exactly the deposed
-			// object we expect.
-			if state != nil {
-				ris := state.ResourceInstance(addr)
-				if ris == nil {
-					// Should never happen, since we don't plan to replace an
-					// instance that doesn't exist yet.
-					diags = diags.Append(tfdiags.Sourceless(
-						tfdiags.Error,
-						"Invalid planned change",
-						fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in Terraform.", addr),
-					))
-					continue
-				}
-
-				// Allocating a deposed key separately from using it can be racy
-				// in general, but we assume here that nothing except the apply
-				// node we instantiate below will actually make new deposed objects
-				// in practice, and so the set of already-used keys will not change
-				// between now and then.
-				dk = ris.FindUnusedDeposedKey()
-			} else {
-				// If we have no state at all yet then we can use _any_
-				// DeposedKey.
-				dk = states.NewDeposedKey()
-			}
-		}
-
-		if update {
-			// All actions except destroy use the node type chosen by t.Concrete
-			abstract := NewNodeAbstractResourceInstance(addr)
-			var node dag.Vertex = abstract
-			if f := t.Concrete; f != nil {
-				node = f(abstract)
-			}
-
-			if createBeforeDestroy {
-				// We'll attach our pre-allocated DeposedKey to the node if
-				// it supports that. NodeApplyableResourceInstance is the
-				// specific concrete node type we are looking for here really,
-				// since that's the only node type that might depose objects.
-				if dn, ok := node.(GraphNodeDeposer); ok {
-					dn.SetPreallocatedDeposedKey(dk)
-				}
-				log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk)
-			} else {
-				log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node))
-			}
-
-			g.Add(node)
-			rsrcAddr := addr.ContainingResource().String()
-			for _, rsrcNode := range resourceNodes[rsrcAddr] {
-				g.Connect(dag.BasicEdge(node, rsrcNode))
-			}
-		}
-
-		if delete {
-			// Destroying always uses a destroy-specific node type, though
-			// which one depends on whether we're destroying a current object
-			// or a deposed object.
-			var node GraphNodeResourceInstance
-			abstract := NewNodeAbstractResourceInstance(addr)
-			if dk == states.NotDeposed {
-				node = &NodeDestroyResourceInstance{
-					NodeAbstractResourceInstance: abstract,
-					DeposedKey:                   dk,
-				}
-				node.(*NodeDestroyResourceInstance).ModifyCreateBeforeDestroy(createBeforeDestroy)
-			} else {
-				node = &NodeDestroyDeposedResourceInstanceObject{
-					NodeAbstractResourceInstance: abstract,
-					DeposedKey:                   dk,
-				}
-			}
-			if dk == states.NotDeposed {
-				log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node))
-			} else {
-				log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node))
-			}
-			g.Add(node)
-		}
-
-	}
-
-	log.Printf("[TRACE] DiffTransformer complete")
-
-	return diags.Err()
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go
deleted file mode 100644
index 03eac685..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package terraform
-
-import (
-	"log"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
-)
-
-// GraphNodeExpandable is an interface that nodes can implement to
-// signal that they can be expanded. Expanded nodes turn into
-// GraphNodeSubgraph nodes within the graph.
-type GraphNodeExpandable interface {
-	Expand(GraphBuilder) (GraphNodeSubgraph, error)
-}
-
-// GraphNodeDynamicExpandable is an interface that nodes can implement
-// to signal that they can be expanded at eval-time (hence dynamic).
-// These nodes are given the eval context and are expected to return
-// a new subgraph.
-type GraphNodeDynamicExpandable interface {
-	DynamicExpand(EvalContext) (*Graph, error)
-}
-
-// GraphNodeSubgraph is an interface a node can implement if it has
-// a larger subgraph that should be walked.
-type GraphNodeSubgraph interface {
-	Subgraph() dag.Grapher
-}
-
-// ExpandTransform is a transformer that does a subgraph expansion
-// at graph transform time (vs. at eval time). The benefit of earlier
-// subgraph expansion is that errors with the graph build can be detected
-// at an earlier stage.
-type ExpandTransform struct {
-	Builder GraphBuilder
-}
-
-func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
-	ev, ok := v.(GraphNodeExpandable)
-	if !ok {
-		// This isn't an expandable vertex, so just ignore it.
-		return v, nil
-	}
-
-	// Expand the subgraph!
-	log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev))
-	return ev.Expand(t.Builder)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go
deleted file mode 100644
index 2ce23ddb..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package terraform
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
-)
-
-// ImportProviderValidateTransformer is a GraphTransformer that goes through
-// the providers in the graph and validates that they only depend on variables.
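-//
-// For example (an illustrative sketch, not taken from the original docs), a
-// provider configuration like
-//
-//	provider "aws" {
-//	  region = var.region
-//	}
-//
-// is acceptable during import, while one whose region is derived from a
-// resource or data source attribute would be rejected.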
-type ImportProviderValidateTransformer struct{} - -func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { - var diags tfdiags.Diagnostics - - for _, v := range g.Vertices() { - // We only care about providers - pv, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // We only care about providers that reference things - rn, ok := pv.(GraphNodeReferencer) - if !ok { - continue - } - - for _, ref := range rn.References() { - if _, ok := ref.Subject.(addrs.InputVariable); !ok { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider dependency for import", - Detail: fmt.Sprintf("The configuration for %s depends on %s. Providers used with import must either have literal configuration or refer only to input variables.", pv.ProviderAddr(), ref.Subject.String()), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - } - } - - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go deleted file mode 100644 index 7dd2c487..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go +++ /dev/null @@ -1,239 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ImportStateTransformer is a GraphTransformer that adds nodes to the -// graph to represent the imports we want to do for resources. -type ImportStateTransformer struct { - Targets []*ImportTarget -} - -func (t *ImportStateTransformer) Transform(g *Graph) error { - for _, target := range t.Targets { - // The ProviderAddr may not be supplied for non-aliased providers. - // This will be populated if the targets come from the cli, but tests - // may not specify implied provider addresses. - providerAddr := target.ProviderAddr - if providerAddr.ProviderConfig.Type == "" { - providerAddr = target.Addr.Resource.Resource.DefaultProviderConfig().Absolute(target.Addr.Module) - } - - node := &graphNodeImportState{ - Addr: target.Addr, - ID: target.ID, - ProviderAddr: providerAddr, - } - g.Add(node) - } - return nil -} - -type graphNodeImportState struct { - Addr addrs.AbsResourceInstance // Addr is the resource address to import into - ID string // ID is the ID to import as - ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type - ResolvedProvider addrs.AbsProviderConfig // provider node address after resolution - - states []providers.ImportedResource -} - -var ( - _ GraphNodeSubPath = (*graphNodeImportState)(nil) - _ GraphNodeEvalable = (*graphNodeImportState)(nil) - _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil) - _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil) -) - -func (n *graphNodeImportState) Name() string { - return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID) -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) ProvidedBy() (addrs.AbsProviderConfig, bool) { - // We assume that n.ProviderAddr has been properly populated here. - // It's the responsibility of the code creating a graphNodeImportState - // to populate this, possibly by calling DefaultProviderConfig() on the - // resource address to infer an implied provider from the resource type - // name. 
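-	// (Illustrative: an import of aws_instance.example with no explicit
-	// provider given would, by the mechanism described above, have been
-	// populated with the default "aws" provider configuration.)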
- return n.ProviderAddr, false -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) { - n.ResolvedProvider = addr -} - -// GraphNodeSubPath -func (n *graphNodeImportState) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeEvalable impl. -func (n *graphNodeImportState) EvalTree() EvalNode { - var provider providers.Interface - - // Reset our states - n.states = nil - - // Return our sequence - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - }, - &EvalImportState{ - Addr: n.Addr.Resource, - Provider: &provider, - ID: n.ID, - Output: &n.states, - }, - }, - } -} - -// GraphNodeDynamicExpandable impl. -// -// We use DynamicExpand as a way to generate the subgraph of refreshes -// and state inserts we need to do for our import state. Since they're new -// resources they don't depend on anything else and refreshes are isolated -// so this is nearly a perfect use case for dynamic expand. -func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - g := &Graph{Path: ctx.Path()} - - // nameCounter is used to de-dup names in the state. - nameCounter := make(map[string]int) - - // Compile the list of addresses that we'll be inserting into the state. - // We do this ahead of time so we can verify that we aren't importing - // something that already exists. - addrs := make([]addrs.AbsResourceInstance, len(n.states)) - for i, state := range n.states { - addr := n.Addr - if t := state.TypeName; t != "" { - addr.Resource.Resource.Type = t - } - - // Determine if we need to suffix the name to de-dup - key := addr.String() - count, ok := nameCounter[key] - if ok { - count++ - addr.Resource.Resource.Name += fmt.Sprintf("-%d", count) - } - nameCounter[key] = count - - // Add it to our list - addrs[i] = addr - } - - // Verify that all the addresses are clear - state := ctx.State() - for _, addr := range addrs { - existing := state.ResourceInstance(addr) - if existing != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource already managed by Terraform", - fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr), - )) - continue - } - } - if diags.HasErrors() { - // Bail out early, then. - return nil, diags.Err() - } - - // For each of the states, we add a node to handle the refresh/add to state. - // "n.states" is populated by our own EvalTree with the result of - // ImportState. Since DynamicExpand is always called after EvalTree, this - // is safe. - for i, state := range n.states { - g.Add(&graphNodeImportStateSub{ - TargetAddr: addrs[i], - State: state, - ResolvedProvider: n.ResolvedProvider, - }) - } - - // Root transform for a single root - t := &RootTransformer{} - if err := t.Transform(g); err != nil { - return nil, err - } - - // Done! - return g, diags.Err() -} - -// graphNodeImportStateSub is the sub-node of graphNodeImportState -// and is part of the subgraph. This node is responsible for refreshing -// and adding a resource to the state once it is imported. 
-type graphNodeImportStateSub struct { - TargetAddr addrs.AbsResourceInstance - State providers.ImportedResource - ResolvedProvider addrs.AbsProviderConfig -} - -var ( - _ GraphNodeSubPath = (*graphNodeImportStateSub)(nil) - _ GraphNodeEvalable = (*graphNodeImportStateSub)(nil) -) - -func (n *graphNodeImportStateSub) Name() string { - return fmt.Sprintf("import %s result", n.TargetAddr) -} - -func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance { - return n.TargetAddr.Module -} - -// GraphNodeEvalable impl. -func (n *graphNodeImportStateSub) EvalTree() EvalNode { - // If the Ephemeral type isn't set, then it is an error - if n.State.TypeName == "" { - err := fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()) - return &EvalReturnError{Error: &err} - } - - state := n.State.AsInstanceObject() - - var provider providers.Interface - var providerSchema *ProviderSchema - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalRefresh{ - Addr: n.TargetAddr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - &EvalImportStateVerify{ - Addr: n.TargetAddr.Resource, - State: &state, - }, - &EvalWriteState{ - Addr: n.TargetAddr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go deleted file mode 100644 index b97dea2a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go +++ /dev/null @@ -1,48 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// LocalTransformer is a GraphTransformer that adds all the local values -// from the configuration to the graph. -type LocalTransformer struct { - Config *configs.Config -} - -func (t *LocalTransformer) Transform(g *Graph) error { - return t.transformModule(g, t.Config) -} - -func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error { - if c == nil { - // Can't have any locals if there's no config - return nil - } - - // Our addressing system distinguishes between modules and module instances, - // but we're not yet ready to make that distinction here (since we don't - // support "count"/"for_each" on modules) and so we just do a naive - // transform of the module path into a module instance path, assuming that - // no keys are in use. This should be removed when "count" and "for_each" - // are implemented for modules. 
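-	// (Illustrative: the module path ["network"] shims to the module
-	// instance path module.network, with no instance key attached.)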
- path := c.Path.UnkeyedInstanceShim() - - for _, local := range c.Module.Locals { - addr := path.LocalValue(local.Name) - node := &NodeLocal{ - Addr: addr, - Config: local, - } - g.Add(node) - } - - // Also populate locals for child modules - for _, cc := range c.Children { - if err := t.transformModule(g, cc); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go deleted file mode 100644 index caa4b6a6..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go +++ /dev/null @@ -1,126 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// ModuleVariableTransformer is a GraphTransformer that adds all the variables -// in the configuration to the graph. -// -// Any "variable" block present in any non-root module is included here, even -// if a particular variable is not referenced from anywhere. -// -// The transform will produce errors if a call to a module does not conform -// to the expected set of arguments, but this transformer is not in a good -// position to return errors and so the validate walk should include specific -// steps for validating module blocks, separate from this transform. -type ModuleVariableTransformer struct { - Config *configs.Config -} - -func (t *ModuleVariableTransformer) Transform(g *Graph) error { - return t.transform(g, nil, t.Config) -} - -func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error { - // We can have no variables if we have no configuration. - if c == nil { - return nil - } - - // Transform all the children first. - for _, cc := range c.Children { - if err := t.transform(g, c, cc); err != nil { - return err - } - } - - // If we're processing anything other than the root module then we'll - // add graph nodes for variables defined inside. (Variables for the root - // module are dealt with in RootVariableTransformer). - // If we have a parent, we can determine if a module variable is being - // used, so we transform this. - if parent != nil { - if err := t.transformSingle(g, parent, c); err != nil { - return err - } - } - - return nil -} - -func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error { - - // Our addressing system distinguishes between modules and module instances, - // but we're not yet ready to make that distinction here (since we don't - // support "count"/"for_each" on modules) and so we just do a naive - // transform of the module path into a module instance path, assuming that - // no keys are in use. This should be removed when "count" and "for_each" - // are implemented for modules. - path := c.Path.UnkeyedInstanceShim() - _, call := path.Call() - - // Find the call in the parent module configuration, so we can get the - // expressions given for each input variable at the call site. - callConfig, exists := parent.Module.ModuleCalls[call.Name] - if !exists { - // This should never happen, since it indicates an improperly-constructed - // configuration tree. 
- panic(fmt.Errorf("no module call block found for %s", path)) - } - - // We need to construct a schema for the expected call arguments based on - // the configured variables in our config, which we can then use to - // decode the content of the call block. - schema := &hcl.BodySchema{} - for _, v := range c.Module.Variables { - schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{ - Name: v.Name, - Required: v.Default == cty.NilVal, - }) - } - - content, contentDiags := callConfig.Config.Content(schema) - if contentDiags.HasErrors() { - // Validation code elsewhere should deal with any errors before we - // get in here, but we'll report them out here just in case, to - // avoid crashes. - var diags tfdiags.Diagnostics - diags = diags.Append(contentDiags) - return diags.Err() - } - - for _, v := range c.Module.Variables { - var expr hcl.Expression - if attr := content.Attributes[v.Name]; attr != nil { - expr = attr.Expr - } else { - // No expression provided for this variable, so we'll make a - // synthetic one using the variable's default value. - expr = &hclsyntax.LiteralValueExpr{ - Val: v.Default, - SrcRange: v.DeclRange, // This is not exact, but close enough - } - } - - // For now we treat all module variables as "applyable", even though - // such nodes are valid to use on other walks too. We may specialize - // this in future if we find reasons to employ different behaviors - // in different scenarios. - node := &NodeApplyableModuleVariable{ - Addr: path.InputVariable(v.Name), - Config: v, - Expr: expr, - } - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go deleted file mode 100644 index 4d1323fb..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go +++ /dev/null @@ -1,175 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/zclconf/go-cty/cty" -) - -// OrphanResourceCountTransformer is a GraphTransformer that adds orphans -// for an expanded count to the graph. The determination of this depends -// on the count argument given. -// -// Orphans are found by comparing the count to what is found in the state. -// This transform assumes that if an element in the state is within the count -// bounds given, that it is not an orphan. -type OrphanResourceCountTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - - Count int // Actual count of the resource, or -1 if count is not set at all - ForEach map[string]cty.Value // The ForEach map on the resource - Addr addrs.AbsResource // Addr of the resource to look for orphans - State *states.State // Full global state -} - -func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { - rs := t.State.Resource(t.Addr) - if rs == nil { - return nil // Resource doesn't exist in state, so nothing to do! 
- } - - haveKeys := make(map[addrs.InstanceKey]struct{}) - for key := range rs.Instances { - haveKeys[key] = struct{}{} - } - - // if for_each is set, use that transformer - if t.ForEach != nil { - return t.transformForEach(haveKeys, g) - } - if t.Count < 0 { - return t.transformNoCount(haveKeys, g) - } - if t.Count == 0 { - return t.transformZeroCount(haveKeys, g) - } - return t.transformCount(haveKeys, g) -} - -func (t *OrphanResourceCountTransformer) transformForEach(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // If there is a NoKey node, add this to the graph first, - // so that we can create edges to it in subsequent (StringKey) nodes. - // This is because the last item determines the resource mode for the whole resource, - // (see SetResourceInstanceCurrent for more information) and we need to evaluate - // an orphaned (NoKey) resource before the in-memory state is updated - // to deal with a new for_each resource - _, hasNoKeyNode := haveKeys[addrs.NoKey] - var noKeyNode dag.Vertex - if hasNoKeyNode { - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(addrs.NoKey)) - noKeyNode = abstract - if f := t.Concrete; f != nil { - noKeyNode = f(abstract) - } - g.Add(noKeyNode) - } - - for key := range haveKeys { - // If the key is no-key, we have already added it, so skip - if key == addrs.NoKey { - continue - } - - s, _ := key.(addrs.StringKey) - // If the key is present in our current for_each, carry on - if _, ok := t.ForEach[string(s)]; ok { - continue - } - - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) - g.Add(node) - - // Add edge to noKeyNode if it exists - if hasNoKeyNode { - g.Connect(dag.BasicEdge(node, noKeyNode)) - } - } - return nil -} - -func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // Due to the logic in Transform, we only get in here if our count is - // at least one. - - _, have0Key := haveKeys[addrs.IntKey(0)] - - for key := range haveKeys { - if key == addrs.NoKey && !have0Key { - // If we have no 0-key then we will accept a no-key instance - // as an alias for it. - continue - } - - i, isInt := key.(addrs.IntKey) - if isInt && int(i) < t.Count { - continue - } - - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) - g.Add(node) - } - - return nil -} - -func (t *OrphanResourceCountTransformer) transformZeroCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // This case is easy: we need to orphan any keys we have at all. - - for key := range haveKeys { - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(zero): adding %s as %T", t.Addr, node) - g.Add(node) - } - - return nil -} - -func (t *OrphanResourceCountTransformer) transformNoCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // Negative count indicates that count is not set at all, in which - // case we expect to have a single instance with no key set at all. 
- // However, we'll also accept an instance with key 0 set as an alias - // for it, in case the user has just deleted the "count" argument and - // so wants to keep the first instance in the set. - - _, haveNoKey := haveKeys[addrs.NoKey] - _, have0Key := haveKeys[addrs.IntKey(0)] - keepKey := addrs.NoKey - if have0Key && !haveNoKey { - // If we don't have a no-key instance then we can use the 0-key instance - // instead. - keepKey = addrs.IntKey(0) - } - - for key := range haveKeys { - if key == keepKey { - continue - } - - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(no-count): adding %s as %T", t.Addr, node) - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go deleted file mode 100644 index cab10da1..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go +++ /dev/null @@ -1,60 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// OrphanOutputTransformer finds the outputs that aren't present -// in the given config that are in the state and adds them to the graph -// for deletion. -type OrphanOutputTransformer struct { - Config *configs.Config // Root of config tree - State *states.State // State is the root state -} - -func (t *OrphanOutputTransformer) Transform(g *Graph) error { - if t.State == nil { - log.Printf("[DEBUG] No state, no orphan outputs") - return nil - } - - for _, ms := range t.State.Modules { - if err := t.transform(g, ms); err != nil { - return err - } - } - return nil -} - -func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error { - if ms == nil { - return nil - } - - moduleAddr := ms.Addr - - // Get the config for this path, which is nil if the entire module has been - // removed. - var outputs map[string]*configs.Output - if c := t.Config.DescendentForInstance(moduleAddr); c != nil { - outputs = c.Module.Outputs - } - - // An output is "orphaned" if it's present in the state but not declared - // in the configuration. - for name := range ms.OutputValues { - if _, exists := outputs[name]; exists { - continue - } - - g.Add(&NodeOutputOrphan{ - Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr), - }) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go deleted file mode 100644 index f927b108..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go +++ /dev/null @@ -1,179 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned -// resource instances to the graph. 
An "orphan" is an instance that is present
-// in the state but belongs to a resource that is no longer present in the
-// configuration.
-//
-// This is not the transformer that deals with "count orphans" (instances that
-// are no longer covered by a resource's "count" or "for_each" setting); that's
-// handled instead by OrphanResourceCountTransformer.
-type OrphanResourceInstanceTransformer struct {
-	Concrete ConcreteResourceInstanceNodeFunc
-
-	// State is the global state. We require the global state to
-	// properly find module orphans at our path.
-	State *states.State
-
-	// Config is the root node in the configuration tree. We'll look up
-	// the appropriate node in this tree using the path in each node.
-	Config *configs.Config
-}
-
-func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error {
-	if t.State == nil {
-		// If the entire state is nil, there can't be any orphans
-		return nil
-	}
-	if t.Config == nil {
-		// Should never happen: we can't be doing any Terraform operations
-		// without at least an empty configuration.
-		panic("OrphanResourceInstanceTransformer used without setting Config")
-	}
-
-	// Go through the modules and for each module transform in order
-	// to add the orphan.
-	for _, ms := range t.State.Modules {
-		if err := t.transform(g, ms); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error {
-	if ms == nil {
-		return nil
-	}
-
-	moduleAddr := ms.Addr
-
-	// Get the configuration for this module. The configuration might be
-	// nil if the module was removed from the configuration. This is okay,
-	// this just means that every resource is an orphan.
-	var m *configs.Module
-	if c := t.Config.DescendentForInstance(moduleAddr); c != nil {
-		m = c.Module
-	}
-
-	// An "orphan" is a resource that is in the state but not the configuration,
-	// so we'll walk the state resources and try to correlate each of them
-	// with a configuration block. Each orphan gets a node in the graph whose
-	// type is decided by t.Concrete.
-	//
-	// We don't handle orphans related to changes in the "count" and "for_each"
-	// pseudo-arguments here. They are handled by OrphanResourceCountTransformer.
-	for _, rs := range ms.Resources {
-		if m != nil {
-			if r := m.ResourceByAddr(rs.Addr); r != nil {
-				continue
-			}
-		}
-
-		for key := range rs.Instances {
-			addr := rs.Addr.Instance(key).Absolute(moduleAddr)
-			abstract := NewNodeAbstractResourceInstance(addr)
-			var node dag.Vertex = abstract
-			if f := t.Concrete; f != nil {
-				node = f(abstract)
-			}
-			log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr)
-			g.Add(node)
-		}
-	}
-
-	return nil
-}
-
-// OrphanResourceTransformer is a GraphTransformer that adds orphaned
-// resources to the graph. An "orphan" is a resource that is present in
-// the state but no longer present in the config.
-//
-// This is separate from OrphanResourceInstanceTransformer in that it deals with
-// whole resources, rather than individual instances of resources. Orphan
-// resource nodes are only used during apply to clean up leftover empty
-// resource state skeletons, after all of the instances inside have been
-// removed.
-//
-// This transformer will also create edges in the graph to any pre-existing
-// node that creates or destroys the entire orphaned resource or any of its
-// instances, to ensure that the "orphan-ness" of a resource is always dealt
-// with after all other aspects of it.
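The orphan logic removed above reduces to a set difference: any instance key recorded in state that the configuration no longer produces must be destroyed. A minimal, self-contained sketch of that reading, where InstanceKey and orphanKeys are illustrative stand-ins rather than the SDK's API:

package main

import "fmt"

// InstanceKey stands in for addrs.InstanceKey; "" models a keyless (NoKey)
// instance here.
type InstanceKey string

// orphanKeys returns the keys present in state but absent from the set of
// keys the configuration still declares.
func orphanKeys(state, desired map[InstanceKey]struct{}) []InstanceKey {
	var orphans []InstanceKey
	for k := range state {
		if _, ok := desired[k]; !ok {
			orphans = append(orphans, k)
		}
	}
	return orphans
}

func main() {
	// State remembers three for_each instances...
	state := map[InstanceKey]struct{}{"a": {}, "b": {}, "c": {}}
	// ...but the configuration now only declares two.
	desired := map[InstanceKey]struct{}{"a": {}, "b": {}}
	fmt.Println(orphanKeys(state, desired)) // [c]
}

The NoKey special-casing in the deleted transformForEach and transformNoCount is then just a policy about which single leftover key may be treated as an alias for another.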
-type OrphanResourceTransformer struct { - Concrete ConcreteResourceNodeFunc - - // State is the global state. - State *states.State - - // Config is the root node in the configuration tree. - Config *configs.Config -} - -func (t *OrphanResourceTransformer) Transform(g *Graph) error { - if t.State == nil { - // If the entire state is nil, there can't be any orphans - return nil - } - if t.Config == nil { - // Should never happen: we can't be doing any Terraform operations - // without at least an empty configuration. - panic("OrphanResourceTransformer used without setting Config") - } - - // We'll first collect up the existing nodes for each resource so we can - // create dependency edges for any new nodes we create. - deps := map[string][]dag.Vertex{} - for _, v := range g.Vertices() { - switch tv := v.(type) { - case GraphNodeResourceInstance: - k := tv.ResourceInstanceAddr().ContainingResource().String() - deps[k] = append(deps[k], v) - case GraphNodeResource: - k := tv.ResourceAddr().String() - deps[k] = append(deps[k], v) - case GraphNodeDestroyer: - k := tv.DestroyAddr().ContainingResource().String() - deps[k] = append(deps[k], v) - } - } - - for _, ms := range t.State.Modules { - moduleAddr := ms.Addr - - mc := t.Config.DescendentForInstance(moduleAddr) // might be nil if whole module has been removed - - for _, rs := range ms.Resources { - if mc != nil { - if r := mc.Module.ResourceByAddr(rs.Addr); r != nil { - // It's in the config, so nothing to do for this one. - continue - } - } - - addr := rs.Addr.Absolute(moduleAddr) - abstract := NewNodeAbstractResource(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceTransformer: adding whole-resource orphan node for %s", addr) - g.Add(node) - for _, dn := range deps[addr.String()] { - log.Printf("[TRACE] OrphanResourceTransformer: node %q depends on %q", dag.VertexName(node), dag.VertexName(dn)) - g.Connect(dag.BasicEdge(node, dn)) - } - } - } - - return nil - -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go deleted file mode 100644 index e2979ac5..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// OutputTransformer is a GraphTransformer that adds all the outputs -// in the configuration to the graph. -// -// This is done for the apply graph builder even if dependent nodes -// aren't changing since there is no downside: the state will be available -// even if the dependent items aren't changing. -type OutputTransformer struct { - Config *configs.Config -} - -func (t *OutputTransformer) Transform(g *Graph) error { - return t.transform(g, t.Config) -} - -func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error { - // If we have no config then there can be no outputs. - if c == nil { - return nil - } - - // Transform all the children. We must do this first because - // we can reference module outputs and they must show up in the - // reference map. 
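The whole-resource orphan handling above relies on indexing the existing graph vertices by the address of the resource that contains them, so each new orphan node can be connected after every node still touching that resource. A small stand-alone sketch of that indexing step, with illustrative types rather than the SDK's:

package main

import "fmt"

// vertex models a graph node that can report its containing resource address.
type vertex struct {
	name     string
	resource string
}

// indexByResource groups vertices under their containing resource address,
// mirroring the deps map built by the deleted OrphanResourceTransformer.
func indexByResource(vs []vertex) map[string][]vertex {
	deps := map[string][]vertex{}
	for _, v := range vs {
		deps[v.resource] = append(deps[v.resource], v)
	}
	return deps
}

func main() {
	deps := indexByResource([]vertex{
		{name: "aws_instance.web[0]", resource: "aws_instance.web"},
		{name: "aws_instance.web[1] (destroy)", resource: "aws_instance.web"},
	})
	// A whole-resource orphan node for aws_instance.web would gain one
	// edge per entry found here.
	for _, v := range deps["aws_instance.web"] {
		fmt.Println("orphan node ->", v.name)
	}
}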
- for _, cc := range c.Children { - if err := t.transform(g, cc); err != nil { - return err - } - } - - // Our addressing system distinguishes between modules and module instances, - // but we're not yet ready to make that distinction here (since we don't - // support "count"/"for_each" on modules) and so we just do a naive - // transform of the module path into a module instance path, assuming that - // no keys are in use. This should be removed when "count" and "for_each" - // are implemented for modules. - path := c.Path.UnkeyedInstanceShim() - - for _, o := range c.Module.Outputs { - addr := path.OutputValue(o.Name) - node := &NodeApplyableOutput{ - Addr: addr, - Config: o, - } - g.Add(node) - } - - return nil -} - -// DestroyOutputTransformer is a GraphTransformer that adds nodes to delete -// outputs during destroy. We need to do this to ensure that no stale outputs -// are ever left in the state. -type DestroyOutputTransformer struct { -} - -func (t *DestroyOutputTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - output, ok := v.(*NodeApplyableOutput) - if !ok { - continue - } - - // create the destroy node for this output - node := &NodeDestroyableOutput{ - Addr: output.Addr, - Config: output.Config, - } - - log.Printf("[TRACE] creating %s", node.Name()) - g.Add(node) - - deps, err := g.Descendents(v) - if err != nil { - return err - } - - // the destroy node must depend on the eval node - deps.Add(v) - - for _, d := range deps.List() { - log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d)) - g.Connect(dag.BasicEdge(node, d)) - } - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go deleted file mode 100644 index 9c8966fa..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go +++ /dev/null @@ -1,705 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer { - return GraphTransformMulti( - // Add providers from the config - &ProviderConfigTransformer{ - Config: config, - Providers: providers, - Concrete: concrete, - }, - // Add any remaining missing providers - &MissingProviderTransformer{ - Providers: providers, - Concrete: concrete, - }, - // Connect the providers - &ProviderTransformer{ - Config: config, - }, - // Remove unused providers and proxies - &PruneProviderTransformer{}, - // Connect provider to their parent provider nodes - &ParentProviderTransformer{}, - ) -} - -// GraphNodeProvider is an interface that nodes that can be a provider -// must implement. -// -// ProviderAddr returns the address of the provider configuration this -// satisfies, which is relative to the path returned by method Path(). -// -// Name returns the full name of the provider in the config. -type GraphNodeProvider interface { - GraphNodeSubPath - ProviderAddr() addrs.AbsProviderConfig - Name() string -} - -// GraphNodeCloseProvider is an interface that nodes that can be a close -// provider must implement. 
The CloseProviderAddr returned is the address of
-// the provider they satisfy.
-type GraphNodeCloseProvider interface {
-	GraphNodeSubPath
-	CloseProviderAddr() addrs.AbsProviderConfig
-}
-
-// GraphNodeProviderConsumer is an interface that nodes that require
-// a provider must implement. ProvidedBy must return the address of the provider
-// to use, which will be resolved to a configuration either in the same module
-// or in an ancestor module, with the resulting absolute address passed to
-// SetProvider.
-type GraphNodeProviderConsumer interface {
-	// ProvidedBy returns the address of the provider configuration the node
-	// refers to. If the returned "exact" value is true, this address will
-	// be taken exactly. If "exact" is false, a provider configuration from
-	// an ancestor module may be selected instead.
-	ProvidedBy() (addr addrs.AbsProviderConfig, exact bool)
-	// Set the resolved provider address for this resource.
-	SetProvider(addrs.AbsProviderConfig)
-}
-
-// ProviderTransformer is a GraphTransformer that maps resources to
-// providers within the graph. This will error if there are any resources
-// that don't map to a proper provider.
-type ProviderTransformer struct {
-	Config *configs.Config
-}
-
-func (t *ProviderTransformer) Transform(g *Graph) error {
-	// We need to find a provider configuration address for each resource
-	// either directly represented by a node or referenced by a node in
-	// the graph, and then create graph edges from provider to provider user
-	// so that the providers will get initialized first.
-
-	var diags tfdiags.Diagnostics
-
-	// To start, we'll collect the _requested_ provider addresses for each
-	// node, which we'll then resolve (handling provider inheritance, etc) in
-	// the next step.
-	// Our "requested" map is from graph vertices to string representations of
-	// provider config addresses (for deduping) to requests.
-	type ProviderRequest struct {
-		Addr  addrs.AbsProviderConfig
-		Exact bool // If true, inheritance from parent modules is not attempted
-	}
-	requested := map[dag.Vertex]map[string]ProviderRequest{}
-	needConfigured := map[string]addrs.AbsProviderConfig{}
-	for _, v := range g.Vertices() {
-
-		// Does the vertex _directly_ use a provider?
-		if pv, ok := v.(GraphNodeProviderConsumer); ok {
-			requested[v] = make(map[string]ProviderRequest)
-
-			p, exact := pv.ProvidedBy()
-			if exact {
-				log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), p)
-			} else {
-				log.Printf("[TRACE] ProviderTransformer: %s is provided by %s or inherited equivalent", dag.VertexName(v), p)
-			}
-
-			requested[v][p.String()] = ProviderRequest{
-				Addr:  p,
-				Exact: exact,
-			}
-
-			// Direct references need the provider configured as well as initialized
-			needConfigured[p.String()] = p
-		}
-	}
-
-	// Now we'll go through all the requested addresses we just collected and
-	// figure out which _actual_ config address each belongs to, after resolving
-	// for provider inheritance and passing.
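The resolution step that follows can be pictured as trying the consumer's own module first and then each ancestor up to the root. A toy model of that walk, assuming plain string module paths and a precomputed set of configured providers (none of these names are the SDK's):

package main

import (
	"fmt"
	"strings"
)

// resolveProvider tries the full module path, then each ancestor, until a
// configured provider is found; keys use a "module path|provider" form.
func resolveProvider(path []string, provider string, configured map[string]bool) (string, bool) {
	for i := len(path); i >= 0; i-- {
		key := strings.Join(path[:i], ".") + "|" + provider
		if configured[key] {
			return key, true
		}
	}
	return "", false
}

func main() {
	// Only the root module configures "aws".
	configured := map[string]bool{"|aws": true}
	key, ok := resolveProvider([]string{"module.network", "module.subnet"}, "aws", configured)
	fmt.Println(key, ok) // |aws true, inherited from the root
}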
-	m := providerVertexMap(g)
-	for v, reqs := range requested {
-		for key, req := range reqs {
-			p := req.Addr
-			target := m[key]
-
-			_, ok := v.(GraphNodeSubPath)
-			if !ok && target == nil {
-				// No target and no path to traverse up from
-				diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p))
-				continue
-			}
-
-			if target != nil {
-				log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v))
-			}
-
-			// if we don't have a provider at this level, walk up the path looking for one,
-			// unless we were told to be exact.
-			if target == nil && !req.Exact {
-				for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() {
-					key := pp.String()
-					target = m[key]
-					if target != nil {
-						log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp)
-						break
-					}
-					log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v))
-				}
-			}
-
-			// If this provider doesn't need to be configured then we can just
-			// stub it out with an init-only provider node, which will just
-			// start up the provider and fetch its schema.
-			if _, exists := needConfigured[key]; target == nil && !exists {
-				stubAddr := p.ProviderConfig.Absolute(addrs.RootModuleInstance)
-				stub := &NodeEvalableProvider{
-					&NodeAbstractProvider{
-						Addr: stubAddr,
-					},
-				}
-				m[stubAddr.String()] = stub
-				log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr)
-				target = stub
-				g.Add(target)
-			}
-
-			if target == nil {
-				diags = diags.Append(tfdiags.Sourceless(
-					tfdiags.Error,
-					"Provider configuration not present",
-					fmt.Sprintf(
-						"To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.",
-						dag.VertexName(v), p, dag.VertexName(v),
-					),
-				))
-				break
-			}
-
-			// see if this is an inherited provider
-			if p, ok := target.(*graphNodeProxyProvider); ok {
-				g.Remove(p)
-				target = p.Target()
-				key = target.(GraphNodeProvider).ProviderAddr().String()
-			}
-
-			log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target))
-			if pv, ok := v.(GraphNodeProviderConsumer); ok {
-				pv.SetProvider(target.ProviderAddr())
-			}
-			g.Connect(dag.BasicEdge(v, target))
-		}
-	}
-
-	return diags.Err()
-}
-
-// CloseProviderTransformer is a GraphTransformer that adds nodes to the
-// graph that will close open provider connections that aren't needed anymore.
-// A provider connection is not needed anymore once all dependent resources
-// in the graph are evaluated.
-type CloseProviderTransformer struct{}
-
-func (t *CloseProviderTransformer) Transform(g *Graph) error {
-	pm := providerVertexMap(g)
-	cpm := make(map[string]*graphNodeCloseProvider)
-	var err error
-
-	for _, v := range pm {
-		p := v.(GraphNodeProvider)
-		key := p.ProviderAddr().String()
-
-		// get the close provider of this type if we already created it
-		closer := cpm[key]
-
-		if closer == nil {
-			// create a closer for this provider type
-			closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()}
-			g.Add(closer)
-			cpm[key] = closer
-		}
-
-		// Close node depends on the provider itself
-		// this is added unconditionally, so it will connect to all instances
-		// of the provider. Extra edges will be removed by transitive
-		// reduction.
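The close-provider pattern being assembled here, one deduplicated closer per provider that must run after the provider and after all of its consumers, can be sketched without the SDK's graph types:

package main

import "fmt"

type edge struct{ from, to string }

// addCloseNodes creates one "(close)" node per provider and returns the
// ordering edges: closer -> provider and closer -> each consumer, so the
// provider is shut down only after its last user has been evaluated.
func addCloseNodes(providers []string, consumers map[string][]string) []edge {
	var edges []edge
	closers := map[string]bool{} // dedup, like the cpm map above
	for _, p := range providers {
		if closers[p] {
			continue
		}
		closers[p] = true
		closer := p + " (close)"
		edges = append(edges, edge{closer, p})
		for _, c := range consumers[p] {
			edges = append(edges, edge{closer, c})
		}
	}
	return edges
}

func main() {
	edges := addCloseNodes([]string{"provider.aws"}, map[string][]string{
		"provider.aws": {"aws_instance.web", "aws_s3_bucket.logs"},
	})
	for _, e := range edges {
		fmt.Printf("%s -> %s\n", e.from, e.to)
	}
}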
- g.Connect(dag.BasicEdge(closer, p)) - - // connect all the provider's resources to the close node - for _, s := range g.UpEdges(p).List() { - if _, ok := s.(GraphNodeProviderConsumer); ok { - g.Connect(dag.BasicEdge(closer, s)) - } - } - } - - return err -} - -// MissingProviderTransformer is a GraphTransformer that adds to the graph -// a node for each default provider configuration that is referenced by another -// node but not already present in the graph. -// -// These "default" nodes are always added to the root module, regardless of -// where they are requested. This is important because our inheritance -// resolution behavior in ProviderTransformer will then treat these as a -// last-ditch fallback after walking up the tree, rather than preferring them -// as it would if they were placed in the same module as the requester. -// -// This transformer may create extra nodes that are not needed in practice, -// due to overriding provider configurations in child modules. -// PruneProviderTransformer can then remove these once ProviderTransformer -// has resolved all of the inheritence, etc. -type MissingProviderTransformer struct { - // Providers is the list of providers we support. - Providers []string - - // Concrete, if set, overrides how the providers are made. - Concrete ConcreteProviderNodeFunc -} - -func (t *MissingProviderTransformer) Transform(g *Graph) error { - // Initialize factory - if t.Concrete == nil { - t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { - return a - } - } - - var err error - m := providerVertexMap(g) - for _, v := range g.Vertices() { - pv, ok := v.(GraphNodeProviderConsumer) - if !ok { - continue - } - - // For our work here we actually care only about the provider type and - // we plan to place all default providers in the root module, and so - // it's safe for us to rely on ProvidedBy here rather than waiting for - // the later proper resolution of provider inheritance done by - // ProviderTransformer. - p, _ := pv.ProvidedBy() - if p.ProviderConfig.Alias != "" { - // We do not create default aliased configurations. - log.Println("[TRACE] MissingProviderTransformer: skipping implication of aliased config", p) - continue - } - - // We're going to create an implicit _default_ configuration for the - // referenced provider type in the _root_ module, ignoring all other - // aspects of the resource's declared provider address. - defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(p.ProviderConfig.Type) - key := defaultAddr.String() - provider := m[key] - - if provider != nil { - // There's already an explicit default configuration for this - // provider type in the root module, so we have nothing to do. - continue - } - - log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v)) - - // create the missing top-level provider - provider = t.Concrete(&NodeAbstractProvider{ - Addr: defaultAddr, - }).(GraphNodeProvider) - - g.Add(provider) - m[key] = provider - } - - return err -} - -// ParentProviderTransformer connects provider nodes to their parents. -// -// This works by finding nodes that are both GraphNodeProviders and -// GraphNodeSubPath. It then connects the providers to their parent -// path. The parent provider is always at the root level. 
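The implicit-default rule documented for MissingProviderTransformer above, where unaliased provider types get one default configuration in the root module unless an explicit one already exists, might be summarized like this (a sketch with invented names, not the SDK's API):

package main

import (
	"fmt"
	"strings"
)

// implicitProviders adds a root-module default for each referenced provider
// type that lacks an explicit configuration; aliased references are skipped
// because aliased configurations must always be written out explicitly.
func implicitProviders(referenced []string, existing map[string]bool) []string {
	var added []string
	for _, ref := range referenced {
		typ, alias, _ := strings.Cut(ref, ".")
		if alias != "" {
			continue
		}
		key := "provider." + typ // default address in the root module
		if existing[key] {
			continue
		}
		existing[key] = true
		added = append(added, key)
	}
	return added
}

func main() {
	existing := map[string]bool{"provider.google": true}
	fmt.Println(implicitProviders([]string{"aws", "google", "aws.east"}, existing))
	// Output: [provider.aws]
}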
-type ParentProviderTransformer struct{} - -func (t *ParentProviderTransformer) Transform(g *Graph) error { - pm := providerVertexMap(g) - for _, v := range g.Vertices() { - // Only care about providers - pn, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // Also require non-empty path, since otherwise we're in the root - // module and so cannot have a parent. - if len(pn.Path()) <= 1 { - continue - } - - // this provider may be disabled, but we can only get it's name from - // the ProviderName string - addr := pn.ProviderAddr() - parentAddr, ok := addr.Inherited() - if ok { - parent := pm[parentAddr.String()] - if parent != nil { - g.Connect(dag.BasicEdge(v, parent)) - } - } - } - return nil -} - -// PruneProviderTransformer removes any providers that are not actually used by -// anything, and provider proxies. This avoids the provider being initialized -// and configured. This both saves resources but also avoids errors since -// configuration may imply initialization which may require auth. -type PruneProviderTransformer struct{} - -func (t *PruneProviderTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - // We only care about providers - _, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // ProxyProviders will have up edges, but we're now done with them in the graph - if _, ok := v.(*graphNodeProxyProvider); ok { - log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v)) - g.Remove(v) - } - - // Remove providers with no dependencies. - if g.UpEdges(v).Len() == 0 { - log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v)) - g.Remove(v) - } - } - - return nil -} - -func providerVertexMap(g *Graph) map[string]GraphNodeProvider { - m := make(map[string]GraphNodeProvider) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvider); ok { - addr := pv.ProviderAddr() - m[addr.String()] = pv - } - } - - return m -} - -type graphNodeCloseProvider struct { - Addr addrs.AbsProviderConfig -} - -var ( - _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil) -) - -func (n *graphNodeCloseProvider) Name() string { - return n.Addr.String() + " (close)" -} - -// GraphNodeSubPath impl. -func (n *graphNodeCloseProvider) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeEvalable impl. -func (n *graphNodeCloseProvider) EvalTree() EvalNode { - return CloseProviderEvalTree(n.Addr) -} - -// GraphNodeDependable impl. -func (n *graphNodeCloseProvider) DependableName() []string { - return []string{n.Name()} -} - -func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig { - return n.Addr -} - -// GraphNodeDotter impl. -func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - if !opts.Verbose { - return nil - } - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} - -// RemovableIfNotTargeted -func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to -// store the name and value of a provider node for inheritance between modules. -// These nodes are only used to store the data while loading the provider -// configurations, and are removed after all the resources have been connected -// to their providers. 
-type graphNodeProxyProvider struct { - addr addrs.AbsProviderConfig - target GraphNodeProvider -} - -var ( - _ GraphNodeProvider = (*graphNodeProxyProvider)(nil) -) - -func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig { - return n.addr -} - -func (n *graphNodeProxyProvider) Path() addrs.ModuleInstance { - return n.addr.Module -} - -func (n *graphNodeProxyProvider) Name() string { - return n.addr.String() + " (proxy)" -} - -// find the concrete provider instance -func (n *graphNodeProxyProvider) Target() GraphNodeProvider { - switch t := n.target.(type) { - case *graphNodeProxyProvider: - return t.Target() - default: - return n.target - } -} - -// ProviderConfigTransformer adds all provider nodes from the configuration and -// attaches the configs. -type ProviderConfigTransformer struct { - Providers []string - Concrete ConcreteProviderNodeFunc - - // each provider node is stored here so that the proxy nodes can look up - // their targets by name. - providers map[string]GraphNodeProvider - // record providers that can be overriden with a proxy - proxiable map[string]bool - - // Config is the root node of the configuration tree to add providers from. - Config *configs.Config -} - -func (t *ProviderConfigTransformer) Transform(g *Graph) error { - // If no configuration is given, we don't do anything - if t.Config == nil { - return nil - } - - t.providers = make(map[string]GraphNodeProvider) - t.proxiable = make(map[string]bool) - - // Start the transformation process - if err := t.transform(g, t.Config); err != nil { - return err - } - - // finally attach the configs to the new nodes - return t.attachProviderConfigs(g) -} - -func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error { - // If no config, do nothing - if c == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, c); err != nil { - return err - } - - // Transform all the children. - for _, cc := range c.Children { - if err := t.transform(g, cc); err != nil { - return err - } - } - return nil -} - -func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error { - // Get the module associated with this configuration tree node - mod := c.Module - staticPath := c.Path - - // We actually need a dynamic module path here, but we've not yet updated - // our graph builders enough to support expansion of module calls with - // "count" and "for_each" set, so for now we'll shim this by converting to - // a dynamic path with no keys. At the time of writing this is the only - // possible kind of dynamic path anyway. - path := make(addrs.ModuleInstance, len(staticPath)) - for i, name := range staticPath { - path[i] = addrs.ModuleInstanceStep{ - Name: name, - } - } - - // add all providers from the configuration - for _, p := range mod.ProviderConfigs { - relAddr := p.Addr() - addr := relAddr.Absolute(path) - - abstract := &NodeAbstractProvider{ - Addr: addr, - } - var v dag.Vertex - if t.Concrete != nil { - v = t.Concrete(abstract) - } else { - v = abstract - } - - // Add it to the graph - g.Add(v) - key := addr.String() - t.providers[key] = v.(GraphNodeProvider) - - // A provider configuration is "proxyable" if its configuration is - // entirely empty. This means it's standing in for a provider - // configuration that must be passed in from the parent module. - // We decide this by evaluating the config with an empty schema; - // if this succeeds, then we know there's nothing in the body. 
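The "proxyable" check a few lines below leans on a property of hcl/v2: decoding a body against an empty schema succeeds only when the body is empty, because every attribute or block found is reported as unexpected. A stand-alone demonstration against the hcl/v2 API vendored in this tree (the helper and file name are invented):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclparse"
)

// isEmptyBody reports whether an HCL body contains no attributes or blocks,
// using the same empty-schema trick as the transformer.
func isEmptyBody(src string) bool {
	f, diags := hclparse.NewParser().ParseHCL([]byte(src), "example.tf")
	if diags.HasErrors() {
		return false
	}
	_, diags = f.Body.Content(&hcl.BodySchema{})
	return !diags.HasErrors()
}

func main() {
	fmt.Println(isEmptyBody(""))                          // true: nothing configured
	fmt.Println(isEmptyBody("region = \"eu-west-1\"\n")) // false: has an attribute
}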
- _, diags := p.Config.Content(&hcl.BodySchema{}) - t.proxiable[key] = !diags.HasErrors() - } - - // Now replace the provider nodes with proxy nodes if a provider was being - // passed in, and create implicit proxies if there was no config. Any extra - // proxies will be removed in the prune step. - return t.addProxyProviders(g, c) -} - -func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error { - path := c.Path - - // can't add proxies at the root - if len(path) == 0 { - return nil - } - - parentPath, callAddr := path.Call() - parent := c.Parent - if parent == nil { - return nil - } - - callName := callAddr.Name - var parentCfg *configs.ModuleCall - for name, mod := range parent.Module.ModuleCalls { - if name == callName { - parentCfg = mod - break - } - } - - // We currently don't support count/for_each for modules and so we must - // shim our path and parentPath into module instances here so that the - // rest of Terraform can behave as if we do. This shimming should be - // removed later as part of implementing count/for_each for modules. - instPath := make(addrs.ModuleInstance, len(path)) - for i, name := range path { - instPath[i] = addrs.ModuleInstanceStep{Name: name} - } - parentInstPath := make(addrs.ModuleInstance, len(parentPath)) - for i, name := range parentPath { - parentInstPath[i] = addrs.ModuleInstanceStep{Name: name} - } - - if parentCfg == nil { - // this can't really happen during normal execution. - return fmt.Errorf("parent module config not found for %s", c.Path.String()) - } - - // Go through all the providers the parent is passing in, and add proxies to - // the parent provider nodes. - for _, pair := range parentCfg.Providers { - fullAddr := pair.InChild.Addr().Absolute(instPath) - fullParentAddr := pair.InParent.Addr().Absolute(parentInstPath) - fullName := fullAddr.String() - fullParentName := fullParentAddr.String() - - parentProvider := t.providers[fullParentName] - - if parentProvider == nil { - return fmt.Errorf("missing provider %s", fullParentName) - } - - proxy := &graphNodeProxyProvider{ - addr: fullAddr, - target: parentProvider, - } - - concreteProvider := t.providers[fullName] - - // replace the concrete node with the provider passed in - if concreteProvider != nil && t.proxiable[fullName] { - g.Replace(concreteProvider, proxy) - t.providers[fullName] = proxy - continue - } - - // aliased configurations can't be implicitly passed in - if fullAddr.ProviderConfig.Alias != "" { - continue - } - - // There was no concrete provider, so add this as an implicit provider. - // The extra proxy will be pruned later if it's unused. - g.Add(proxy) - t.providers[fullName] = proxy - } - return nil -} - -func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachProvider implementations - apn, ok := v.(GraphNodeAttachProvider) - if !ok { - continue - } - - // Determine what we're looking for - addr := apn.ProviderAddr() - - // Get the configuration. 
- mc := t.Config.DescendentForInstance(addr.Module) - if mc == nil { - log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String()) - continue - } - - // Go through the provider configs to find the matching config - for _, p := range mc.Module.ProviderConfigs { - if p.Name == addr.ProviderConfig.Type && p.Alias == addr.ProviderConfig.Alias { - log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange) - apn.AttachProvider(p) - break - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go deleted file mode 100644 index e6fe25da..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go +++ /dev/null @@ -1,205 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphNodeProvisioner is an interface that nodes that can be a provisioner -// must implement. The ProvisionerName returned is the name of the provisioner -// they satisfy. -type GraphNodeProvisioner interface { - ProvisionerName() string -} - -// GraphNodeCloseProvisioner is an interface that nodes that can be a close -// provisioner must implement. The CloseProvisionerName returned is the name -// of the provisioner they satisfy. -type GraphNodeCloseProvisioner interface { - CloseProvisionerName() string -} - -// GraphNodeProvisionerConsumer is an interface that nodes that require -// a provisioner must implement. ProvisionedBy must return the names of the -// provisioners to use. -type GraphNodeProvisionerConsumer interface { - ProvisionedBy() []string -} - -// ProvisionerTransformer is a GraphTransformer that maps resources to -// provisioners within the graph. This will error if there are any resources -// that don't map to proper resources. -type ProvisionerTransformer struct{} - -func (t *ProvisionerTransformer) Transform(g *Graph) error { - // Go through the other nodes and match them to provisioners they need - var err error - m := provisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - key := provisionerMapKey(p, pv) - if m[key] == nil { - err = multierror.Append(err, fmt.Errorf( - "%s: provisioner %s couldn't be found", - dag.VertexName(v), p)) - continue - } - - log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), key, dag.VertexName(m[key])) - g.Connect(dag.BasicEdge(v, m[key])) - } - } - } - - return err -} - -// MissingProvisionerTransformer is a GraphTransformer that adds nodes -// for missing provisioners into the graph. -type MissingProvisionerTransformer struct { - // Provisioners is the list of provisioners we support. 
- Provisioners []string -} - -func (t *MissingProvisionerTransformer) Transform(g *Graph) error { - // Create a set of our supported provisioners - supported := make(map[string]struct{}, len(t.Provisioners)) - for _, v := range t.Provisioners { - supported[v] = struct{}{} - } - - // Get the map of provisioners we already have in our graph - m := provisionerVertexMap(g) - - // Go through all the provisioner consumers and make sure we add - // that provisioner if it is missing. - for _, v := range g.Vertices() { - pv, ok := v.(GraphNodeProvisionerConsumer) - if !ok { - continue - } - - // If this node has a subpath, then we use that as a prefix - // into our map to check for an existing provider. - path := addrs.RootModuleInstance - if sp, ok := pv.(GraphNodeSubPath); ok { - path = sp.Path() - } - - for _, p := range pv.ProvisionedBy() { - // Build the key for storing in the map - key := provisionerMapKey(p, pv) - - if _, ok := m[key]; ok { - // This provisioner already exists as a configure node - continue - } - - if _, ok := supported[p]; !ok { - // If we don't support the provisioner type, we skip it. - // Validation later will catch this as an error. - continue - } - - // Build the vertex - var newV dag.Vertex = &NodeProvisioner{ - NameValue: p, - PathValue: path, - } - - // Add the missing provisioner node to the graph - m[key] = g.Add(newV) - log.Printf("[TRACE] MissingProviderTransformer: added implicit provisioner %s, first implied by %s", key, dag.VertexName(v)) - } - } - - return nil -} - -// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provisioner connections that aren't needed -// anymore. A provisioner connection is not needed anymore once all depended -// resources in the graph are evaluated. -type CloseProvisionerTransformer struct{} - -func (t *CloseProvisionerTransformer) Transform(g *Graph) error { - m := closeProvisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - source := m[p] - - if source == nil { - // Create a new graphNodeCloseProvisioner and add it to the graph - source = &graphNodeCloseProvisioner{ProvisionerNameValue: p} - g.Add(source) - - // Make sure we also add the new graphNodeCloseProvisioner to the map - // so we don't create and add any duplicate graphNodeCloseProvisioners. - m[p] = source - } - - g.Connect(dag.BasicEdge(source, v)) - } - } - } - - return nil -} - -// provisionerMapKey is a helper that gives us the key to use for the -// maps returned by things such as provisionerVertexMap. -func provisionerMapKey(k string, v dag.Vertex) string { - pathPrefix := "" - if sp, ok := v.(GraphNodeSubPath); ok { - pathPrefix = sp.Path().String() + "." 
- } - - return pathPrefix + k -} - -func provisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisioner); ok { - key := provisionerMapKey(pv.ProvisionerName(), v) - m[key] = v - } - } - - return m -} - -func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeCloseProvisioner); ok { - m[pv.CloseProvisionerName()] = v - } - } - - return m -} - -type graphNodeCloseProvisioner struct { - ProvisionerNameValue string -} - -func (n *graphNodeCloseProvisioner) Name() string { - return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue) -} - -// GraphNodeEvalable impl. -func (n *graphNodeCloseProvisioner) EvalTree() EvalNode { - return &EvalCloseProvisioner{Name: n.ProvisionerNameValue} -} - -func (n *graphNodeCloseProvisioner) CloseProvisionerName() string { - return n.ProvisionerNameValue -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go deleted file mode 100644 index 54f9829c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go +++ /dev/null @@ -1,446 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" -) - -// GraphNodeReferenceable must be implemented by any node that represents -// a Terraform thing that can be referenced (resource, module, etc.). -// -// Even if the thing has no name, this should return an empty list. By -// implementing this and returning a non-nil result, you say that this CAN -// be referenced and other methods of referencing may still be possible (such -// as by path!) -type GraphNodeReferenceable interface { - GraphNodeSubPath - - // ReferenceableAddrs returns a list of addresses through which this can be - // referenced. - ReferenceableAddrs() []addrs.Referenceable -} - -// GraphNodeReferencer must be implemented by nodes that reference other -// Terraform items and therefore depend on them. -type GraphNodeReferencer interface { - GraphNodeSubPath - - // References returns a list of references made by this node, which - // include both a referenced address and source location information for - // the reference. - References() []*addrs.Reference -} - -// GraphNodeReferenceOutside is an interface that can optionally be implemented. -// A node that implements it can specify that its own referenceable addresses -// and/or the addresses it references are in a different module than the -// node itself. -// -// Any referenceable addresses returned by ReferenceableAddrs are interpreted -// relative to the returned selfPath. -// -// Any references returned by References are interpreted relative to the -// returned referencePath. -// -// It is valid but not required for either of these paths to match what is -// returned by method Path, though if both match the main Path then there -// is no reason to implement this method. 
-//
-// The primary use-case for this is the nodes representing module input
-// variables, since their expressions are resolved in terms of their calling
-// module, but they are still referenced from their own module.
-type GraphNodeReferenceOutside interface {
-	// ReferenceOutside returns a path in which any references from this node
-	// are resolved.
-	ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
-}
-
-// ReferenceTransformer is a GraphTransformer that connects all the
-// nodes that reference each other in order to form the proper ordering.
-type ReferenceTransformer struct{}
-
-func (t *ReferenceTransformer) Transform(g *Graph) error {
-	// Build a reference map so we can efficiently look up the references
-	vs := g.Vertices()
-	m := NewReferenceMap(vs)
-
-	// Find the things that reference things and connect them
-	for _, v := range vs {
-		parents, _ := m.References(v)
-		parentsDbg := make([]string, len(parents))
-		for i, v := range parents {
-			parentsDbg[i] = dag.VertexName(v)
-		}
-		log.Printf(
-			"[DEBUG] ReferenceTransformer: %q references: %v",
-			dag.VertexName(v), parentsDbg)
-
-		for _, parent := range parents {
-			g.Connect(dag.BasicEdge(v, parent))
-		}
-	}
-
-	return nil
-}
-
-// DestroyValueReferenceTransformer is a GraphTransformer that reverses the edges
-// for locals and outputs that depend on other nodes which will be
-// removed during destroy. If a destroy node is evaluated before the local or
-// output value, it will be removed from the state, and the later interpolation
-// will fail.
-type DestroyValueReferenceTransformer struct{}
-
-func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
-	vs := g.Vertices()
-	for _, v := range vs {
-		switch v.(type) {
-		case *NodeApplyableOutput, *NodeLocal:
-			// OK
-		default:
-			continue
-		}
-
-		// reverse any outgoing edges so that the value is evaluated first.
-		for _, e := range g.EdgesFrom(v) {
-			target := e.Target()
-
-			// only destroy nodes will be evaluated in reverse
-			if _, ok := target.(GraphNodeDestroyer); !ok {
-				continue
-			}
-
-			log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
-
-			g.RemoveEdge(e)
-			g.Connect(&DestroyEdge{S: target, T: v})
-		}
-	}
-
-	return nil
-}
-
-// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
-// output values which are not referenced in the graph. Since outputs and
-// locals always need to be evaluated, if they reference a resource that is not
-// available in the state the interpolation could fail.
-type PruneUnusedValuesTransformer struct{}
-
-func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
-	// this might need multiple runs in order to ensure that pruning a value
-	// doesn't affect a previously checked value.
-	for removed := 0; ; removed = 0 {
-		for _, v := range g.Vertices() {
-			switch v.(type) {
-			case *NodeApplyableOutput, *NodeLocal:
-				// OK
-			default:
-				continue
-			}
-
-			dependants := g.UpEdges(v)
-
-			switch dependants.Len() {
-			case 0:
-				// nothing at all depends on this
-				g.Remove(v)
-				removed++
-			case 1:
-				// because an output's destroy node always depends on the output,
-				// we need to check for the case of a single destroy node.
-				d := dependants.List()[0]
-				if _, ok := d.(*NodeDestroyableOutput); ok {
-					g.Remove(v)
-					removed++
-				}
-			}
-		}
-		if removed == 0 {
-			break
-		}
-	}
-
-	return nil
-}
-
-// ReferenceMap is a structure that can be used to efficiently check
-// for references on a graph.
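The ReferenceMap described here is, at its core, an index from "module path|referenceable address" keys to vertices, so connecting references is a map lookup rather than a scan over every pair of nodes. A reduced model with illustrative types:

package main

import "fmt"

type vertex struct {
	name  string
	path  string   // module instance path, "" for the root module
	addrs []string // addresses this vertex can be referenced by
	refs  []string // addresses this vertex refers to
}

func mapKey(path, addr string) string { return path + "|" + addr }

// connect indexes referenceable addresses, then resolves each vertex's
// references through the index, skipping self-references as the real map does.
func connect(vs []*vertex) [][2]string {
	index := map[string][]*vertex{}
	for _, v := range vs {
		for _, a := range v.addrs {
			k := mapKey(v.path, a)
			index[k] = append(index[k], v)
		}
	}
	var edges [][2]string
	for _, v := range vs {
		for _, r := range v.refs {
			for _, target := range index[mapKey(v.path, r)] {
				if target != v {
					edges = append(edges, [2]string{v.name, target.name})
				}
			}
		}
	}
	return edges
}

func main() {
	vs := []*vertex{
		{name: "aws_instance.web", path: "", addrs: []string{"aws_instance.web"}},
		{name: "output.ip", path: "", addrs: []string{"output.ip"}, refs: []string{"aws_instance.web"}},
	}
	fmt.Println(connect(vs)) // [[output.ip aws_instance.web]]
}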
-type ReferenceMap struct { - // vertices is a map from internal reference keys (as produced by the - // mapKey method) to one or more vertices that are identified by each key. - // - // A particular reference key might actually identify multiple vertices, - // e.g. in situations where one object is contained inside another. - vertices map[string][]dag.Vertex - - // edges is a map whose keys are a subset of the internal reference keys - // from "vertices", and whose values are the nodes that refer to each - // key. The values in this map are the referrers, while values in - // "verticies" are the referents. The keys in both cases are referents. - edges map[string][]dag.Vertex -} - -// References returns the set of vertices that the given vertex refers to, -// and any referenced addresses that do not have corresponding vertices. -func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) { - rn, ok := v.(GraphNodeReferencer) - if !ok { - return nil, nil - } - if _, ok := v.(GraphNodeSubPath); !ok { - return nil, nil - } - - var matches []dag.Vertex - var missing []addrs.Referenceable - - for _, ref := range rn.References() { - subject := ref.Subject - - key := m.referenceMapKey(v, subject) - if _, exists := m.vertices[key]; !exists { - // If what we were looking for was a ResourceInstance then we - // might be in a resource-oriented graph rather than an - // instance-oriented graph, and so we'll see if we have the - // resource itself instead. - switch ri := subject.(type) { - case addrs.ResourceInstance: - subject = ri.ContainingResource() - case addrs.ResourceInstancePhase: - subject = ri.ContainingResource() - } - key = m.referenceMapKey(v, subject) - } - - vertices := m.vertices[key] - for _, rv := range vertices { - // don't include self-references - if rv == v { - continue - } - matches = append(matches, rv) - } - if len(vertices) == 0 { - missing = append(missing, ref.Subject) - } - } - - return matches, missing -} - -// Referrers returns the set of vertices that refer to the given vertex. -func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex { - rn, ok := v.(GraphNodeReferenceable) - if !ok { - return nil - } - sp, ok := v.(GraphNodeSubPath) - if !ok { - return nil - } - - var matches []dag.Vertex - for _, addr := range rn.ReferenceableAddrs() { - key := m.mapKey(sp.Path(), addr) - referrers, ok := m.edges[key] - if !ok { - continue - } - - // If the referrer set includes our own given vertex then we skip, - // since we don't want to return self-references. - selfRef := false - for _, p := range referrers { - if p == v { - selfRef = true - break - } - } - if selfRef { - continue - } - - matches = append(matches, referrers...) - } - - return matches -} - -func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string { - return fmt.Sprintf("%s|%s", path.String(), addr.String()) -} - -// vertexReferenceablePath returns the path in which the given vertex can be -// referenced. This is the path that its results from ReferenceableAddrs -// are considered to be relative to. -// -// Only GraphNodeSubPath implementations can be referenced, so this method will -// panic if the given vertex does not implement that interface. -func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance { - sp, ok := v.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. 
- panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp)) - } - - if outside, ok := v.(GraphNodeReferenceOutside); ok { - // Vertex is referenced from a different module than where it was - // declared. - path, _ := outside.ReferenceOutside() - return path - } - - // Vertex is referenced from the same module as where it was declared. - return sp.Path() -} - -// vertexReferencePath returns the path in which references _from_ the given -// vertex must be interpreted. -// -// Only GraphNodeSubPath implementations can have references, so this method -// will panic if the given vertex does not implement that interface. -func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance { - sp, ok := referrer.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. - panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp)) - } - - var path addrs.ModuleInstance - if outside, ok := referrer.(GraphNodeReferenceOutside); ok { - // Vertex makes references to objects in a different module than where - // it was declared. - _, path = outside.ReferenceOutside() - return path - } - - // Vertex makes references to objects in the same module as where it - // was declared. - return sp.Path() -} - -// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex -// that the reference is from, and "addr" is the address of the object being -// referenced. -// -// The result is an opaque string that includes both the address of the given -// object and the address of the module instance that object belongs to. -// -// Only GraphNodeSubPath implementations can be referrers, so this method will -// panic if the given vertex does not implement that interface. -func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string { - path := vertexReferencePath(referrer) - return m.mapKey(path, addr) -} - -// NewReferenceMap is used to create a new reference map for the -// given set of vertices. -func NewReferenceMap(vs []dag.Vertex) *ReferenceMap { - var m ReferenceMap - - // Build the lookup table - vertices := make(map[string][]dag.Vertex) - for _, v := range vs { - _, ok := v.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. - continue - } - - // We're only looking for referenceable nodes - rn, ok := v.(GraphNodeReferenceable) - if !ok { - continue - } - - path := m.vertexReferenceablePath(v) - - // Go through and cache them - for _, addr := range rn.ReferenceableAddrs() { - key := m.mapKey(path, addr) - vertices[key] = append(vertices[key], v) - } - - // Any node can be referenced by the address of the module it belongs - // to or any of that module's ancestors. - for _, addr := range path.Ancestors()[1:] { - // Can be referenced either as the specific call instance (with - // an instance key) or as the bare module call itself (the "module" - // block in the parent module that created the instance). - callPath, call := addr.Call() - callInstPath, callInst := addr.CallInstance() - callKey := m.mapKey(callPath, call) - callInstKey := m.mapKey(callInstPath, callInst) - vertices[callKey] = append(vertices[callKey], v) - vertices[callInstKey] = append(vertices[callInstKey], v) - } - } - - // Build the lookup table for referenced by - edges := make(map[string][]dag.Vertex) - for _, v := range vs { - _, ok := v.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. 
- continue - } - - rn, ok := v.(GraphNodeReferencer) - if !ok { - // We're only looking for referenceable nodes - continue - } - - // Go through and cache them - for _, ref := range rn.References() { - if ref.Subject == nil { - // Should never happen - panic(fmt.Sprintf("%T.References returned reference with nil subject", rn)) - } - key := m.referenceMapKey(v, ref.Subject) - edges[key] = append(edges[key], v) - } - } - - m.vertices = vertices - m.edges = edges - return &m -} - -// ReferencesFromConfig returns the references that a configuration has -// based on the interpolated variables in a configuration. -func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference { - if body == nil { - return nil - } - refs, _ := lang.ReferencesInBlock(body, schema) - return refs -} - -// appendResourceDestroyReferences identifies resource and resource instance -// references in the given slice and appends to it the "destroy-phase" -// equivalents of those references, returning the result. -// -// This can be used in the References implementation for a node which must also -// depend on the destruction of anything it references. -func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference { - given := refs - for _, ref := range given { - switch tr := ref.Subject.(type) { - case addrs.Resource: - newRef := *ref // shallow copy - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - refs = append(refs, &newRef) - case addrs.ResourceInstance: - newRef := *ref // shallow copy - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - refs = append(refs, &newRef) - } - } - return refs -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go deleted file mode 100644 index 327950d8..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go +++ /dev/null @@ -1,33 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// RemovedModuleTransformer implements GraphTransformer to add nodes indicating -// when a module was removed from the configuration. -type RemovedModuleTransformer struct { - Config *configs.Config // root node in the config tree - State *states.State -} - -func (t *RemovedModuleTransformer) Transform(g *Graph) error { - // nothing to remove if there's no state! 
- if t.State == nil { - return nil - } - - for _, m := range t.State.Modules { - cc := t.Config.DescendentForInstance(m.Addr) - if cc != nil { - continue - } - - log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr) - g.Add(&NodeModuleRemoved{Addr: m.Addr}) - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go deleted file mode 100644 index 51d9466a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go +++ /dev/null @@ -1,71 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/zclconf/go-cty/cty" -) - -// ResourceCountTransformer is a GraphTransformer that expands the count -// out for a specific resource. -// -// This assumes that the count is already interpolated. -type ResourceCountTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - Schema *configschema.Block - - // Count is either the number of indexed instances to create, or -1 to - // indicate that count is not set at all and thus a no-key instance should - // be created. - Count int - ForEach map[string]cty.Value - Addr addrs.AbsResource -} - -func (t *ResourceCountTransformer) Transform(g *Graph) error { - if t.Count < 0 && t.ForEach == nil { - // Negative count indicates that count is not set at all. - addr := t.Addr.Instance(addrs.NoKey) - - abstract := NewNodeAbstractResourceInstance(addr) - abstract.Schema = t.Schema - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - return nil - } - - // Add nodes related to the for_each expression - for key := range t.ForEach { - addr := t.Addr.Instance(addrs.StringKey(key)) - abstract := NewNodeAbstractResourceInstance(addr) - abstract.Schema = t.Schema - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - // For each count, build and add the node - for i := 0; i < t.Count; i++ { - key := addrs.IntKey(i) - addr := t.Addr.Instance(key) - - abstract := NewNodeAbstractResourceInstance(addr) - abstract.Schema = t.Schema - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go deleted file mode 100644 index 485c1c8a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go +++ /dev/null @@ -1,38 +0,0 @@ -package terraform - -import "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - -const rootNodeName = "root" - -// RootTransformer is a GraphTransformer that adds a root to the graph. 
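For context on the ResourceCountTransformer deleted just above, its three expansion cases can be restated compactly. A sketch in which plain strings stand in for addrs.NoKey, addrs.StringKey and addrs.IntKey:

package main

import "fmt"

// expandKeys mirrors the expansion rules: count < 0 with no for_each means
// "count not set", yielding one keyless instance; a for_each map yields one
// string-keyed instance per element; a count yields integer keys 0..count-1.
func expandKeys(count int, forEach map[string]struct{}) []string {
	if count < 0 && forEach == nil {
		return []string{"(no key)"}
	}
	var keys []string
	for k := range forEach {
		keys = append(keys, fmt.Sprintf("[%q]", k))
	}
	for i := 0; i < count; i++ {
		keys = append(keys, fmt.Sprintf("[%d]", i))
	}
	return keys
}

func main() {
	fmt.Println(expandKeys(-1, nil))                          // [(no key)]
	fmt.Println(expandKeys(2, nil))                           // [[0] [1]]
	fmt.Println(expandKeys(-1, map[string]struct{}{"a": {}})) // [["a"]]
}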
-type RootTransformer struct{} - -func (t *RootTransformer) Transform(g *Graph) error { - // If we already have a good root, we're done - if _, err := g.Root(); err == nil { - return nil - } - - // Add a root - var root graphNodeRoot - g.Add(root) - - // Connect the root to all the edges that need it - for _, v := range g.Vertices() { - if v == root { - continue - } - - if g.UpEdges(v).Len() == 0 { - g.Connect(dag.BasicEdge(root, v)) - } - } - - return nil -} - -type graphNodeRoot struct{} - -func (n graphNodeRoot) Name() string { - return rootNodeName -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go deleted file mode 100644 index e7d95be9..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// StateTransformer is a GraphTransformer that adds the elements of -// the state to the graph. -// -// This transform is used for example by the DestroyPlanGraphBuilder to ensure -// that only resources that are in the state are represented in the graph. -type StateTransformer struct { - // ConcreteCurrent and ConcreteDeposed are used to specialize the abstract - // resource instance nodes that this transformer will create. - // - // If either of these is nil, the objects of that type will be skipped and - // not added to the graph at all. It doesn't make sense to use this - // transformer without setting at least one of these, since that would - // skip everything and thus be a no-op. - ConcreteCurrent ConcreteResourceInstanceNodeFunc - ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc - - State *states.State -} - -func (t *StateTransformer) Transform(g *Graph) error { - if !t.State.HasResources() { - log.Printf("[TRACE] StateTransformer: state is empty, so nothing to do") - return nil - } - - switch { - case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects") - case t.ConcreteCurrent != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only") - case t.ConcreteDeposed != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only") - default: - log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all") - } - - for _, ms := range t.State.Modules { - moduleAddr := ms.Addr - - for _, rs := range ms.Resources { - resourceAddr := rs.Addr.Absolute(moduleAddr) - - for key, is := range rs.Instances { - addr := resourceAddr.Instance(key) - - if obj := is.Current; obj != nil && t.ConcreteCurrent != nil { - abstract := NewNodeAbstractResourceInstance(addr) - node := t.ConcreteCurrent(abstract) - g.Add(node) - log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr) - } - - if t.ConcreteDeposed != nil { - for dk := range is.Deposed { - abstract := NewNodeAbstractResourceInstance(addr) - node := t.ConcreteDeposed(abstract, dk) - g.Add(node) - log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk) - } - } - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go deleted 
file mode 100644
index beb1eed9..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package terraform
-
-import (
-	"log"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/dag"
-)
-
-// GraphNodeTargetable is an interface for graph nodes to implement when they
-// need to be told about incoming targets. This is useful for nodes that need
-// to respect targets as they dynamically expand. Note that the list of targets
-// provided will contain every target provided, and each implementing graph
-// node must filter this list to targets considered relevant.
-type GraphNodeTargetable interface {
-	SetTargets([]addrs.Targetable)
-}
-
-// GraphNodeTargetDownstream is an interface for graph nodes that need to
-// remain present under targeting if any of their dependencies are targeted.
-// TargetDownstream is called with the set of vertices that are direct
-// dependencies for the node, and it should return true if the node must remain
-// in the graph in support of those dependencies.
-//
-// This is used in situations where the dependency edges are representing an
-// ordering relationship but the dependency must still be visited if its
-// dependencies are visited. This is true for outputs, for example, since
-// they must get updated if any of their dependent resources get updated,
-// which would not normally be true if one of their dependencies were targeted.
-type GraphNodeTargetDownstream interface {
-	TargetDownstream(targeted, untargeted *dag.Set) bool
-}
-
-// TargetsTransformer is a GraphTransformer that, when the user specifies a
-// list of resources to target, limits the graph to only those resources and
-// their dependencies.
-type TargetsTransformer struct {
-	// List of targeted resource names specified by the user
-	Targets []addrs.Targetable
-
-	// If set, the index portions of resource addresses will be ignored
-	// for comparison. This is used when transforming a graph where
-	// counted resources have not yet been expanded, since otherwise
-	// the unexpanded nodes (which never have indices) would not match.
-	IgnoreIndices bool
-
-	// Set to true when we're in a `terraform destroy` or a
-	// `terraform plan -destroy`
-	Destroy bool
-}
-
-func (t *TargetsTransformer) Transform(g *Graph) error {
-	if len(t.Targets) > 0 {
-		targetedNodes, err := t.selectTargetedNodes(g, t.Targets)
-		if err != nil {
-			return err
-		}
-
-		for _, v := range g.Vertices() {
-			removable := false
-			if _, ok := v.(GraphNodeResource); ok {
-				removable = true
-			}
-
-			if vr, ok := v.(RemovableIfNotTargeted); ok {
-				removable = vr.RemoveIfNotTargeted()
-			}
-
-			if removable && !targetedNodes.Include(v) {
-				log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
-				g.Remove(v)
-			}
-		}
-	}
-
-	return nil
-}
-
-// Returns a set of targeted nodes. A targeted node is either addressed
-// directly, addressed indirectly via its container, or it's a dependency of a
-// targeted node. Destroy mode keeps dependents instead of dependencies.
-func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (*dag.Set, error) {
-	targetedNodes := new(dag.Set)
-
-	vertices := g.Vertices()
-
-	for _, v := range vertices {
-		if t.nodeIsTarget(v, addrs) {
-			targetedNodes.Add(v)
-
-			// We inform nodes that ask about the list of targets - helps for nodes
-			// that need to dynamically expand. Note that this only occurs for nodes
-			// that are already directly targeted.
-			if tn, ok := v.(GraphNodeTargetable); ok {
-				tn.SetTargets(addrs)
-			}
-
-			var deps *dag.Set
-			var err error
-			if t.Destroy {
-				deps, err = g.Descendents(v)
-			} else {
-				deps, err = g.Ancestors(v)
-			}
-			if err != nil {
-				return nil, err
-			}
-
-			for _, d := range deps.List() {
-				targetedNodes.Add(d)
-			}
-		}
-	}
-	return t.addDependencies(targetedNodes, g)
-}
-
-func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (*dag.Set, error) {
-	// Handle nodes that need to be included if their dependencies are included.
-	// This requires multiple passes since we need to catch transitive
-	// dependencies if and only if they are via other nodes that also
-	// support TargetDownstream. For example:
-	// output -> output -> targeted-resource: both outputs need to be targeted
-	// output -> non-targeted-resource -> targeted-resource: output not targeted
-	//
-	// We'll keep looping until we stop targeting more nodes.
-	queue := targetedNodes.List()
-	for len(queue) > 0 {
-		vertices := queue
-		queue = nil // ready to append for next iteration if necessary
-		for _, v := range vertices {
-			// providers don't cause transitive dependencies, so don't target
-			// downstream from them.
-			if _, ok := v.(GraphNodeProvider); ok {
-				continue
-			}
-
-			dependers := g.UpEdges(v)
-			if dependers == nil {
-				// indicates that there are no up edges for this node, so
-				// we have nothing to do here.
-				continue
-			}
-
-			dependers = dependers.Filter(func(dv interface{}) bool {
-				_, ok := dv.(GraphNodeTargetDownstream)
-				return ok
-			})
-
-			if dependers.Len() == 0 {
-				continue
-			}
-
-			for _, dv := range dependers.List() {
-				if targetedNodes.Include(dv) {
-					// Already present, so nothing to do
-					continue
-				}
-
-				// We'll give the node some information about what it's
-				// depending on in case that informs its decision about whether
-				// it is safe to be targeted.
-				deps := g.DownEdges(v)
-
-				depsTargeted := deps.Intersection(targetedNodes)
-				depsUntargeted := deps.Difference(depsTargeted)
-
-				if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) {
-					targetedNodes.Add(dv)
-					// Need to visit this node on the next pass to see if it
-					// has any transitive dependers.
-					queue = append(queue, dv)
-				}
-			}
-		}
-	}
-
-	return targetedNodes.Filter(func(dv interface{}) bool {
-		return filterPartialOutputs(dv, targetedNodes, g)
-	}), nil
-}
-
-// Outputs may have been included transitively, but if any of their
-// dependencies have been pruned they won't be resolvable.
-// If nothing depends on the output, and the output is missing any
-// dependencies, remove it from the graph.
-// This essentially maintains the previous behavior where interpolation in
-// outputs would fail silently, but can now surface errors where the output
-// is required.
-func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool {
-	// should this just be done with TargetDownstream?
-	if _, ok := v.(*NodeApplyableOutput); !ok {
-		return true
-	}
-
-	dependers := g.UpEdges(v)
-	for _, d := range dependers.List() {
-		if _, ok := d.(*NodeCountBoundary); ok {
-			continue
-		}
-
-		if !targetedNodes.Include(d) {
-			// this one is going to be removed, so it doesn't count
-			continue
-		}
-
-		// as soon as we see a real dependency, we mark this as
-		// non-removable
-		return true
-	}
-
-	depends := g.DownEdges(v)
-
-	for _, d := range depends.List() {
-		if !targetedNodes.Include(d) {
-			log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph",
-				dag.VertexName(v), dag.VertexName(d))
-			return false
-		}
-	}
-	return true
-}
-
-func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool {
-	var vertexAddr addrs.Targetable
-	switch r := v.(type) {
-	case GraphNodeResourceInstance:
-		vertexAddr = r.ResourceInstanceAddr()
-	case GraphNodeResource:
-		vertexAddr = r.ResourceAddr()
-	default:
-		// Only resource and resource instance nodes can be targeted.
-		return false
-	}
-	_, ok := v.(GraphNodeResource)
-	if !ok {
-		return false
-	}
-
-	for _, targetAddr := range targets {
-		if t.IgnoreIndices {
-			// If we're ignoring indices then we'll convert any resource instance
-			// addresses into resource addresses. We don't need to convert
-			// vertexAddr because instance addresses are contained within
-			// their associated resources, and so .TargetContains will take
-			// care of this for us.
-			if instance, isInstance := targetAddr.(addrs.AbsResourceInstance); isInstance {
-				targetAddr = instance.ContainingResource()
-			}
-		}
-		if targetAddr.TargetContains(vertexAddr) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// RemovableIfNotTargeted is a special interface for graph nodes that
-// aren't directly addressable, but need to be removed from the graph when they
-// are not targeted. (Nodes that are not directly targeted end up in the set of
-// targeted nodes because something that _is_ targeted depends on them.) The
-// initial use case for this interface is GraphNodeConfigVariable, which was
-// having trouble interpolating for module variables in targeted scenarios that
-// filtered out the resource node being referenced.
-type RemovableIfNotTargeted interface {
-	RemoveIfNotTargeted() bool
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go
deleted file mode 100644
index 21842789..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package terraform
-
-// TransitiveReductionTransformer is a GraphTransformer that finds the
-// transitive reduction of the graph. For a definition of transitive
-// reduction, see Wikipedia.
-type TransitiveReductionTransformer struct{}
-
-func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
-	// If the graph isn't valid, skip the transitive reduction.
-	// We don't error here because Terraform itself handles graph
-	// validation in a better way, or we assume it does.
- if err := g.Validate(); err != nil { - return nil - } - - // Do it - g.TransitiveReduction() - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go deleted file mode 100644 index 3afce566..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go +++ /dev/null @@ -1,40 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// RootVariableTransformer is a GraphTransformer that adds all the root -// variables to the graph. -// -// Root variables are currently no-ops but they must be added to the -// graph since downstream things that depend on them must be able to -// reach them. -type RootVariableTransformer struct { - Config *configs.Config -} - -func (t *RootVariableTransformer) Transform(g *Graph) error { - // We can have no variables if we have no config. - if t.Config == nil { - return nil - } - - // We're only considering root module variables here, since child - // module variables are handled by ModuleVariableTransformer. - vars := t.Config.Module.Variables - - // Add all variables here - for _, v := range vars { - node := &NodeRootVariable{ - Addr: addrs.InputVariable{ - Name: v.Name, - }, - Config: v, - } - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go deleted file mode 100644 index 6b3c62d1..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// VertexTransformer is a GraphTransformer that transforms vertices -// using the GraphVertexTransformers. The Transforms are run in sequential -// order. If a transform replaces a vertex then the next transform will see -// the new vertex. -type VertexTransformer struct { - Transforms []GraphVertexTransformer -} - -func (t *VertexTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - for _, vt := range t.Transforms { - newV, err := vt.Transform(v) - if err != nil { - return err - } - - // If the vertex didn't change, then don't do anything more - if newV == v { - continue - } - - // Vertex changed, replace it within the graph - if ok := g.Replace(v, newV); !ok { - // This should never happen, big problem - return fmt.Errorf( - "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", - dag.VertexName(v), dag.VertexName(newV), v, newV) - } - - // Replace v so that future transforms use the proper vertex - v = newV - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go deleted file mode 100644 index f6790d9e..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go +++ /dev/null @@ -1,28 +0,0 @@ -package terraform - -import "context" - -// UIInput is the interface that must be implemented to ask for input -// from this user. This should forward the request to wherever the user -// inputs things to ask for values. 
-type UIInput interface { - Input(context.Context, *InputOpts) (string, error) -} - -// InputOpts are options for asking for input. -type InputOpts struct { - // Id is a unique ID for the question being asked that might be - // used for logging or to look up a prior answered question. - Id string - - // Query is a human-friendly question for inputting this value. - Query string - - // Description is a description about what this option is. Be wary - // that this will probably be in a terminal so split lines as you see - // necessary. - Description string - - // Default will be the value returned if no data is entered. - Default string -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go deleted file mode 100644 index e2d9c384..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go +++ /dev/null @@ -1,25 +0,0 @@ -package terraform - -import "context" - -// MockUIInput is an implementation of UIInput that can be used for tests. -type MockUIInput struct { - InputCalled bool - InputOpts *InputOpts - InputReturnMap map[string]string - InputReturnString string - InputReturnError error - InputFn func(*InputOpts) (string, error) -} - -func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { - i.InputCalled = true - i.InputOpts = opts - if i.InputFn != nil { - return i.InputFn(opts) - } - if i.InputReturnMap != nil { - return i.InputReturnMap[opts.Id], i.InputReturnError - } - return i.InputReturnString, i.InputReturnError -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go deleted file mode 100644 index b5d32b1e..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -import ( - "context" - "fmt" -) - -// PrefixUIInput is an implementation of UIInput that prefixes the ID -// with a string, allowing queries to be namespaced. -type PrefixUIInput struct { - IdPrefix string - QueryPrefix string - UIInput UIInput -} - -func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { - opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) - opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) - return i.UIInput.Input(ctx, opts) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go deleted file mode 100644 index 84427c63..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -// UIOutput is the interface that must be implemented to output -// data to the end user. 
-type UIOutput interface { - Output(string) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go deleted file mode 100644 index 135a91c5..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -type CallbackUIOutput struct { - OutputFn func(string) -} - -func (o *CallbackUIOutput) Output(v string) { - o.OutputFn(v) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go deleted file mode 100644 index d828c921..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go +++ /dev/null @@ -1,21 +0,0 @@ -package terraform - -import "sync" - -// MockUIOutput is an implementation of UIOutput that can be used for tests. -type MockUIOutput struct { - sync.Mutex - OutputCalled bool - OutputMessage string - OutputFn func(string) -} - -func (o *MockUIOutput) Output(v string) { - o.Lock() - defer o.Unlock() - o.OutputCalled = true - o.OutputMessage = v - if o.OutputFn != nil { - o.OutputFn(v) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go deleted file mode 100644 index 0d7d4ce0..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// ProvisionerUIOutput is an implementation of UIOutput that calls a hook -// for the output so that the hooks can handle it. -type ProvisionerUIOutput struct { - InstanceAddr addrs.AbsResourceInstance - ProvisionerType string - Hooks []Hook -} - -func (o *ProvisionerUIOutput) Output(msg string) { - for _, h := range o.Hooks { - h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go deleted file mode 100644 index 5428cd5a..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go +++ /dev/null @@ -1,75 +0,0 @@ -package terraform - -import ( - "sort" -) - -// Semaphore is a wrapper around a channel to provide -// utility methods to clarify that we are treating the -// channel as a semaphore -type Semaphore chan struct{} - -// NewSemaphore creates a semaphore that allows up -// to a given limit of simultaneous acquisitions -func NewSemaphore(n int) Semaphore { - if n == 0 { - panic("semaphore with limit 0") - } - ch := make(chan struct{}, n) - return Semaphore(ch) -} - -// Acquire is used to acquire an available slot. -// Blocks until available. -func (s Semaphore) Acquire() { - s <- struct{}{} -} - -// TryAcquire is used to do a non-blocking acquire. -// Returns a bool indicating success -func (s Semaphore) TryAcquire() bool { - select { - case s <- struct{}{}: - return true - default: - return false - } -} - -// Release is used to return a slot. Acquire must -// be called as a pre-condition. 
-func (s Semaphore) Release() { - select { - case <-s: - default: - panic("release without an acquire") - } -} - -// strSliceContains checks if a given string is contained in a slice -// When anybody asks why Go needs generics, here you go. -func strSliceContains(haystack []string, needle string) bool { - for _, s := range haystack { - if s == needle { - return true - } - } - return false -} - -// deduplicate a slice of strings -func uniqueStrings(s []string) []string { - if len(s) < 2 { - return s - } - - sort.Strings(s) - result := make([]string, 1, len(s)) - result[0] = s[0] - for i := 1; i < len(s); i++ { - if s[i] != result[len(result)-1] { - result = append(result, s[i]) - } - } - return result -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go deleted file mode 100644 index 627593d7..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ValueFromUnknown-0] - _ = x[ValueFromConfig-67] - _ = x[ValueFromAutoFile-70] - _ = x[ValueFromNamedFile-78] - _ = x[ValueFromCLIArg-65] - _ = x[ValueFromEnvVar-69] - _ = x[ValueFromInput-73] - _ = x[ValueFromPlan-80] - _ = x[ValueFromCaller-83] -} - -const ( - _ValueSourceType_name_0 = "ValueFromUnknown" - _ValueSourceType_name_1 = "ValueFromCLIArg" - _ValueSourceType_name_2 = "ValueFromConfig" - _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile" - _ValueSourceType_name_4 = "ValueFromInput" - _ValueSourceType_name_5 = "ValueFromNamedFile" - _ValueSourceType_name_6 = "ValueFromPlan" - _ValueSourceType_name_7 = "ValueFromCaller" -) - -var ( - _ValueSourceType_index_3 = [...]uint8{0, 15, 32} -) - -func (i ValueSourceType) String() string { - switch { - case i == 0: - return _ValueSourceType_name_0 - case i == 65: - return _ValueSourceType_name_1 - case i == 67: - return _ValueSourceType_name_2 - case 69 <= i && i <= 70: - i -= 69 - return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]] - case i == 73: - return _ValueSourceType_name_4 - case i == 78: - return _ValueSourceType_name_5 - case i == 80: - return _ValueSourceType_name_6 - case i == 83: - return _ValueSourceType_name_7 - default: - return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go deleted file mode 100644 index 4ae9c92c..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go +++ /dev/null @@ -1,313 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// InputValue represents a value for a variable in the root module, provided -// as part of the definition of an operation. 
-type InputValue struct { - Value cty.Value - SourceType ValueSourceType - - // SourceRange provides source location information for values whose - // SourceType is either ValueFromConfig or ValueFromFile. It is not - // populated for other source types, and so should not be used. - SourceRange tfdiags.SourceRange -} - -// ValueSourceType describes what broad category of source location provided -// a particular value. -type ValueSourceType rune - -const ( - // ValueFromUnknown is the zero value of ValueSourceType and is not valid. - ValueFromUnknown ValueSourceType = 0 - - // ValueFromConfig indicates that a value came from a .tf or .tf.json file, - // e.g. the default value defined for a variable. - ValueFromConfig ValueSourceType = 'C' - - // ValueFromAutoFile indicates that a value came from a "values file", like - // a .tfvars file, that was implicitly loaded by naming convention. - ValueFromAutoFile ValueSourceType = 'F' - - // ValueFromNamedFile indicates that a value came from a named "values file", - // like a .tfvars file, that was passed explicitly on the command line (e.g. - // -var-file=foo.tfvars). - ValueFromNamedFile ValueSourceType = 'N' - - // ValueFromCLIArg indicates that the value was provided directly in - // a CLI argument. The name of this argument is not recorded and so it must - // be inferred from context. - ValueFromCLIArg ValueSourceType = 'A' - - // ValueFromEnvVar indicates that the value was provided via an environment - // variable. The name of the variable is not recorded and so it must be - // inferred from context. - ValueFromEnvVar ValueSourceType = 'E' - - // ValueFromInput indicates that the value was provided at an interactive - // input prompt. - ValueFromInput ValueSourceType = 'I' - - // ValueFromPlan indicates that the value was retrieved from a stored plan. - ValueFromPlan ValueSourceType = 'P' - - // ValueFromCaller indicates that the value was explicitly overridden by - // a caller to Context.SetVariable after the context was constructed. - ValueFromCaller ValueSourceType = 'S' -) - -func (v *InputValue) GoString() string { - if (v.SourceRange != tfdiags.SourceRange{}) { - return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange) - } else { - return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType) - } -} - -func (v ValueSourceType) GoString() string { - return fmt.Sprintf("terraform.%s", v) -} - -//go:generate go run golang.org/x/tools/cmd/stringer -type ValueSourceType - -// InputValues is a map of InputValue instances. -type InputValues map[string]*InputValue - -// InputValuesFromCaller turns the given map of naked values into an -// InputValues that attributes each value to "a caller", using the source -// type ValueFromCaller. This is primarily useful for testing purposes. -// -// This should not be used as a general way to convert map[string]cty.Value -// into InputValues, since in most real cases we want to set a suitable -// other SourceType and possibly SourceRange value. -func InputValuesFromCaller(vals map[string]cty.Value) InputValues { - ret := make(InputValues, len(vals)) - for k, v := range vals { - ret[k] = &InputValue{ - Value: v, - SourceType: ValueFromCaller, - } - } - return ret -} - -// Override merges the given value maps with the receiver, overriding any -// conflicting keys so that the latest definition wins. 
-func (vv InputValues) Override(others ...InputValues) InputValues {
-	// FIXME: This should check to see if any of the values are maps and
-	// merge them if so, in order to preserve the behavior from prior to
-	// Terraform 0.12.
-	ret := make(InputValues)
-	for k, v := range vv {
-		ret[k] = v
-	}
-	for _, other := range others {
-		for k, v := range other {
-			ret[k] = v
-		}
-	}
-	return ret
-}
-
-// JustValues returns a map that just includes the values, discarding the
-// source information.
-func (vv InputValues) JustValues() map[string]cty.Value {
-	ret := make(map[string]cty.Value, len(vv))
-	for k, v := range vv {
-		ret[k] = v.Value
-	}
-	return ret
-}
-
-// DefaultVariableValues returns an InputValues map representing the default
-// values specified for variables in the given configuration map.
-func DefaultVariableValues(configs map[string]*configs.Variable) InputValues {
-	ret := make(InputValues)
-	for k, c := range configs {
-		if c.Default == cty.NilVal {
-			continue
-		}
-		ret[k] = &InputValue{
-			Value:       c.Default,
-			SourceType:  ValueFromConfig,
-			SourceRange: tfdiags.SourceRangeFromHCL(c.DeclRange),
-		}
-	}
-	return ret
-}
-
-// SameValues returns true if the given InputValues has the same values as
-// the receiver, disregarding the source types and source ranges.
-//
-// Values are compared using the cty "RawEquals" method, which means that
-// unknown values can be considered equal to one another if they are of the
-// same type.
-func (vv InputValues) SameValues(other InputValues) bool {
-	if len(vv) != len(other) {
-		return false
-	}
-
-	for k, v := range vv {
-		ov, exists := other[k]
-		if !exists {
-			return false
-		}
-		if !v.Value.RawEquals(ov.Value) {
-			return false
-		}
-	}
-
-	return true
-}
-
-// HasValues returns true if the receiver has the same values as in the given
-// map, disregarding the source types and source ranges.
-//
-// Values are compared using the cty "RawEquals" method, which means that
-// unknown values can be considered equal to one another if they are of the
-// same type.
-func (vv InputValues) HasValues(vals map[string]cty.Value) bool {
-	if len(vv) != len(vals) {
-		return false
-	}
-
-	for k, v := range vv {
-		oVal, exists := vals[k]
-		if !exists {
-			return false
-		}
-		if !v.Value.RawEquals(oVal) {
-			return false
-		}
-	}
-
-	return true
-}
-
-// Identical returns true if the given InputValues has the same values,
-// source types, and source ranges as the receiver.
-//
-// Values are compared using the cty "RawEquals" method, which means that
-// unknown values can be considered equal to one another if they are of the
-// same type.
-//
-// This method is primarily for testing. For most practical purposes, it's
-// better to use SameValues or HasValues.
-func (vv InputValues) Identical(other InputValues) bool {
-	if len(vv) != len(other) {
-		return false
-	}
-
-	for k, v := range vv {
-		ov, exists := other[k]
-		if !exists {
-			return false
-		}
-		if !v.Value.RawEquals(ov.Value) {
-			return false
-		}
-		if v.SourceType != ov.SourceType {
-			return false
-		}
-		if v.SourceRange != ov.SourceRange {
-			return false
-		}
-	}
-
-	return true
-}
-
-// checkInputVariables ensures that variable values supplied at the UI conform
-// to their corresponding declarations in configuration.
-//
-// The set of values is considered valid only if the returned diagnostics
-// does not contain errors. A valid set of values may still produce warnings,
-// which should be returned to the user.
-func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - for name, vc := range vcs { - val, isSet := vs[name] - if !isSet { - // Always an error, since the caller should already have included - // default values from the configuration in the values map. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unassigned variable", - fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name), - )) - continue - } - - wantType := vc.Type - - // A given value is valid if it can convert to the desired type. - _, err := convert.Convert(val.Value, wantType) - if err != nil { - switch val.SourceType { - case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile: - // We have source location information for these. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid value for input variable", - Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err), - Subject: val.SourceRange.ToHCL().Ptr(), - }) - case ValueFromEnvVar: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err), - )) - case ValueFromCLIArg: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err), - )) - case ValueFromInput: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err), - )) - default: - // The above gets us good coverage for the situations users - // are likely to encounter with their own inputs. The other - // cases are generally implementation bugs, so we'll just - // use a generic error for these. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err), - )) - } - } - } - - // Check for any variables that are assigned without being configured. - // This is always an implementation error in the caller, because we - // expect undefined variables to be caught during context construction - // where there is better context to report it well. 
- for name := range vs { - if _, defined := vcs[name]; !defined { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Value assigned to undeclared variable", - fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name), - )) - } - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go deleted file mode 100644 index 4cc3bbba..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go +++ /dev/null @@ -1,62 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - - tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" -) - -// CheckCoreVersionRequirements visits each of the modules in the given -// configuration tree and verifies that any given Core version constraints -// match with the version of Terraform Core that is being used. -// -// The returned diagnostics will contain errors if any constraints do not match. -// The returned diagnostics might also return warnings, which should be -// displayed to the user. -func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { - if config == nil { - return nil - } - - var diags tfdiags.Diagnostics - module := config.Module - - for _, constraint := range module.CoreVersionConstraints { - if !constraint.Required.Check(tfversion.SemVer) { - switch { - case len(config.Path) == 0: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - tfversion.String(), - ), - Subject: &constraint.DeclRange, - }) - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - config.Path, config.SourceAddr, tfversion.String(), - ), - Subject: &constraint.DeclRange, - }) - } - } - } - - for _, c := range config.Children { - childDiags := CheckCoreVersionRequirements(c) - diags = diags.Append(childDiags) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go deleted file mode 100644 index 0666aa5f..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[walkInvalid-0] - _ = x[walkApply-1] - _ = x[walkPlan-2] - _ = x[walkPlanDestroy-3] - _ = x[walkRefresh-4] - _ = x[walkValidate-5] - _ = x[walkDestroy-6] - _ = x[walkImport-7] - _ = x[walkEval-8] -} - -const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval" - -var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95} - -func (i walkOperation) String() string { - if i >= walkOperation(len(_walkOperation_index)-1) { - return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE similarity index 100% rename from vendor/github.com/hashicorp/hcl/LICENSE rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/doc.go new file mode 100644 index 00000000..b4baab1b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/doc.go @@ -0,0 +1,24 @@ +// Package acctest provides the ability to use the binary test driver. The binary +// test driver allows you to run your acceptance tests with a binary of Terraform. +// This is currently the only mechanism for driving tests. It provides a realistic testing +// experience and matrix testing against multiple versions of Terraform CLI, +// as long as they are >= 0.12.0 +// +// The driver must be enabled by initialising the test helper in your TestMain +// function in all provider packages that run acceptance tests. Most providers have only +// one package. +// +// After importing this package, you must define a TestMain and have the following: +// +// func TestMain(m *testing.M) { +// acctest.UseBinaryDriver("provider_name", Provider) +// resource.TestMain(m) +// } +// +// Where `Provider` is the function that returns the instance of a configured `*schema.Provider` +// Some providers already have a TestMain defined, usually for the purpose of enabling test +// sweepers. These additional occurrences should be removed. +// +// It is no longer necessary to import other Terraform providers as Go modules: these +// imports should be removed. 
+package acctest
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/acctest/helper.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/helper.go
similarity index 89%
rename from vendor/github.com/hashicorp/terraform-plugin-sdk/acctest/helper.go
rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/helper.go
index 050b9741..9853512d 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/acctest/helper.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/helper.go
@@ -3,7 +3,7 @@ package acctest
 import (
 	"os"
 
-	"github.com/hashicorp/terraform-plugin-sdk/plugin"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/plugin"
 	tftest "github.com/hashicorp/terraform-plugin-test"
 )
 
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go
new file mode 100644
index 00000000..d6abbd1a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go
@@ -0,0 +1,112 @@
+package diag
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// Diagnostics is a collection of Diagnostic.
+//
+// Developers should append and build the list of diagnostics up until a fatal
+// error is reached, at which point they should return the Diagnostics to the
+// SDK.
+type Diagnostics []Diagnostic
+
+// HasError returns true if Diagnostics contains an instance of
+// Severity == Error.
+//
+// This helper aims to mimic the Go error practice of if err != nil. After any
+// operation that returns Diagnostics, check that it HasError and bubble up the
+// stack.
+func (diags Diagnostics) HasError() bool {
+	for i := range diags {
+		if diags[i].Severity == Error {
+			return true
+		}
+	}
+	return false
+}
+
+// Diagnostic is a contextual message intended to outline problems in user
+// configuration.
+//
+// It supports multiple levels of severity (Error or Warning), a short Summary
+// of the problem, an optional longer Detail message that can assist the user in
+// fixing the problem, as well as an AttributePath representation which
+// Terraform uses to indicate where the issue took place in the user's
+// configuration.
+//
+// A Diagnostic will typically be used to pinpoint a problem with user
+// configuration, however it can still be used to present warnings or errors
+// to the user without any AttributePath set.
+type Diagnostic struct {
+	// Severity indicates the level of the Diagnostic. Currently can be set to
+	// either Error or Warning
+	Severity Severity
+
+	// Summary is a short description of the problem, rendered above location
+	// information
+	Summary string
+
+	// Detail is an optional second message rendered below location information
+	// typically used to communicate a potential fix to the user.
+	Detail string
+
+	// AttributePath is a representation of the path starting from the root of
+	// block (resource, datasource, provider) under evaluation by the SDK, to
+	// the attribute that the Diagnostic should be associated to. Terraform will
+	// use this information to render information on where the problem took
+	// place in the user's configuration.
+	//
+	// It is represented with cty.Path, which is a list of steps of either
+	// cty.GetAttrStep (an actual attribute) or cty.IndexStep (a step with Key
+	// of cty.StringVal for map indexes, and cty.NumberVal for list indexes).
+ // + // PLEASE NOTE: While cty can support indexing into sets, the SDK and + // protocol currently do not. For any Diagnostic related to a schema.TypeSet + // or a child of that type, please terminate the path at the schema.TypeSet + // and opt for more verbose Summary and Detail to help guide the user. + // + // Validity of the AttributePath is currently the responsibility of the + // developer, Terraform should render the root block (provider, resource, + // datasource) in cases where the attribute path is invalid. + AttributePath cty.Path +} + +// Validate ensures a valid Severity and a non-empty Summary are set. +func (d Diagnostic) Validate() error { + var validSev bool + for _, sev := range severities { + if d.Severity == sev { + validSev = true + break + } + } + if !validSev { + return fmt.Errorf("invalid severity: %v", d.Severity) + } + if d.Summary == "" { + return errors.New("empty summary") + } + return nil +} + +// FromErr will convert an error into a Diagnostic +func FromErr(err error) Diagnostic { + return Diagnostic{ + Severity: Error, + Summary: err.Error(), + } +} + +// Severity is an enum type marking the severity level of a Diagnostic +type Severity int + +const ( + Error Severity = iota + Warning +) + +var severities = []Severity{Error, Warning} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/compose.go similarity index 88% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/compose.go index b0919995..9b1b929d 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/compose.go @@ -1,8 +1,11 @@ package customdiff import ( + "context" + "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // All returns a CustomizeDiffFunc that runs all of the given @@ -42,10 +45,10 @@ import ( // } // func All(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { var err error for _, f := range funcs { - thisErr := f(d, meta) + thisErr := f(ctx, d, meta) if thisErr != nil { err = multierror.Append(err, thisErr) } @@ -60,9 +63,9 @@ func All(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { // // If all functions succeed, the combined function also succeeds. 
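The diag package introduced above replaces plain error returns throughout the v2 SDK. As a rough, non-authoritative sketch of the intended calling pattern, a provider function might accumulate warnings and stop at the first error like this; the widget resource, the fetchWidget helper, and the field names are invented for illustration, and only the diag and go-cty APIs shown in the file above are assumed:

package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
)

// widgetInfo is a stand-in for an API object; it exists only so the
// example compiles on its own.
type widgetInfo struct{ Deprecated bool }

// fetchWidget is a hypothetical client call, not part of the SDK.
func fetchWidget(ctx context.Context, name string) (*widgetInfo, error) {
	return &widgetInfo{Deprecated: true}, nil
}

// readWidget accumulates diagnostics and returns them to the SDK. A fatal
// problem is reported by returning early with an Error-severity entry.
func readWidget(ctx context.Context, name string) diag.Diagnostics {
	var diags diag.Diagnostics

	widget, err := fetchWidget(ctx, name)
	if err != nil {
		// FromErr converts a plain error into an Error-severity Diagnostic.
		return append(diags, diag.FromErr(err))
	}

	if widget.Deprecated {
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Warning,
			Summary:  "Widget is deprecated",
			Detail:   fmt.Sprintf("Widget %q is deprecated upstream; consider migrating.", name),
			// Point Terraform at the attribute that triggered the warning.
			AttributePath: cty.Path{cty.GetAttrStep{Name: "name"}},
		})
	}

	// Downstream callers treat diags.HasError() like `if err != nil`.
	return diags
}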
func Sequence(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { for _, f := range funcs { - err := f(d, meta) + err := f(ctx, d, meta) if err != nil { return err } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/computed.go similarity index 62% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/computed.go index 54ea5c40..f47d11a4 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/computed.go @@ -1,14 +1,16 @@ package customdiff import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // ComputedIf returns a CustomizeDiffFunc that sets the given key's new value // as computed if the given condition function returns true. func ComputedIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if f(d, meta) { + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + if f(ctx, d, meta) { d.SetNewComputed(key) } return nil diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/condition.go similarity index 69% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/condition.go index 1d8e2bfd..e85cdc7a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/condition.go @@ -1,20 +1,22 @@ package customdiff import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // ResourceConditionFunc is a function type that makes a boolean decision based // on an entire resource diff. -type ResourceConditionFunc func(d *schema.ResourceDiff, meta interface{}) bool +type ResourceConditionFunc func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) bool // ValueChangeConditionFunc is a function type that makes a boolean decision // by comparing two values. -type ValueChangeConditionFunc func(old, new, meta interface{}) bool +type ValueChangeConditionFunc func(ctx context.Context, old, new, meta interface{}) bool // ValueConditionFunc is a function type that makes a boolean decision based // on a given value. -type ValueConditionFunc func(value, meta interface{}) bool +type ValueConditionFunc func(ctx context.Context, value, meta interface{}) bool // If returns a CustomizeDiffFunc that calls the given condition // function and then calls the given CustomizeDiffFunc only if the condition @@ -26,9 +28,9 @@ type ValueConditionFunc func(value, meta interface{}) bool // a conditional branch if the given CustomizeDiffFunc is already a // locally-defined function, since this avoids obscuring the control flow. 
func If(cond ResourceConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if cond(d, meta) { - return f(d, meta) + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + if cond(ctx, d, meta) { + return f(ctx, d, meta) } return nil } @@ -38,10 +40,10 @@ func If(cond ResourceConditionFunc, f schema.CustomizeDiffFunc) schema.Customize // function with the old and new values of the given key and then calls the // given CustomizeDiffFunc only if the condition function returns true. func IfValueChange(key string, cond ValueChangeConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { old, new := d.GetChange(key) - if cond(old, new, meta) { - return f(d, meta) + if cond(ctx, old, new, meta) { + return f(ctx, d, meta) } return nil } @@ -51,9 +53,9 @@ func IfValueChange(key string, cond ValueChangeConditionFunc, f schema.Customize // function with the new values of the given key and then calls the // given CustomizeDiffFunc only if the condition function returns true. func IfValue(key string, cond ValueConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if cond(d.Get(key), meta) { - return f(d, meta) + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + if cond(ctx, d.Get(key), meta) { + return f(ctx, d, meta) } return nil } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/doc.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/doc.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/force_new.go similarity index 80% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/force_new.go index 26afa8cb..0ffc0886 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/force_new.go @@ -1,7 +1,9 @@ package customdiff import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // ForceNewIf returns a CustomizeDiffFunc that flags the given key as @@ -11,8 +13,8 @@ import ( // values of the field compare equal, since no attribute diff is generated in // that case. func ForceNewIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if f(d, meta) { + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + if f(ctx, d, meta) { d.ForceNew(key) } return nil @@ -30,9 +32,9 @@ func ForceNewIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { // and explicit code in the common case where the decision can be made with // only the specific field value. 
func ForceNewIfChange(key string, f ValueChangeConditionFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { old, new := d.GetChange(key) - if f(old, new, meta) { + if f(ctx, old, new, meta) { d.ForceNew(key) } return nil diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/validate.go similarity index 71% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/validate.go index 0bc2c695..5b88e5e0 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/validate.go @@ -1,24 +1,26 @@ package customdiff import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // ValueChangeValidationFunc is a function type that validates the difference // (or lack thereof) between two values, returning an error if the change // is invalid. -type ValueChangeValidationFunc func(old, new, meta interface{}) error +type ValueChangeValidationFunc func(ctx context.Context, old, new, meta interface{}) error // ValueValidationFunc is a function type that validates a particular value, // returning an error if the value is invalid. -type ValueValidationFunc func(value, meta interface{}) error +type ValueValidationFunc func(ctx context.Context, value, meta interface{}) error // ValidateChange returns a CustomizeDiffFunc that applies the given validation // function to the change for the given key, returning any error produced. func ValidateChange(key string, f ValueChangeValidationFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { old, new := d.GetChange(key) - return f(old, new, meta) + return f(ctx, old, new, meta) } } @@ -31,8 +33,8 @@ func ValidateChange(key string, f ValueChangeValidationFunc) schema.CustomizeDif // together makes intent clearer than spreading that validation across the // schema. 
func ValidateValue(key string, f ValueValidationFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { val := d.Get(key) - return f(val, meta) + return f(ctx, val, meta) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/hashcode/hashcode.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/hashcode/hashcode.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/id.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/id.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/id.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/id.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state.go similarity index 87% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state.go index 88a83966..17cfb49e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state.go @@ -1,6 +1,7 @@ package resource import ( + "context" "log" "time" ) @@ -13,7 +14,7 @@ var refreshGracePeriod = 30 * time.Second // It returns three results. `result` is any object that will be returned // as the final object after waiting for state change. This allows you to // return the final updated object, for example an EC2 instance after refreshing -// it. +// it. A nil result represents not found. // // `state` is the latest state of that object. And `err` is any error that // may have happened while refreshing the state. 
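The customdiff rewrites above are mechanical: every CustomizeDiffFunc, condition, and validation callback gains a leading context.Context. A hedged sketch of what a v2 caller now looks like follows; the resource schema, the "size" attribute, and the use of schema.Resource's CustomizeDiff field are assumptions for illustration rather than anything shown in this patch:

package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleResource wires the ctx-aware helpers together. Note that old and
// new now arrive after ctx in every callback.
func exampleResource() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"size": {Type: schema.TypeInt, Optional: true},
		},
		CustomizeDiff: customdiff.All(
			// Force replacement when "size" shrinks.
			customdiff.ForceNewIfChange("size", func(ctx context.Context, old, new, meta interface{}) bool {
				o, okOld := old.(int)
				n, okNew := new.(int)
				return okOld && okNew && n < o
			}),
			// Reject a zero value regardless of the old value.
			customdiff.ValidateValue("size", func(ctx context.Context, value, meta interface{}) error {
				if v, ok := value.(int); ok && v == 0 {
					return fmt.Errorf("size must be non-zero")
				}
				return nil
			}),
		),
	}
}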
@@ -28,13 +29,13 @@ type StateChangeConf struct { Timeout time.Duration // The amount of time to wait before timeout MinTimeout time.Duration // Smallest time to wait before refreshes PollInterval time.Duration // Override MinTimeout/backoff and only poll this often - NotFoundChecks int // Number of times to allow not found + NotFoundChecks int // Number of times to allow not found (nil result from Refresh) // This is to work around inconsistent APIs ContinuousTargetOccurence int // Number of times the Target state has to occur continuously } -// WaitForState watches an object and waits for it to achieve the state +// WaitForStateContext watches an object and waits for it to achieve the state // specified in the configuration using the specified Refresh() func, // waiting the number of seconds specified in the timeout configuration. // @@ -48,7 +49,9 @@ type StateChangeConf struct { // // Otherwise, the result is the result of the first call to the Refresh function to // reach the target state. -func (conf *StateChangeConf) WaitForState() (interface{}, error) { +// +// Cancellation from the passed in context will cancel the refresh loop +func (conf *StateChangeConf) WaitForStateContext(ctx context.Context) (interface{}, error) { log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) notfoundTick := 0 @@ -80,7 +83,11 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { go func() { defer close(resCh) - time.Sleep(conf.Delay) + select { + case <-time.After(conf.Delay): + case <-cancelCh: + return + } // start with 0 delay for the first loop var wait time.Duration @@ -213,7 +220,9 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { // still waiting, store the last result lastResult = r - + case <-ctx.Done(): + close(cancelCh) + return nil, ctx.Err() case <-timeout: log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) @@ -242,6 +251,9 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { // target state not reached, save the result for the // TimeoutError and wait for the channel to close lastResult = r + case <-ctx.Done(): + log.Println("[ERROR] Context cancelation detected, abandoning grace period") + break forSelect case <-timeout: log.Println("[ERROR] WaitForState exceeded refresh grace period") break forSelect @@ -257,3 +269,13 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { } } } + +// WaitForState watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. 
+// +// Deprecated: Please use WaitForStateContext to ensure proper plugin shutdown +func (conf *StateChangeConf) WaitForState() (interface{}, error) { + log.Printf("[WARN] WaitForState is deprecated, please use WaitForStateContext") + return conf.WaitForStateContext(context.Background()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go similarity index 54% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go index 768b4847..42c7b640 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go @@ -1,192 +1,15 @@ package resource import ( - "encoding/json" "fmt" "strconv" tfjson "github.com/hashicorp/terraform-json" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/zclconf/go-cty/cty" -) - -// shimState takes a new *states.State and reverts it to a legacy state for the provider ACC tests -func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) { - state := terraform.NewState() - - // in the odd case of a nil state, let the helper packages handle it - if newState == nil { - return nil, nil - } - - for _, newMod := range newState.Modules { - mod := state.AddModule(newMod.Addr) - - for name, out := range newMod.OutputValues { - outputType := "" - val := hcl2shim.ConfigValueFromHCL2(out.Value) - ty := out.Value.Type() - switch { - case ty == cty.String: - outputType = "string" - case ty.IsTupleType() || ty.IsListType(): - outputType = "list" - case ty.IsMapType(): - outputType = "map" - } - - mod.Outputs[name] = &terraform.OutputState{ - Type: outputType, - Value: val, - Sensitive: out.Sensitive, - } - } - - for _, res := range newMod.Resources { - resType := res.Addr.Type - providerType := res.ProviderConfig.ProviderConfig.Type - - resource := getResource(providers, providerType, res.Addr) - - for key, i := range res.Instances { - resState := &terraform.ResourceState{ - Type: resType, - Provider: res.ProviderConfig.String(), - } - - // We should always have a Current instance here, but be safe about checking. 
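As a usage sketch for the context-aware wait loop introduced above (the status strings and the getStatus refresh function are hypothetical stand-ins for a real API client):

package example

import (
	"context"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// waitForActive polls until a hypothetical API object reports "ACTIVE",
// honoring ctx cancellation through the new WaitForStateContext entry point.
func waitForActive(ctx context.Context, getStatus resource.StateRefreshFunc) (interface{}, error) {
	conf := &resource.StateChangeConf{
		Pending: []string{"PENDING"},
		Target:  []string{"ACTIVE"},
		Refresh: getStatus, // a nil result from Refresh counts as not found
		Timeout: 10 * time.Minute,
		Delay:   10 * time.Second, // initial delay, now interruptible by ctx
	}
	return conf.WaitForStateContext(ctx)
}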
- if i.Current != nil { - flatmap, err := shimmedAttributes(i.Current, resource) - if err != nil { - return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) - } - - var meta map[string]interface{} - if i.Current.Private != nil { - err := json.Unmarshal(i.Current.Private, &meta) - if err != nil { - return nil, err - } - } - - resState.Primary = &terraform.InstanceState{ - ID: flatmap["id"], - Attributes: flatmap, - Tainted: i.Current.Status == states.ObjectTainted, - Meta: meta, - } - - if i.Current.SchemaVersion != 0 { - if resState.Primary.Meta == nil { - resState.Primary.Meta = map[string]interface{}{} - } - resState.Primary.Meta["schema_version"] = i.Current.SchemaVersion - } - - for _, dep := range i.Current.Dependencies { - resState.Dependencies = append(resState.Dependencies, dep.String()) - } - - // convert the indexes to the old style flapmap indexes - idx := "" - switch key.(type) { - case addrs.IntKey: - // don't add numeric index values to resources with a count of 0 - if len(res.Instances) > 1 { - idx = fmt.Sprintf(".%d", key) - } - case addrs.StringKey: - idx = "." + key.String() - } - - mod.Resources[res.Addr.String()+idx] = resState - } - - // add any deposed instances - for _, dep := range i.Deposed { - flatmap, err := shimmedAttributes(dep, resource) - if err != nil { - return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) - } - - var meta map[string]interface{} - if dep.Private != nil { - err := json.Unmarshal(dep.Private, &meta) - if err != nil { - return nil, err - } - } - - deposed := &terraform.InstanceState{ - ID: flatmap["id"], - Attributes: flatmap, - Tainted: dep.Status == states.ObjectTainted, - Meta: meta, - } - if dep.SchemaVersion != 0 { - deposed.Meta = map[string]interface{}{ - "schema_version": dep.SchemaVersion, - } - } - - resState.Deposed = append(resState.Deposed, deposed) - } - } - } - } - - return state, nil -} - -func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource { - p := providers[providerName] - if p == nil { - panic(fmt.Sprintf("provider %q not found in test step", providerName)) - } - // this is only for tests, so should only see schema.Providers - provider := p.(*schema.Provider) - - switch addr.Mode { - case addrs.ManagedResourceMode: - resource := provider.ResourcesMap[addr.Type] - if resource != nil { - return resource - } - case addrs.DataResourceMode: - resource := provider.DataSourcesMap[addr.Type] - if resource != nil { - return resource - } - } - - panic(fmt.Sprintf("resource %s not found in test step", addr.Type)) -} - -func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) { - flatmap := instance.AttrsFlat - if flatmap != nil { - return flatmap, nil - } - - // if we have json attrs, they need to be decoded - rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType()) - if err != nil { - return nil, err - } - - instanceState, err := res.ShimInstanceStateFromValue(rio.Value) - if err != nil { - return nil, err - } - - return instanceState.Attributes, nil -} + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) type shimmedState struct { state *terraform.State diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go 
b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go similarity index 66% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go index 7b331d80..6f74bcdc 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go @@ -1,7 +1,7 @@ package resource import ( - "bytes" + "context" "errors" "flag" "fmt" @@ -9,29 +9,21 @@ import ( "io/ioutil" "log" "os" - "path/filepath" - "reflect" "regexp" + "strconv" "strings" "syscall" - "testing" - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/errwrap" "github.com/hashicorp/go-multierror" "github.com/hashicorp/logutils" - "github.com/hashicorp/terraform-plugin-sdk/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/logging" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/command/format" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload" - "github.com/hashicorp/terraform-plugin-sdk/internal/initwd" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/mitchellh/colorstring" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/diagutils" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // flagSweep is a flag available when running tests on the command line. It @@ -95,7 +87,9 @@ func AddTestSweepers(name string, s *Sweeper) { sweeperFuncs[name] = s } -func TestMain(m *testing.M) { +func TestMain(m interface { + Run() int +}) { flag.Parse() if *flagSweep != "" { // parse flagSweep contents for regions to run @@ -108,10 +102,17 @@ func TestMain(m *testing.M) { os.Exit(1) } } else { - if acctest.TestHelper != nil { - defer acctest.TestHelper.Close() + if acctest.TestHelper == nil { + log.Fatal("Please configure the acctest binary driver") } - os.Exit(m.Run()) + + exitCode := m.Run() + err := acctest.TestHelper.Close() + if err != nil { + log.Printf("Error cleaning up temporary test files: %s", err) + } + + os.Exit(exitCode) } } @@ -219,20 +220,22 @@ func filterSweeperWithDependencies(name string, source map[string]*Sweeper) map[ // add the success/fail status to the sweeperRunList. 
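To illustrate the reworked TestMain above, which now accepts any value with a Run() int method and closes the binary acctest driver itself, a provider test package might register sweepers like this; the sweeper name and cleanup body are hypothetical:

package example

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

func init() {
	// Register a sweeper that cleans up leaked test resources.
	resource.AddTestSweepers("example_thing", &resource.Sweeper{
		Name: "example_thing",
		F: func(region string) error {
			// Delete any dangling example_thing resources in region.
			return nil
		},
	})
}

// TestMain hands control to the SDK so -sweep flags are honored. When not
// sweeping, the v2 harness requires the binary acctest driver to have been
// configured beforehand.
func TestMain(m *testing.M) {
	resource.TestMain(m)
}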
func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweeper, sweeperRunList map[string]error, allowFailures bool) error { for _, dep := range s.Dependencies { - if depSweeper, ok := sweepers[dep]; ok { - log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) - err := runSweeperWithRegion(region, depSweeper, sweepers, sweeperRunList, allowFailures) + depSweeper, ok := sweepers[dep] - if err != nil { - if allowFailures { - log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", depSweeper.Name, region, err) - continue - } + if !ok { + return fmt.Errorf("sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) + } - return err + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) + err := runSweeperWithRegion(region, depSweeper, sweepers, sweeperRunList, allowFailures) + + if err != nil { + if allowFailures { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", depSweeper.Name, region, err) + continue } - } else { - log.Printf("[WARN] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) + + return err } } @@ -254,14 +257,7 @@ func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweepe return runE } -const TestEnvVar = "TF_ACC" - -// TestProvider can be implemented by any ResourceProvider to provide custom -// reset functionality at the start of an acceptance test. -// The helper/schema Provider implements this interface. -type TestProvider interface { - TestReset() error -} +const testEnvVar = "TF_ACC" // TestCheckFunc is the callback type used with acceptance tests to check // the state of a resource. The state passed in is the latest state known, @@ -302,8 +298,8 @@ type TestCase struct { // // The end effect of each is the same: specifying the providers that // are used within the tests. - Providers map[string]terraform.ResourceProvider - ProviderFactories map[string]terraform.ResourceProviderFactory + Providers map[string]*schema.Provider + ProviderFactories map[string]func() (*schema.Provider, error) // PreventPostDestroyRefresh can be set to true for cases where data sources // are tested alongside real resources @@ -328,11 +324,6 @@ type TestCase struct { // IDRefreshIgnore is a list of configuration keys that will be ignored. IDRefreshName string IDRefreshIgnore []string - - // DisableBinaryDriver forces this test case to run using the legacy test - // driver, even if the binary test driver has been enabled. - // This property will be removed in version 2.0.0 of the SDK. 
- DisableBinaryDriver bool } // TestStep is a single apply sequence of a test, done within the @@ -466,13 +457,13 @@ type TestStep struct { // provider s is used internally to maintain a reference to the // underlying providers during the tests - providers map[string]terraform.ResourceProvider + providers map[string]*schema.Provider } // Set to a file mask in sprintf format where %s is test name -const EnvLogPathMask = "TF_LOG_PATH_MASK" +const envLogPathMask = "TF_LOG_PATH_MASK" -func LogOutput(t TestT) (logOutput io.Writer, err error) { +func logOutput(t testing.T) (logOutput io.Writer, err error) { logOutput = ioutil.Discard logLevel := logging.LogLevel() @@ -490,7 +481,7 @@ func LogOutput(t TestT) (logOutput io.Writer, err error) { } } - if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { + if logPathMask := os.Getenv(envLogPathMask); logPathMask != "" { // Escape special characters which may appear if we have subtests testName := strings.Replace(t.Name(), "/", "__", -1) @@ -518,7 +509,7 @@ func LogOutput(t TestT) (logOutput io.Writer, err error) { // Tests will fail if they do not properly handle conditions to allow multiple // tests to occur against the same resource or service (e.g. random naming). // All other requirements of the Test function also apply to this function. -func ParallelTest(t TestT, c TestCase) { +func ParallelTest(t testing.T, c TestCase) { t.Parallel() Test(t, c) } @@ -533,210 +524,57 @@ func ParallelTest(t TestT, c TestCase) { // the "-test.v" flag) is set. Because some acceptance tests take quite // long, we require the verbose flag so users are able to see progress // output. -func Test(t TestT, c TestCase) { +func Test(t testing.T, c TestCase) { // We only run acceptance tests if an env var is set because they're // slow and generally require some outside configuration. You can opt out // of this with OverrideEnvVar on individual TestCases. - if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { + if os.Getenv(testEnvVar) == "" && !c.IsUnitTest { t.Skip(fmt.Sprintf( "Acceptance tests skipped unless env '%s' set", - TestEnvVar)) + testEnvVar)) return } - logWriter, err := LogOutput(t) + logWriter, err := logOutput(t) if err != nil { t.Error(fmt.Errorf("error setting up logging: %s", err)) } log.SetOutput(logWriter) - // We require verbose mode so that the user knows what is going on. - if !testTesting && !testing.Verbose() && !c.IsUnitTest { - t.Fatal("Acceptance tests must be run with the -v flag on tests") - return - } - // get instances of all providers, so we can use the individual // resources to shim the state during the tests. - providers := make(map[string]terraform.ResourceProvider) - for name, pf := range testProviderFactories(c) { + providers := make(map[string]*schema.Provider) + for name, pf := range c.ProviderFactories { p, err := pf() if err != nil { t.Fatal(err) } providers[name] = p } - - if acctest.TestHelper != nil && c.DisableBinaryDriver == false { - // auto-configure all providers - for _, p := range providers { - err = p.Configure(terraform.NewResourceConfigRaw(nil)) - if err != nil { - t.Fatal(err) - } - } - - // Run the PreCheck if we have it. - // This is done after the auto-configure to allow providers - // to override the default auto-configure parameters. 
- if c.PreCheck != nil { - c.PreCheck() - } - - // inject providers for ImportStateVerify - RunNewTest(t.(*testing.T), c, providers) - return - } else { - // run the PreCheck if we have it - if c.PreCheck != nil { - c.PreCheck() - } - } - - providerResolver, err := testProviderResolver(c) - if err != nil { - t.Fatal(err) + for name, p := range c.Providers { + providers[name] = p } - opts := terraform.ContextOpts{ProviderResolver: providerResolver} - - // A single state variable to track the lifecycle, starting with no state - var state *terraform.State - - // Go through each step and run it - var idRefreshCheck *terraform.ResourceState - idRefresh := c.IDRefreshName != "" - errored := false - for i, step := range c.Steps { - // insert the providers into the step so we can get the resources for - // shimming the state - step.providers = providers - - var err error - log.Printf("[DEBUG] Test: Executing step %d", i) - - if step.SkipFunc != nil { - skip, err := step.SkipFunc() - if err != nil { - t.Fatal(err) - } - if skip { - log.Printf("[WARN] Skipping step %d", i) - continue - } - } - - if step.Config == "" && !step.ImportState { - err = fmt.Errorf( - "unknown test mode for step. Please see TestStep docs\n\n%#v", - step) - } else { - if step.ImportState { - if step.Config == "" { - step.Config = testProviderConfig(c) - } - - // Can optionally set step.Config in addition to - // step.ImportState, to provide config for the import. - state, err = testStepImportState(opts, state, step) - } else { - state, err = testStepConfig(opts, state, step) - } - } - - // If we expected an error, but did not get one, fail - if err == nil && step.ExpectError != nil { - errored = true - t.Error(fmt.Sprintf( - "Step %d, no error received, but expected a match to:\n\n%s\n\n", - i, step.ExpectError)) - break - } - - // If there was an error, exit - if err != nil { - // Perhaps we expected an error? Check if it matches - if step.ExpectError != nil { - if !step.ExpectError.MatchString(err.Error()) { - errored = true - t.Error(fmt.Sprintf( - "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n", - i, err, step.ExpectError)) - break - } - } else { - errored = true - t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err))) - break - } - } - - // If we've never checked an id-only refresh and our state isn't - // empty, find the first resource and test it. - if idRefresh && idRefreshCheck == nil && !state.Empty() { - // Find the first non-nil resource in the state - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.IDRefreshName]; ok { - idRefreshCheck = v - } - - break - } - } - - // If we have an instance to check for refreshes, do it - // immediately. We do it in the middle of another test - // because it shouldn't affect the overall state (refresh - // is read-only semantically) and we want to fail early if - // this fails. If refresh isn't read-only, then this will have - // caught a different bug. - if idRefreshCheck != nil { - log.Printf( - "[WARN] Test: Running ID-only refresh check on %s", - idRefreshCheck.Primary.ID) - if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil { - log.Printf("[ERROR] Test: ID-only test failed: %s", err) - t.Error(fmt.Sprintf( - "[ERROR] Test: ID-only test failed: %s", err)) - break - } - } + // Auto-configure all providers. 
+ for _, p := range providers { + diags := p.Configure(context.Background(), terraform.NewResourceConfigRaw(nil)) + if diags.HasError() { + t.Fatalf("error configuring provider: %s", diagutils.ErrorDiags(diags)) } } - // If we never checked an id-only refresh, it is a failure. - if idRefresh { - if !errored && len(c.Steps) > 0 && idRefreshCheck == nil { - t.Error("ID-only refresh check never ran.") - } + // Run the PreCheck if we have it. + // This is done after the auto-configure to allow providers + // to override the default auto-configure parameters. + if c.PreCheck != nil { + c.PreCheck() } - // If we have a state, then run the destroy - if state != nil { - lastStep := c.Steps[len(c.Steps)-1] - destroyStep := TestStep{ - Config: lastStep.Config, - Check: c.CheckDestroy, - Destroy: true, - PreventDiskCleanup: lastStep.PreventDiskCleanup, - PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, - providers: providers, - } - - log.Printf("[WARN] Test: Executing destroy step") - state, err := testStep(opts, state, destroyStep) - if err != nil { - t.Error(fmt.Sprintf( - "Error destroying resource! WARNING: Dangling resources\n"+ - "may exist. The full state and error is shown below.\n\n"+ - "Error: %s\n\nState: %s", - err, - state)) - } - } else { - log.Printf("[WARN] Skipping destroy test since there is no state.") + if acctest.TestHelper == nil { + t.Fatal("Please configure the acctest binary driver") } + + runNewTest(t, c, providers) } // testProviderConfig takes the list of Providers in a TestCase and returns a @@ -751,210 +589,14 @@ func testProviderConfig(c TestCase) string { return strings.Join(lines, "") } -// testProviderFactories combines the fixed Providers and -// ResourceProviderFactory functions into a single map of -// ResourceProviderFactory functions. -func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory { - ctxProviders := make(map[string]terraform.ResourceProviderFactory) - for k, pf := range c.ProviderFactories { - ctxProviders[k] = pf - } - - // add any fixed providers - for k, p := range c.Providers { - ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) - } - return ctxProviders -} - -// testProviderResolver is a helper to build a ResourceProviderResolver -// with pre instantiated ResourceProviders, so that we can reset them for the -// test, while only calling the factory function once. -// Any errors are stored so that they can be returned by the factory in -// terraform to match non-test behavior. -func testProviderResolver(c TestCase) (providers.Resolver, error) { - ctxProviders := testProviderFactories(c) - - // wrap the old provider factories in the test grpc server so they can be - // called from terraform. - newProviders := make(map[string]providers.Factory) - - for k, pf := range ctxProviders { - factory := pf // must copy to ensure each closure sees its own value - newProviders[k] = func() (providers.Interface, error) { - p, err := factory() - if err != nil { - return nil, err - } - - // The provider is wrapped in a GRPCTestProvider so that it can be - // passed back to terraform core as a providers.Interface, rather - // than the legacy ResourceProvider. - return GRPCTestProvider(p), nil - } - } - - return providers.ResolverFixed(newProviders), nil -} - // UnitTest is a helper to force the acceptance testing harness to run in the // normal unit test suite. This should only be used for resource that don't // have any external dependencies.
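A hedged sketch of a test file written against this rewritten harness: TestCase now deals directly in *schema.Provider values, and the harness fatals unless the binary acctest driver has been configured. The provider name, factory, and config below are hypothetical:

package example

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Provider is a stand-in for a real provider constructor.
func Provider() *schema.Provider { return &schema.Provider{} }

func TestAccExampleThing_basic(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		// v2 drops terraform.ResourceProvider: factories return
		// *schema.Provider directly.
		ProviderFactories: map[string]func() (*schema.Provider, error){
			"example": func() (*schema.Provider, error) { return Provider(), nil },
		},
		Steps: []resource.TestStep{
			{Config: `resource "example_thing" "test" {}`},
		},
	})
}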
-func UnitTest(t TestT, c TestCase) { +func UnitTest(t testing.T, c TestCase) { c.IsUnitTest = true Test(t, c) } -func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error { - // TODO: We guard by this right now so master doesn't explode. We - // need to remove this eventually to make this part of the normal tests. - if os.Getenv("TF_ACC_IDONLY") == "" { - return nil - } - - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: r.Type, - Name: "foo", - }.Instance(addrs.NoKey) - absAddr := addr.Absolute(addrs.RootModuleInstance) - - // Build the state. The state is just the resource with an ID. There - // are no attributes. We only set what is needed to perform a refresh. - state := states.NewState() - state.RootModule().SetResourceInstanceCurrent( - addr, - &states.ResourceInstanceObjectSrc{ - AttrsFlat: r.Primary.Attributes, - Status: states.ObjectReady, - }, - addrs.ProviderConfig{Type: "placeholder"}.Absolute(addrs.RootModuleInstance), - ) - - // Create the config module. We use the full config because Refresh - // doesn't have access to it and we may need things like provider - // configurations. The initial implementation of id-only checks used - // an empty config module, but that caused the aforementioned problems. - cfg, err := testConfig(opts, step) - if err != nil { - return err - } - - // Initialize the context - opts.Config = cfg - opts.State = state - ctx, ctxDiags := terraform.NewContext(&opts) - if ctxDiags.HasErrors() { - return ctxDiags.Err() - } - if diags := ctx.Validate(); len(diags) > 0 { - if diags.HasErrors() { - return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) - } - - log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) - } - - // Refresh! - state, refreshDiags := ctx.Refresh() - if refreshDiags.HasErrors() { - return refreshDiags.Err() - } - - // Verify attribute equivalence. - actualR := state.ResourceInstance(absAddr) - if actualR == nil { - return fmt.Errorf("Resource gone!") - } - if actualR.Current == nil { - return fmt.Errorf("Resource has no primary instance") - } - actual := actualR.Current.AttrsFlat - expected := r.Primary.Attributes - // Remove fields we're ignoring - for _, v := range c.IDRefreshIgnore { - for k := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return fmt.Errorf( - "Attributes not equivalent. Difference is shown below. 
Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - - return nil -} - -func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) { - if step.PreConfig != nil { - step.PreConfig() - } - - cfgPath, err := ioutil.TempDir("", "tf-test") - if err != nil { - return nil, fmt.Errorf("Error creating temporary directory for config: %s", err) - } - - if step.PreventDiskCleanup { - log.Printf("[INFO] Skipping defer os.RemoveAll call") - } else { - defer os.RemoveAll(cfgPath) - } - - // Write the main configuration file - err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm) - if err != nil { - return nil, fmt.Errorf("Error creating temporary file for config: %s", err) - } - - // Create directory for our child modules, if any. - modulesDir := filepath.Join(cfgPath, ".modules") - err = os.Mkdir(modulesDir, os.ModePerm) - if err != nil { - return nil, fmt.Errorf("Error creating child modules directory: %s", err) - } - - inst := initwd.NewModuleInstaller(modulesDir, nil) - _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{}) - if installDiags.HasErrors() { - return nil, installDiags.Err() - } - - loader, err := configload.NewLoader(&configload.Config{ - ModulesDir: modulesDir, - }) - if err != nil { - return nil, fmt.Errorf("failed to create config loader: %s", err) - } - - config, configDiags := loader.LoadConfig(cfgPath) - if configDiags.HasErrors() { - return nil, configDiags - } - - return config, nil -} - func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { if c.ResourceName == "" { return nil, fmt.Errorf("ResourceName must be set in TestStep") @@ -1016,28 +658,28 @@ func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { // testing that computed values were set, when it is not possible to // know ahead of time what the values will be. func TestCheckResourceAttrSet(name, key string) TestCheckFunc { - return func(s *terraform.State) error { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := primaryInstanceState(s, name) if err != nil { return err } return testCheckResourceAttrSet(is, name, key) - } + }) } // TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with // support for non-root modules func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := modulePathPrimaryInstanceState(s, mpt, name) if err != nil { return err } return testCheckResourceAttrSet(is, name, key) - } + }) } func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { @@ -1051,28 +693,28 @@ func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key stri // TestCheckResourceAttr is a TestCheckFunc which validates // the value in state for the given name/key combination. 
func TestCheckResourceAttr(name, key, value string) TestCheckFunc { - return func(s *terraform.State) error { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := primaryInstanceState(s, name) if err != nil { return err } return testCheckResourceAttr(is, name, key, value) - } + }) } // TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with // support for non-root modules func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := modulePathPrimaryInstanceState(s, mpt, name) if err != nil { return err } return testCheckResourceAttr(is, name, key, value) - } + }) } func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { @@ -1106,28 +748,28 @@ func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, // TestCheckNoResourceAttr is a TestCheckFunc which ensures that // NO value exists in state for the given name/key combination. func TestCheckNoResourceAttr(name, key string) TestCheckFunc { - return func(s *terraform.State) error { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := primaryInstanceState(s, name) if err != nil { return err } return testCheckNoResourceAttr(is, name, key) - } + }) } // TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with // support for non-root modules func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := modulePathPrimaryInstanceState(s, mpt, name) if err != nil { return err } return testCheckNoResourceAttr(is, name, key) - } + }) } func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { @@ -1154,28 +796,28 @@ func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key strin // TestMatchResourceAttr is a TestCheckFunc which checks that the value // in state for the given name/key combination matches the given regex. func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { - return func(s *terraform.State) error { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := primaryInstanceState(s, name) if err != nil { return err } return testMatchResourceAttr(is, name, key, r) - } + }) } // TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with // support for non-root modules func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { is, err := modulePathPrimaryInstanceState(s, mpt, name) if err != nil { return err } return testMatchResourceAttr(is, name, key, r) - } + }) } func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { @@ -1211,7 +853,7 @@ func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value // TestCheckResourceAttrPair is a TestCheckFunc which validates that the values // in state for a pair of name/key combinations are equal. 
func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { - return func(s *terraform.State) error { + return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { isFirst, err := primaryInstanceState(s, nameFirst) if err != nil { return err @@ -1223,7 +865,7 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string } return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) - } + }) } // TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with @@ -1231,7 +873,7 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() - return func(s *terraform.State) error { + return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) if err != nil { return err @@ -1243,10 +885,18 @@ func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirs } return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) - } + }) } func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { + if nameFirst == nameSecond && keyFirst == keySecond { + return fmt.Errorf( + "comparing self: resource %s attribute %s", + nameFirst, + keyFirst, + ) + } + vFirst, okFirst := isFirst.Attributes[keyFirst] vSecond, okSecond := isSecond.Attributes[keySecond] @@ -1328,20 +978,6 @@ func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { } } -// TestT is the interface used to handle the test lifecycle of a test. -// -// Users should just use a *testing.T object, which implements this. -type TestT interface { - Error(args ...interface{}) - Fatal(args ...interface{}) - Skip(args ...interface{}) - Name() string - Parallel() -} - -// This is set to true by unit tests to alter some behavior -var testTesting = false - // modulePrimaryInstanceState returns the instance state for the given resource // name in a ModuleState func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { @@ -1376,46 +1012,34 @@ func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceS return modulePrimaryInstanceState(s, ms, name) } -// operationError is a specialized implementation of error used to describe -// failures during one of the several operations performed for a particular -// test case. -type operationError struct { - OpName string - Diags tfdiags.Diagnostics -} - -func newOperationError(opName string, diags tfdiags.Diagnostics) error { - return operationError{opName, diags} -} - -// Error returns a terse error string containing just the basic diagnostic -// messages, for situations where normal Go error behavior is appropriate. 
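The wrappers above funnel every attribute check through the TypeSet heuristic that follows, so a composed check like this sketch (resource name and attributes hypothetical) now fails with a clearer message when an address embeds a precomputed TypeSet hash:

package example

import (
	"regexp"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// checkThing bundles several of the state checks wrapped above.
func checkThing() resource.TestCheckFunc {
	return resource.ComposeAggregateTestCheckFunc(
		resource.TestCheckResourceAttr("example_thing.test", "name", "demo"),
		resource.TestCheckResourceAttrSet("example_thing.test", "id"),
		resource.TestMatchResourceAttr("example_thing.test", "arn",
			regexp.MustCompile(`^arn:`)),
	)
}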
-func (err operationError) Error() string { - return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error()) +// indexesIntoTypeSet is a heuristic to try and identify if a flatmap style +// string address uses a precalculated TypeSet hash, which are integers and +// typically are large and obviously not a list index +func indexesIntoTypeSet(key string) bool { + for _, part := range strings.Split(key, ".") { + if i, err := strconv.Atoi(part); err == nil && i > 100 { + return true + } + } + return false } -// ErrorDetail is like Error except it includes verbosely-rendered diagnostics -// similar to what would come from a normal Terraform run, which include -// additional context not included in Error(). -func (err operationError) ErrorDetail() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "errors during %s:", err.OpName) - clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors} - for _, diag := range err.Diags { - diagStr := format.Diagnostic(diag, nil, clr, 78) - buf.WriteByte('\n') - buf.WriteString(diagStr) +func checkIfIndexesIntoTypeSet(key string, f TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + err := f(s) + if err != nil && s.IsBinaryDrivenTest && indexesIntoTypeSet(key) { + return fmt.Errorf("Error in test check: %s\nTest check address %q likely indexes into TypeSet\nThis is currently not possible in the SDK", err, key) + } + return err } - return buf.String() } -// detailedErrorMessage is a helper for calling ErrorDetail on an error if -// it is an operationError or just taking Error otherwise. -func detailedErrorMessage(err error) string { - switch tErr := err.(type) { - case operationError: - return tErr.ErrorDetail() - default: - return err.Error() +func checkIfIndexesIntoTypeSetPair(keyFirst, keySecond string, f TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + err := f(s) + if err != nil && s.IsBinaryDrivenTest && (indexesIntoTypeSet(keyFirst) || indexesIntoTypeSet(keySecond)) { + return fmt.Errorf("Error in test check: %s\nTest check address %q or %q likely indexes into TypeSet\nThis is currently not possible in the SDK", err, keyFirst, keySecond) + } + return err } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go new file mode 100644 index 00000000..1e39294f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go @@ -0,0 +1,25 @@ +package resource + +import ( + "errors" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testStepTaint(state *terraform.State, step TestStep) error { + for _, p := range step.Taint { + m := state.RootModule() + if m == nil { + return errors.New("no state") + } + rs, ok := m.Resources[p] + if !ok { + return fmt.Errorf("resource %q not found in state", p) + } + log.Printf("[WARN] Test: Explicitly tainting resource %q", p) + rs.Taint() + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go similarity index 79% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go index 8ba9c85d..42a6dcf3 100644 --- 
a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go @@ -5,39 +5,43 @@ import ( "log" "reflect" "strings" - "testing" "github.com/davecgh/go-spew/spew" tfjson "github.com/hashicorp/terraform-json" - "github.com/hashicorp/terraform-plugin-sdk/acctest" - "github.com/hashicorp/terraform-plugin-sdk/terraform" tftest "github.com/hashicorp/terraform-plugin-test" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func getState(t *testing.T, wd *tftest.WorkingDir) *terraform.State { - jsonState := wd.RequireState(t) - state, err := shimStateFromJson(jsonState) - if err != nil { - t.Fatal(err) +func runPostTestDestroy(t testing.T, c TestCase, wd *tftest.WorkingDir) error { + wd.RequireDestroy(t) + + if c.CheckDestroy != nil { + statePostDestroy := getState(t, wd) + + if err := c.CheckDestroy(statePostDestroy); err != nil { + return err + } } - return state + + return nil } -func RunNewTest(t *testing.T, c TestCase, providers map[string]terraform.ResourceProvider) { +func runNewTest(t testing.T, c TestCase, providers map[string]*schema.Provider) { spewConf := spew.NewDefaultConfig() spewConf.SortKeys = true wd := acctest.TestHelper.RequireNewWorkingDir(t) defer func() { - wd.RequireDestroy(t) - - if c.CheckDestroy != nil { - statePostDestroy := getState(t, wd) + statePreDestroy := getState(t, wd) - if err := c.CheckDestroy(statePostDestroy); err != nil { - t.Fatal(err) - } + if !stateIsEmpty(statePreDestroy) { + runPostTestDestroy(t, c, wd) } + wd.Close() }() @@ -98,6 +102,19 @@ func RunNewTest(t *testing.T, c TestCase, providers map[string]terraform.Resourc } } +func getState(t testing.T, wd *tftest.WorkingDir) *terraform.State { + jsonState := wd.RequireState(t) + state, err := shimStateFromJson(jsonState) + if err != nil { + t.Fatal(err) + } + return state +} + +func stateIsEmpty(state *terraform.State) bool { + return state.Empty() || !state.HasResources() +} + func planIsEmpty(plan *tfjson.Plan) bool { for _, rc := range plan.ResourceChanges { if rc.Mode == tfjson.DataResourceMode { @@ -114,7 +131,8 @@ func planIsEmpty(plan *tfjson.Plan) bool { } return true } -func testIDRefresh(c TestCase, t *testing.T, wd *tftest.WorkingDir, step TestStep, r *terraform.ResourceState) error { + +func testIDRefresh(c TestCase, t testing.T, wd *tftest.WorkingDir, step TestStep, r *terraform.ResourceState) error { spewConf := spew.NewDefaultConfig() spewConf.SortKeys = true diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go similarity index 91% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_config.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go index d2fbf29d..285d50e6 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go @@ -1,14 +1,14 @@ package resource import ( - "testing" - "github.com/davecgh/go-spew/spew" - 
"github.com/hashicorp/terraform-plugin-sdk/terraform" tftest "github.com/hashicorp/terraform-plugin-test" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepNewConfig(t *testing.T, c TestCase, wd *tftest.WorkingDir, step TestStep) error { +func testStepNewConfig(t testing.T, c TestCase, wd *tftest.WorkingDir, step TestStep) error { spewConf := spew.NewDefaultConfig() spewConf.SortKeys = true @@ -32,6 +32,7 @@ func testStepNewConfig(t *testing.T, c TestCase, wd *tftest.WorkingDir, step Tes state := getState(t, wd) if step.Check != nil { + state.IsBinaryDrivenTest = true if err := step.Check(state); err != nil { t.Fatal(err) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go similarity index 67% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_import_state.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go index ec2f9916..ec1bc35b 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_import_state.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go @@ -3,17 +3,16 @@ package resource import ( "reflect" "strings" - "testing" "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform-plugin-sdk/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/terraform" tftest "github.com/hashicorp/terraform-plugin-test" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepNewImportState(t *testing.T, c TestCase, wd *tftest.WorkingDir, step TestStep, cfg string) error { +func testStepNewImportState(t testing.T, c TestCase, wd *tftest.WorkingDir, step TestStep, cfg string) error { spewConf := spew.NewDefaultConfig() spewConf.SortKeys = true @@ -93,25 +92,6 @@ func testStepNewImportState(t *testing.T, c TestCase, wd *tftest.WorkingDir, ste r.Primary.ID) } - // We'll try our best to find the schema for this resource type - // so we can ignore Removed fields during validation. If we fail - // to find the schema then we won't ignore them and so the test - // will need to rely on explicit ImportStateVerifyIgnore, though - // this shouldn't happen in any reasonable case. - // KEM CHANGE FROM OLD FRAMEWORK: Fail test if this happens. - var rsrcSchema *schema.Resource - providerAddr, diags := addrs.ParseAbsProviderConfigStr("provider." + r.Provider + "." 
+ r.Type) - if diags.HasErrors() { - t.Fatalf("Failed to find schema for resource with ID %s", r.Primary) - } - - providerType := providerAddr.ProviderConfig.Type - if provider, ok := step.providers[providerType]; ok { - if provider, ok := provider.(*schema.Provider); ok { - rsrcSchema = provider.ResourcesMap[r.Type] - } - } - // don't add empty flatmapped containers, so we can more easily // compare the attributes skipEmpty := func(k, v string) bool { @@ -154,27 +134,6 @@ func testStepNewImportState(t *testing.T, c TestCase, wd *tftest.WorkingDir, ste } } - // Also remove any attributes that are marked as "Removed" in the - // schema, if we have a schema to check that against. - if rsrcSchema != nil { - for k := range actual { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(actual, k) - break - } - } - } - for k := range expected { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(expected, k) - break - } - } - } - } - if !reflect.DeepEqual(actual, expected) { // Determine only the different attributes for k, v := range expected { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go similarity index 61% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go index e56a5155..cb8fb6b7 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go @@ -1,13 +1,19 @@ package resource import ( + "context" + "errors" + "log" "sync" "time" ) -// Retry is a basic wrapper around StateChangeConf that will just retry +// RetryContext is a basic wrapper around StateChangeConf that will just retry // a function until it no longer returns an error. -func Retry(timeout time.Duration, f RetryFunc) error { +// +// Cancellation from the passed in context will propagate through to the +// underlying StateChangeConf +func RetryContext(ctx context.Context, timeout time.Duration, f RetryFunc) error { // These are used to pull the error out of the function; need a mutex to // avoid a data race. var resultErr error @@ -38,7 +44,7 @@ func Retry(timeout time.Duration, f RetryFunc) error { }, } - _, waitErr := c.WaitForState() + _, waitErr := c.WaitForStateContext(ctx) // Need to acquire the lock here to be able to avoid race using resultErr as // the return value @@ -55,6 +61,15 @@ func Retry(timeout time.Duration, f RetryFunc) error { return resultErr } +// Retry is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +// +// Deprecated: Please use RetryContext to ensure proper plugin shutdown +func Retry(timeout time.Duration, f RetryFunc) error { + log.Printf("[WARN] Retry is deprecated, please use RetryContext") + return RetryContext(context.Background(), timeout, f) +} + // RetryFunc is the function retried until it succeeds. type RetryFunc func() *RetryError @@ -66,19 +81,31 @@ type RetryError struct { } // RetryableError is a helper to create a RetryError that's retryable from a -// given error. +// given error. To prevent logic errors, will return an error when passed a +// nil error. 
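A short sketch of retry logic migrated to the context-aware wrapper introduced in this hunk; the create and throttled callbacks are hypothetical stand-ins for a real API call and its error classifier:

package example

import (
	"context"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// createWithRetry retries a hypothetical create call until it stops being
// throttled, propagating ctx cancellation through RetryContext.
func createWithRetry(ctx context.Context, create func() error, throttled func(error) bool) error {
	return resource.RetryContext(ctx, 5*time.Minute, func() *resource.RetryError {
		if err := create(); err != nil {
			if throttled(err) {
				return resource.RetryableError(err) // retry until timeout
			}
			return resource.NonRetryableError(err) // fail immediately
		}
		return nil
	})
}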
func RetryableError(err error) *RetryError { if err == nil { - return nil + return &RetryError{ + Err: errors.New("empty retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } } return &RetryError{Err: err, Retryable: true} } // NonRetryableError is a helper to create a RetryError that's _not_ retryable -// from a given error. +// from a given error. To prevent logic errors, will return an error when +// passed a nil error. func NonRetryableError(err error) *RetryError { if err == nil { - return nil + return &RetryError{ + Err: errors.New("empty non-retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } } return &RetryError{Err: err, Retryable: false} } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/README.md b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/README.md similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/README.md rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/README.md diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go similarity index 94% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go index d16abef8..bb1fc1db 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go @@ -3,14 +3,27 @@ package schema import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// StringKind represents the format a string is in. +type StringKind configschema.StringKind + +const ( + // StringPlain indicates a string is plain-text and requires no processing for display. + StringPlain = StringKind(configschema.StringPlain) + + // StringMarkdown indicates a string is in markdown format and may + // require additional processing to display. + StringMarkdown = StringKind(configschema.StringMarkdown) ) var ( // DescriptionKind is the default StringKind of descriptions in this provider. // It defaults to StringPlain but can be globally switched to StringMarkdown. - DescriptionKind = configschema.StringPlain + DescriptionKind = StringPlain // SchemaDescriptionBuilder converts helper/schema.Schema Descriptions to configschema.Attribute // and Block Descriptions. 
This method can be used to modify the description text prior to it @@ -136,7 +149,7 @@ func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute { } desc := SchemaDescriptionBuilder(s) - descKind := DescriptionKind + descKind := configschema.StringKind(DescriptionKind) if desc == "" { // fallback to plain text if empty descKind = configschema.StringPlain @@ -163,7 +176,7 @@ func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { ret.Block = *nested desc := SchemaDescriptionBuilder(s) - descKind := DescriptionKind + descKind := configschema.StringKind(DescriptionKind) if desc == "" { // fallback to plain text if empty descKind = configschema.StringPlain @@ -272,7 +285,7 @@ func (r *Resource) CoreConfigSchema() *configschema.Block { block := r.coreConfigSchema() desc := ResourceDescriptionBuilder(r) - descKind := DescriptionKind + descKind := configschema.StringKind(DescriptionKind) if desc == "" { // fallback to plain text if empty descKind = configschema.StringPlain @@ -353,9 +366,3 @@ func (r *Resource) CoreConfigSchema() *configschema.Block { func (r *Resource) coreConfigSchema() *configschema.Block { return schemaMap(r.Schema).CoreConfigSchema() } - -// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema -// on the backends's schema. -func (r *Backend) CoreConfigSchema() *configschema.Block { - return schemaMap(r.Schema).CoreConfigSchema() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go similarity index 96% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go index 622e9b13..c7721bcd 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go @@ -3,7 +3,6 @@ package schema import ( "fmt" "strconv" - "strings" ) // FieldReaders are responsible for decoding fields out of data into @@ -42,13 +41,6 @@ func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} { return s.ZeroValue() } -// SchemasForFlatmapPath tries its best to find a sequence of schemas that -// the given dot-delimited attribute path traverses through. -func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema { - parts := strings.Split(path, ".") - return addrToSchema(parts, schemaMap) -} - // addrToSchema finds the final element schema for the given address // and the given schema. It returns all the schemas that led to the final // schema. 
These are in order of the address (out to in). diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go similarity index 85% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go index dc2ae1af..3f1f5e8a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go @@ -7,8 +7,9 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // ConfigFieldReader reads fields out of an untyped map[string]string to the @@ -81,31 +82,16 @@ func (r *ConfigFieldReader) readField( k := strings.Join(address, ".") schema := schemaList[len(schemaList)-1] - // If we're getting the single element of a promoted list, then - // check to see if we have a single element we need to promote. - if address[len(address)-1] == "0" && len(schemaList) > 1 { - lastSchema := schemaList[len(schemaList)-2] - if lastSchema.Type == TypeList && lastSchema.PromoteSingle { - k := strings.Join(address[:len(address)-1], ".") - result, err := r.readPrimitive(k, schema) - if err == nil { - return result, nil - } - } - } - - if protoVersion5 { - switch schema.Type { - case TypeList, TypeSet, TypeMap, typeObject: - // Check if the value itself is unknown. - // The new protocol shims will add unknown values to this list of - // ComputedKeys. This is the only way we have to indicate that a - // collection is unknown in the config - for _, unknown := range r.Config.ComputedKeys { - if k == unknown { - log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) - return FieldReadResult{Computed: true, Exists: true}, nil - } + switch schema.Type { + case TypeList, TypeSet, TypeMap, typeObject: + // Check if the value itself is unknown. + // The new protocol shims will add unknown values to this list of + // ComputedKeys. This is the only way we have to indicate that a + // collection is unknown in the config + for _, unknown := range r.Config.ComputedKeys { + if k == unknown { + log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) + return FieldReadResult{Computed: true, Exists: true}, nil } } } @@ -114,17 +100,6 @@ func (r *ConfigFieldReader) readField( case TypeBool, TypeFloat, TypeInt, TypeString: return r.readPrimitive(k, schema) case TypeList: - // If we support promotion then we first check if we have a lone - // value that we must promote. - // a value that is alone. 
- if schema.PromoteSingle { - result, err := r.readPrimitive(k, schema.Elem.(*Schema)) - if err == nil && result.Exists { - result.Value = []interface{}{result.Value} - return result, nil - } - } - return readListField(&nestedConfigFieldReader{r}, address, schema) case TypeMap: return r.readMap(k, schema) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go similarity index 99% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go index c099029a..642e7f32 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go @@ -4,8 +4,9 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // DiffFieldReader reads fields out of a diff structures. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go similarity index 88% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go index 53f73b71..092dd7f6 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go @@ -200,36 +200,3 @@ func (r BasicMapReader) Range(f func(string, string) bool) bool { return true } - -// MultiMapReader reads over multiple maps, preferring keys that are -// founder earlier (lower number index) vs. 
later (higher number index) -type MultiMapReader []map[string]string - -func (r MultiMapReader) Access(k string) (string, bool) { - for _, m := range r { - if v, ok := m[k]; ok { - return v, ok - } - } - - return "", false -} - -func (r MultiMapReader) Range(f func(string, string) bool) bool { - done := make(map[string]struct{}) - for _, m := range r { - for k, v := range m { - if _, ok := done[k]; ok { - continue - } - - if cont := f(k, v); !cont { - return false - } - - done[k] = struct{}{} - } - } - - return true -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/getsource_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/getsource_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/getsource_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go similarity index 55% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go index 2f88f1ea..83d40aa2 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go @@ -4,12 +4,14 @@ import ( "context" "errors" "fmt" + "log" "sort" - "sync" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) var ReservedProviderFields = []string{ @@ -50,35 +52,43 @@ type Provider struct { // and must *not* implement Create, Update or Delete. DataSourcesMap map[string]*Resource + // ProviderMetaSchema is the schema for the configuration of the meta + // information for this provider. 
If this provider has no meta info, + // this can be omitted. This functionality is currently experimental + // and subject to change or break without warning; it should only be + // used by providers that are collaborating on its use with the + // Terraform team. + ProviderMetaSchema map[string]*Schema + // ConfigureFunc is a function for configuring the provider. If the // provider doesn't need to be configured, this can be omitted. // - // See the ConfigureFunc documentation for more information. + // Deprecated: Please use ConfigureContextFunc instead. ConfigureFunc ConfigureFunc - // MetaReset is called by TestReset to reset any state stored in the meta - // interface. This is especially important if the StopContext is stored by - // the provider. - MetaReset func() error + // ConfigureContextFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. This function + // receives a context.Context that will cancel when Terraform sends a + // cancellation signal. This function can yield Diagnostics + ConfigureContextFunc ConfigureContextFunc meta interface{} - // a mutex is required because TestReset can directly replace the stopCtx - stopMu sync.Mutex - stopCtx context.Context - stopCtxCancel context.CancelFunc - stopOnce sync.Once - TerraformVersion string } // ConfigureFunc is the function used to configure a Provider. // +// Deprecated: Please use ConfigureContextFunc +type ConfigureFunc func(*ResourceData) (interface{}, error) + +// ConfigureContextFunc is the function used to configure a Provider. +// // The interface{} value returned by this function is stored and passed into // the subsequent resources as the meta parameter. This return value is // usually used to pass along a configured API client, a configuration // structure, etc. -type ConfigureFunc func(*ResourceData) (interface{}, error) +type ConfigureContextFunc func(context.Context, *ResourceData) (interface{}, diag.Diagnostics) // InternalValidate should be called to validate the structure // of the provider. @@ -91,6 +101,10 @@ func (p *Provider) InternalValidate() error { return errors.New("provider is nil") } + if p.ConfigureFunc != nil && p.ConfigureContextFunc != nil { + return errors.New("ConfigureFunc and ConfigureContextFunc must not both be set") + } + var validationErrors error sm := schemaMap(p.Schema) if err := sm.InternalValidate(sm); err != nil { @@ -116,6 +130,11 @@ func (p *Provider) InternalValidate() error { } } + // Warn of deprecations + if p.ConfigureFunc != nil && p.ConfigureContextFunc == nil { + log.Printf("[WARN] ConfigureFunc is deprecated, please use ConfigureContextFunc") + } + return validationErrors } @@ -141,58 +160,13 @@ func (p *Provider) SetMeta(v interface{}) { p.meta = v } -// Stopped reports whether the provider has been stopped or not. -func (p *Provider) Stopped() bool { - ctx := p.StopContext() - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -// StopCh returns a channel that is closed once the provider is stopped. -func (p *Provider) StopContext() context.Context { - p.stopOnce.Do(p.stopInit) - - p.stopMu.Lock() - defer p.stopMu.Unlock() - - return p.stopCtx -} - -func (p *Provider) stopInit() { - p.stopMu.Lock() - defer p.stopMu.Unlock() - - p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) -} - -// Stop implementation of terraform.ResourceProvider interface. 
-func (p *Provider) Stop() error { - p.stopOnce.Do(p.stopInit) - - p.stopMu.Lock() - defer p.stopMu.Unlock() - - p.stopCtxCancel() - return nil -} - -// TestReset resets any state stored in the Provider, and will call TestReset -// on Meta if it implements the TestProvider interface. -// This may be used to reset the schema.Provider at the start of a test, and is -// automatically called by resource.Test. -func (p *Provider) TestReset() error { - p.stopInit() - if p.MetaReset != nil { - return p.MetaReset() - } - return nil -} - -// GetSchema implementation of terraform.ResourceProvider interface +// GetSchema returns the config schema for the main provider +// configuration, as would appear in a "provider" block in the +// configuration files. +// +// Currently not all providers support schema. Callers must therefore +// first call Resources and DataSources and ensure that at least one +// resource or data source has the SchemaAvailable flag set. func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { resourceTypes := map[string]*configschema.Block{} dataSources := map[string]*configschema.Block{} @@ -215,125 +189,103 @@ func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.P }, nil } -// Input implementation of terraform.ResourceProvider interface. -func (p *Provider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - return schemaMap(p.Schema).Input(input, c) -} - -// Validate implementation of terraform.ResourceProvider interface. -func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) { +// Validate is called once at the beginning with the raw configuration +// (no interpolation done) and can return diagnostics +// +// This is called once with the provider configuration only. It may not +// be called at all if no provider configuration is given. +// +// This should not assume that any values of the configurations are valid. +// The primary use case of this call is to check that required keys are +// set. +func (p *Provider) Validate(c *terraform.ResourceConfig) diag.Diagnostics { if err := p.InternalValidate(); err != nil { - return nil, []error{fmt.Errorf( - "Internal validation of the provider failed! This is always a bug\n"+ - "with the provider itself, and not a user issue. Please report\n"+ - "this bug:\n\n%s", err)} + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: "InternalValidate", + Detail: fmt.Sprintf("Internal validation of the provider failed! This is always a bug\n"+ + "with the provider itself, and not a user issue. Please report\n"+ + "this bug:\n\n%s", err), + }, + } } return schemaMap(p.Schema).Validate(c) } -// ValidateResource implementation of terraform.ResourceProvider interface. +// ValidateResource is called once at the beginning with the raw +// configuration (no interpolation done) and can return diagnostics. +// +// This is called once per resource. +// +// This should not assume any of the values in the resource configuration +// are valid since it is possible they have to be interpolated still. +// The primary use case of this call is to check that the required keys +// are set and that the general structure is correct. 
func (p *Provider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { + t string, c *terraform.ResourceConfig) diag.Diagnostics { r, ok := p.ResourcesMap[t] if !ok { - return nil, []error{fmt.Errorf( - "Provider doesn't support resource: %s", t)} + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: fmt.Sprintf("Provider doesn't support resource: %s", t), + }, + } } return r.Validate(c) } -// Configure implementation of terraform.ResourceProvider interface. -func (p *Provider) Configure(c *terraform.ResourceConfig) error { +// Configure configures the provider itself with the configuration +// given. This is useful for setting things like access keys. +// +// This won't be called at all if no provider configuration is given. +func (p *Provider) Configure(ctx context.Context, c *terraform.ResourceConfig) diag.Diagnostics { + + var diags diag.Diagnostics + // No configuration - if p.ConfigureFunc == nil { - return nil + if p.ConfigureFunc == nil && p.ConfigureContextFunc == nil { + return diags } sm := schemaMap(p.Schema) // Get a ResourceData for this configuration. To do this, we actually // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, c, nil, p.meta, true) + diff, err := sm.Diff(ctx, nil, c, nil, p.meta, true) if err != nil { - return err + return append(diags, diag.FromErr(err)) } data, err := sm.Data(nil, diff) if err != nil { - return err - } - - if p.TerraformVersion == "" { - // Terraform 0.12 introduced this field to the protocol - // We can therefore assume that if it's unconfigured at this point, it's 0.10 or 0.11 - p.TerraformVersion = "0.11+compatible" - } - meta, err := p.ConfigureFunc(data) - if err != nil { - return err + return append(diags, diag.FromErr(err)) } - p.meta = meta - return nil -} - -// Apply implementation of terraform.ResourceProvider interface. -func (p *Provider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Apply(s, d, p.meta) -} - -// Diff implementation of terraform.ResourceProvider interface. -func (p *Provider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Diff(s, c, p.meta) -} - -// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't -// attempt to calculate ignore_changes. -func (p *Provider) SimpleDiff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) + if p.ConfigureFunc != nil { + meta, err := p.ConfigureFunc(data) + if err != nil { + return append(diags, diag.FromErr(err)) + } + p.meta = meta } - - return r.simpleDiff(s, c, p.meta) -} - -// Refresh implementation of terraform.ResourceProvider interface. 
-func (p *Provider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) + if p.ConfigureContextFunc != nil { + meta, ds := p.ConfigureContextFunc(ctx, data) + diags = append(diags, ds...) + if diags.HasError() { + return diags + } + p.meta = meta } - return r.Refresh(s, p.meta) + return diags } -// Resources implementation of terraform.ResourceProvider interface. +// Resources returns all the available resource types that this provider +// knows how to manage. func (p *Provider) Resources() []terraform.ResourceType { keys := make([]string, 0, len(p.ResourcesMap)) for k := range p.ResourcesMap { @@ -364,7 +316,22 @@ func (p *Provider) Resources() []terraform.ResourceType { return result } +// ImportState requests that the given resource be imported. +// +// The returned InstanceState only requires ID be set. Importing +// will always call Refresh after the state to complete it. +// +// IMPORTANT: InstanceState doesn't have the resource type attached +// to it. A type must be specified on the state via the Ephemeral +// field on the state. +// +// This function can return multiple states. Normally, an import +// will map 1:1 to a physical resource. However, some resources map +// to multiple. For example, an AWS security group may contain many rules. +// Each rule is represented by a separate resource in Terraform, +// therefore multiple states are returned. func (p *Provider) ImportState( + ctx context.Context, info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) { // Find the resource @@ -385,9 +352,13 @@ func (p *Provider) ImportState( // Call the import function results := []*ResourceData{data} - if r.Importer.State != nil { + if r.Importer.State != nil || r.Importer.StateContext != nil { var err error - results, err = r.Importer.State(data, p.meta) + if r.Importer.StateContext != nil { + results, err = r.Importer.StateContext(ctx, data, p.meta) + } else { + results, err = r.Importer.State(data, p.meta) + } if err != nil { return nil, err } @@ -413,45 +384,32 @@ func (p *Provider) ImportState( return states, nil } -// ValidateDataSource implementation of terraform.ResourceProvider interface. +// ValidateDataSource is called once at the beginning with the raw +// configuration (no interpolation done) and can return diagnostics. +// +// This is called once per data source instance. +// +// This should not assume any of the values in the resource configuration +// are valid since it is possible they have to be interpolated still. +// The primary use case of this call is to check that the required keys +// are set and that the general structure is correct. func (p *Provider) ValidateDataSource( - t string, c *terraform.ResourceConfig) ([]string, []error) { + t string, c *terraform.ResourceConfig) diag.Diagnostics { r, ok := p.DataSourcesMap[t] if !ok { - return nil, []error{fmt.Errorf( - "Provider doesn't support data source: %s", t)} + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: fmt.Sprintf("Provider doesn't support data source: %s", t), + }, + } } return r.Validate(c) } -// ReadDataDiff implementation of terraform.ResourceProvider interface. 
-func (p *Provider) ReadDataDiff( - info *terraform.InstanceInfo, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - - r, ok := p.DataSourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown data source: %s", info.Type) - } - - return r.Diff(nil, c, p.meta) -} - -// RefreshData implementation of terraform.ResourceProvider interface. -func (p *Provider) ReadDataApply( - info *terraform.InstanceInfo, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - - r, ok := p.DataSourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown data source: %s", info.Type) - } - - return r.ReadDataApply(d, p.meta) -} - -// DataSources implementation of terraform.ResourceProvider interface. +// DataSources returns all of the available data sources that this +// provider implements. func (p *Provider) DataSources() []terraform.DataSource { keys := make([]string, 0, len(p.DataSourcesMap)) for k := range p.DataSourcesMap { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go similarity index 71% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go index 75cfe885..dfeaba3a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go @@ -1,13 +1,16 @@ package schema import ( + "context" "errors" "fmt" "log" "strconv" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) var ReservedDataSourceFields = []string{ @@ -63,12 +66,6 @@ type Resource struct { // their Versioning at any integer >= 1 SchemaVersion int - // MigrateState is deprecated and any new changes to a resource's schema - // should be handled by StateUpgraders. Existing MigrateState implementations - // should remain for compatibility with existing state. MigrateState will - // still be called if the stored SchemaVersion is less than the - // first version of the StateUpgraders. - // // MigrateState is responsible for updating an InstanceState with an old // version to the format expected by the current version of the Schema. // @@ -79,6 +76,12 @@ type Resource struct { // the InstanceState that needs updating, as well as the configured // provider's configured meta interface{}, in case the migration process // needs to make any remote API calls. + // + // Deprecated: MigrateState is deprecated and any new changes to a resource's schema + // should be handled by StateUpgraders. Existing MigrateState implementations + // should remain for compatibility with existing state. MigrateState will + // still be called if the stored SchemaVersion is less than the + // first version of the StateUpgraders. MigrateState StateMigrateFunc // StateUpgraders contains the functions responsible for upgrading an @@ -95,8 +98,27 @@ type Resource struct { // The functions below are the CRUD operations for this resource. // - // The only optional operation is Update. If Update is not implemented, - // then updates will not be supported for this resource. + // Deprecated: Please use the context aware equivalents instead. 
Only one of + // the operations or context aware equivalent can be set, not both. + Create CreateFunc + Read ReadFunc + Update UpdateFunc + Delete DeleteFunc + + // Exists is a function that is called to check if a resource still + // exists. If this returns false, then this will affect the diff + // accordingly. If this function isn't set, it will not be called. You + // can also signal existence in the Read method by calling d.SetId("") + // if the Resource is no longer present and should be removed from state. + // The *ResourceData passed to Exists should _not_ be modified. + // + // Deprecated: ReadContext should be able to encapsulate the logic of Exists + Exists ExistsFunc + + // The functions below are the CRUD operations for this resource. + // + // The only optional operation is Update. If Update is not + // implemented, then updates will not be supported for this resource. // // The ResourceData parameter in the functions below are used to // query configuration and changes for the resource as well as to set @@ -107,21 +129,19 @@ type Resource struct { // a ConfigureFunc, this will be nil. This parameter should be used // to store API clients, configuration structures, etc. // - // If any errors occur during each of the operation, an error should be - // returned. If a resource was partially updated, be careful to enable - // partial state mode for ResourceData and use it accordingly. + // These functions are passed a context configured to timeout with whatever + // was set as the timeout for this operation. Useful for forwarding on to + // backend SDK's that accept context. The context will also cancel if + // Terraform sends a cancellation signal. // - // Exists is a function that is called to check if a resource still - // exists. If this returns false, then this will affect the diff - // accordingly. If this function isn't set, it will not be called. You - // can also signal existence in the Read method by calling d.SetId("") - // if the Resource is no longer present and should be removed from state. - // The *ResourceData passed to Exists should _not_ be modified. - Create CreateFunc - Read ReadFunc - Update UpdateFunc - Delete DeleteFunc - Exists ExistsFunc + // These functions return diagnostics, allowing developers to build + // a list of warnings and errors to be presented to the Terraform user. + // The AttributePath of those diagnostics should be built within these + // functions, please consult go-cty documentation for building a cty.Path + CreateContext CreateContextFunc + ReadContext ReadContextFunc + UpdateContext UpdateContextFunc + DeleteContext DeleteContextFunc // CustomizeDiff is a custom function for working with the diff that // Terraform has created for this resource - it can be used to customize the @@ -143,11 +163,6 @@ type Resource struct { // // This function needs to be resilient to support all scenarios. // - // If this function needs to access external API resources, remember to flag - // the RequiresRefresh attribute mentioned below to ensure that - // -refresh=false is blocked when running plan or apply, as this means that - // this resource requires refresh-like behaviour to work effectively. - // // For the most part, only computed fields can be customized by this // function. // @@ -200,20 +215,26 @@ func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.Insta return s, nil } -// See Resource documentation. +// The following function types are of the legacy CRUD operations. 
+// +// Deprecated: Please use the context aware equivalents instead. type CreateFunc func(*ResourceData, interface{}) error +type ReadFunc func(*ResourceData, interface{}) error +type UpdateFunc func(*ResourceData, interface{}) error +type DeleteFunc func(*ResourceData, interface{}) error +type ExistsFunc func(*ResourceData, interface{}) (bool, error) // See Resource documentation. -type ReadFunc func(*ResourceData, interface{}) error +type CreateContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics // See Resource documentation. -type UpdateFunc func(*ResourceData, interface{}) error +type ReadContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics // See Resource documentation. -type DeleteFunc func(*ResourceData, interface{}) error +type UpdateContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics // See Resource documentation. -type ExistsFunc func(*ResourceData, interface{}) (bool, error) +type DeleteContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics // See Resource documentation. type StateMigrateFunc func( @@ -238,19 +259,79 @@ type StateUpgrader struct { } // See StateUpgrader -type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) +type StateUpgradeFunc func(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) // See Resource documentation. -type CustomizeDiffFunc func(*ResourceDiff, interface{}) error +type CustomizeDiffFunc func(context.Context, *ResourceDiff, interface{}) error + +func (r *Resource) create(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Create != nil { + var diags diag.Diagnostics + if err := r.Create(d, meta); err != nil { + diags = append(diags, diag.FromErr(err)) + } + return diags + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutCreate)) + defer cancel() + return r.CreateContext(ctx, d, meta) +} + +func (r *Resource) read(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Read != nil { + var diags diag.Diagnostics + if err := r.Read(d, meta); err != nil { + diags = append(diags, diag.FromErr(err)) + } + return diags + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutRead)) + defer cancel() + return r.ReadContext(ctx, d, meta) +} + +func (r *Resource) update(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Update != nil { + var diags diag.Diagnostics + if err := r.Update(d, meta); err != nil { + diags = append(diags, diag.FromErr(err)) + } + return diags + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutUpdate)) + defer cancel() + return r.UpdateContext(ctx, d, meta) +} + +func (r *Resource) delete(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Delete != nil { + var diags diag.Diagnostics + if err := r.Delete(d, meta); err != nil { + diags = append(diags, diag.FromErr(err)) + } + return diags + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutDelete)) + defer cancel() + return r.DeleteContext(ctx, d, meta) +} // Apply creates, updates, and/or deletes a resource. 
func (r *Resource) Apply( + ctx context.Context, s *terraform.InstanceState, d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { + meta interface{}) (*terraform.InstanceState, diag.Diagnostics) { + + var diags diag.Diagnostics + data, err := schemaMap(r.Schema).Data(s, d) if err != nil { - return s, err + return s, append(diags, diag.FromErr(err)) + } + + if s != nil && data != nil { + data.providerMeta = s.ProviderMeta } // Instance Diff shoould have the timeout info, need to copy it over to the @@ -280,8 +361,10 @@ func (r *Resource) Apply( if d.Destroy || d.RequiresNew() { if s.ID != "" { // Destroy the resource since it is created - if err := r.Delete(data, meta); err != nil { - return r.recordCurrentSchemaVersion(data.State()), err + diags = append(diags, r.delete(ctx, data, meta)...) + + if diags.HasError() { + return r.recordCurrentSchemaVersion(data.State()), diags } // Make sure the ID is gone. @@ -291,36 +374,39 @@ func (r *Resource) Apply( // If we're only destroying, and not creating, then return // now since we're done! if !d.RequiresNew() { - return nil, nil + return nil, diags } // Reset the data to be stateless since we just destroyed data, err = schemaMap(r.Schema).Data(nil, d) - // data was reset, need to re-apply the parsed timeouts - data.timeouts = &rt if err != nil { - return nil, err + return nil, append(diags, diag.FromErr(err)) } + + // data was reset, need to re-apply the parsed timeouts + data.timeouts = &rt } - err = nil if data.Id() == "" { // We're creating, it is a new resource. data.MarkNewResource() - err = r.Create(data, meta) + diags = append(diags, r.create(ctx, data, meta)...) } else { - if r.Update == nil { - return s, fmt.Errorf("doesn't support update") + if !r.updateFuncSet() { + return s, append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "doesn't support update", + }) } - - err = r.Update(data, meta) + diags = append(diags, r.update(ctx, data, meta)...) } - return r.recordCurrentSchemaVersion(data.State()), err + return r.recordCurrentSchemaVersion(data.State()), diags } // Diff returns a diff of this resource. func (r *Resource) Diff( + ctx context.Context, s *terraform.InstanceState, c *terraform.ResourceConfig, meta interface{}) (*terraform.InstanceDiff, error) { @@ -332,7 +418,7 @@ func (r *Resource) Diff( return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) } - instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true) + instanceDiff, err := schemaMap(r.Schema).Diff(ctx, s, c, r.CustomizeDiff, meta, true) if err != nil { return instanceDiff, err } @@ -348,12 +434,13 @@ func (r *Resource) Diff( return instanceDiff, err } -func (r *Resource) simpleDiff( +func (r *Resource) SimpleDiff( + ctx context.Context, s *terraform.InstanceState, c *terraform.ResourceConfig, meta interface{}) (*terraform.InstanceDiff, error) { - instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) + instanceDiff, err := schemaMap(r.Schema).Diff(ctx, s, c, r.CustomizeDiff, meta, false) if err != nil { return instanceDiff, err } @@ -377,30 +464,39 @@ func (r *Resource) simpleDiff( } // Validate validates the resource configuration against the schema. 
-func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { - warns, errs := schemaMap(r.Schema).Validate(c) +func (r *Resource) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + diags := schemaMap(r.Schema).Validate(c) if r.DeprecationMessage != "" { - warns = append(warns, r.DeprecationMessage) + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Deprecated Resource", + Detail: r.DeprecationMessage, + }) } - return warns, errs + return diags } // ReadDataApply loads the data for a data source, given a diff that // describes the configuration arguments and desired computed attributes. func (r *Resource) ReadDataApply( + ctx context.Context, d *terraform.InstanceDiff, meta interface{}, -) (*terraform.InstanceState, error) { +) (*terraform.InstanceState, diag.Diagnostics) { + + var diags diag.Diagnostics + // Data sources are always built completely from scratch // on each read, so the source state is always nil. data, err := schemaMap(r.Schema).Data(nil, d) if err != nil { - return nil, err + return nil, append(diags, diag.FromErr(err)) } - err = r.Read(data, meta) + diags = append(diags, r.read(ctx, data, meta)...) + state := data.State() if state != nil && state.ID == "" { // Data sources can set an ID if they want, but they aren't @@ -410,7 +506,7 @@ func (r *Resource) ReadDataApply( state.ID = "-" } - return r.recordCurrentSchemaVersion(state), err + return r.recordCurrentSchemaVersion(state), diags } // RefreshWithoutUpgrade reads the instance state, but does not call @@ -418,61 +514,15 @@ func (r *Resource) ReadDataApply( // separate API call. // RefreshWithoutUpgrade is part of the new plugin shims. func (r *Resource) RefreshWithoutUpgrade( + ctx context.Context, s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the ID is already somehow blank, it doesn't exist - if s.ID == "" { - return nil, nil - } + meta interface{}) (*terraform.InstanceState, diag.Diagnostics) { - rt := ResourceTimeout{} - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - - if r.Exists != nil { - // Make a copy of data so that if it is modified it doesn't - // affect our Read later. - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt + var diags diag.Diagnostics - if err != nil { - return s, err - } - - exists, err := r.Exists(data, meta) - if err != nil { - return s, err - } - if !exists { - return nil, nil - } - } - - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - if err != nil { - return s, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - state = nil - } - - return r.recordCurrentSchemaVersion(state), err -} - -// Refresh refreshes the state of the resource. -func (r *Resource) Refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { // If the ID is already somehow blank, it doesn't exist if s.ID == "" { - return nil, nil + return nil, diags } rt := ResourceTimeout{} @@ -486,105 +536,59 @@ func (r *Resource) Refresh( // Make a copy of data so that if it is modified it doesn't // affect our Read later. 
data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return s, append(diags, diag.FromErr(err)) + } data.timeouts = &rt - if err != nil { - return s, err + if s != nil { + data.providerMeta = s.ProviderMeta } exists, err := r.Exists(data, meta) if err != nil { - return s, err + return s, append(diags, diag.FromErr(err)) } + if !exists { - return nil, nil + return nil, diags } } - // there may be new StateUpgraders that need to be run - s, err := r.upgradeState(s, meta) + data, err := schemaMap(r.Schema).Data(s, nil) if err != nil { - return s, err + return s, append(diags, diag.FromErr(err)) } - - data, err := schemaMap(r.Schema).Data(s, nil) data.timeouts = &rt - if err != nil { - return s, err + + if s != nil { + data.providerMeta = s.ProviderMeta } - err = r.Read(data, meta) + diags = append(diags, r.read(ctx, data, meta)...) + state := data.State() if state != nil && state.ID == "" { state = nil } - return r.recordCurrentSchemaVersion(state), err + return r.recordCurrentSchemaVersion(state), diags } -func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - var err error - - needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) - migrate := needsMigration && r.MigrateState != nil - - if migrate { - s, err = r.MigrateState(stateSchemaVersion, s, meta) - if err != nil { - return s, err - } - } - - if len(r.StateUpgraders) == 0 { - return s, nil - } - - // If we ran MigrateState, then the stateSchemaVersion value is no longer - // correct. We can expect the first upgrade function to be the correct - // schema type version. - if migrate { - stateSchemaVersion = r.StateUpgraders[0].Version - } - - schemaType := r.CoreConfigSchema().ImpliedType() - // find the expected type to convert the state - for _, upgrader := range r.StateUpgraders { - if stateSchemaVersion == upgrader.Version { - schemaType = upgrader.Type - } - } - - // StateUpgraders only operate on the new JSON format state, so the state - // need to be converted. 
- stateVal, err := StateValueFromInstanceState(s, schemaType) - if err != nil { - return nil, err - } - - jsonState, err := StateValueToJSONMap(stateVal, schemaType) - if err != nil { - return nil, err - } - - for _, upgrader := range r.StateUpgraders { - if stateSchemaVersion != upgrader.Version { - continue - } +func (r *Resource) createFuncSet() bool { + return (r.Create != nil || r.CreateContext != nil) +} - jsonState, err = upgrader.Upgrade(jsonState, meta) - if err != nil { - return nil, err - } - stateSchemaVersion++ - } +func (r *Resource) readFuncSet() bool { + return (r.Read != nil || r.ReadContext != nil) +} - // now we need to re-flatmap the new state - stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema()) - if err != nil { - return nil, err - } +func (r *Resource) updateFuncSet() bool { + return (r.Update != nil || r.UpdateContext != nil) +} - return r.ShimInstanceStateFromValue(stateVal) +func (r *Resource) deleteFuncSet() bool { + return (r.Delete != nil || r.DeleteContext != nil) } // InternalValidate should be called to validate the structure @@ -603,7 +607,7 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error } if !writable { - if r.Create != nil || r.Update != nil || r.Delete != nil { + if r.createFuncSet() || r.updateFuncSet() || r.deleteFuncSet() { return fmt.Errorf("must not implement Create, Update or Delete") } @@ -617,7 +621,7 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error if r.isTopLevel() && writable { // All non-Computed attributes must be ForceNew if Update is not defined - if r.Update == nil { + if !r.updateFuncSet() { nonForceNewAttrs := make([]string, 0) for k, v := range r.Schema { if !v.ForceNew && !v.Computed { @@ -645,10 +649,10 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error tsm = schemaMap(r.Schema) // Destroy, and Read are required - if r.Read == nil { + if !r.readFuncSet() { return fmt.Errorf("Read must be implemented") } - if r.Delete == nil { + if !r.deleteFuncSet() { return fmt.Errorf("Delete must be implemented") } @@ -701,6 +705,40 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error } } + // check context funcs are not set alongside their nonctx counterparts + if r.CreateContext != nil && r.Create != nil { + return fmt.Errorf("CreateContext and Create should not both be set") + } + if r.ReadContext != nil && r.Read != nil { + return fmt.Errorf("ReadContext and Read should not both be set") + } + if r.UpdateContext != nil && r.Update != nil { + return fmt.Errorf("UpdateContext and Update should not both be set") + } + if r.DeleteContext != nil && r.Delete != nil { + return fmt.Errorf("DeleteContext and Delete should not both be set") + } + + // Warn of deprecations + if r.Exists != nil { + log.Printf("[WARN] Exists is deprecated, please encapsulate the logic in ReadContext") + } + if r.Create != nil && r.CreateContext == nil { + log.Printf("[WARN] Create is deprecated, please use CreateContext") + } + if r.Read != nil && r.ReadContext == nil { + log.Printf("[WARN] Read is deprecated, please use ReadContext") + } + if r.Update != nil && r.UpdateContext == nil { + log.Printf("[WARN] Update is deprecated, please use UpdateContext") + } + if r.Delete != nil && r.DeleteContext == nil { + log.Printf("[WARN] Delete is deprecated, please use DeleteContext") + } + if r.MigrateState != nil { + log.Printf("[WARN] MigrateState is deprecated, please use StateUpgraders") + } + return 
schemaMap(r.Schema).InternalValidate(tsm) } @@ -716,7 +754,7 @@ func isReservedDataSourceFieldName(name string) bool { func isReservedResourceFieldName(name string, s *Schema) bool { // Allow phasing out "id" // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 - if name == "id" && (s.Deprecated != "" || s.Removed != "") { + if name == "id" && s.Deprecated != "" { return false } @@ -767,47 +805,10 @@ func (r *Resource) TestResourceData() *ResourceData { } } -// SchemasForFlatmapPath tries its best to find a sequence of schemas that -// the given dot-delimited attribute path traverses through in the schema -// of the receiving Resource. -func (r *Resource) SchemasForFlatmapPath(path string) []*Schema { - return SchemasForFlatmapPath(path, r.Schema) -} - // Returns true if the resource is "top level" i.e. not a sub-resource. func (r *Resource) isTopLevel() bool { // TODO: This is a heuristic; replace with a definitive attribute? - return (r.Create != nil || r.Read != nil) -} - -// Determines if a given InstanceState needs to be migrated by checking the -// stored version number with the current SchemaVersion -func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) { - // Get the raw interface{} value for the schema version. If it doesn't - // exist or is nil then set it to zero. - raw := is.Meta["schema_version"] - if raw == nil { - raw = "0" - } - - // Try to convert it to a string. If it isn't a string then we pretend - // that it isn't set at all. It should never not be a string unless it - // was manually tampered with. - rawString, ok := raw.(string) - if !ok { - rawString = "0" - } - - stateSchemaVersion, _ := strconv.Atoi(rawString) - - // Don't run MigrateState if the version is handled by a StateUpgrader, - // since StateMigrateFuncs are not required to handle unknown versions - maxVersion := r.SchemaVersion - if len(r.StateUpgraders) > 0 { - maxVersion = r.StateUpgraders[0].Version - } - - return stateSchemaVersion < maxVersion, stateSchemaVersion + return (r.createFuncSet() || r.readFuncSet()) } func (r *Resource) recordCurrentSchemaVersion( @@ -827,6 +828,12 @@ func Noop(*ResourceData, interface{}) error { return nil } +// NoopContext is a convenience implementation of context aware resource function which takes +// no action and returns no error. +func NoopContext(context.Context, *ResourceData, interface{}) diag.Diagnostics { + return nil +} + // RemoveFromState is a convenience implementation of a resource function // which sets the resource ID to empty string (to remove it from state) // and returns no error. 
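The resource.go hunks above are the core of the v2 migration this patch performs: every CRUD operation gains a context.Context (already bounded by the per-operation timeout via the new create/read/update/delete wrappers) and returns diag.Diagnostics instead of error, while InternalValidate rejects resources that set a legacy function alongside its context-aware counterpart. A rough sketch of the shape a migrated resource takes under this API; the resource, attribute, and function names here are illustrative and not taken from this provider:

package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// resourceWidget is a hypothetical resource showing the v2 shape: only the
// context-aware fields are set, since setting Read alongside ReadContext
// would fail InternalValidate per the hunk above.
func resourceWidget() *schema.Resource {
	return &schema.Resource{
		CreateContext: resourceWidgetCreate,
		ReadContext:   resourceWidgetRead,
		DeleteContext: resourceWidgetDelete,

		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
		},
	}
}

func resourceWidgetCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	// meta is whatever ConfigureContextFunc returned; a real provider would
	// call its API client here, forwarding ctx so Terraform can cancel it.
	d.SetId(d.Get("name").(string))
	return resourceWidgetRead(ctx, d, meta)
}

func resourceWidgetRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	var diags diag.Diagnostics
	if err := d.Set("name", d.Id()); err != nil {
		// diag.FromErr converts a plain error into a single error diagnostic.
		return diag.FromErr(err)
	}
	// Diagnostics accumulate, so a read can surface warnings without
	// aborting the operation, which the old error signature could not express.
	diags = append(diags, diag.Diagnostic{
		Severity: diag.Warning,
		Summary:  fmt.Sprintf("read widget %q", d.Id()),
	})
	return diags
}

func resourceWidgetDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	// The SDK clears the ID from state after a successful delete; clearing
	// it here as well keeps the intent explicit.
	d.SetId("")
	return nil
}
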
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go similarity index 85% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go index a8a6e223..2be5278b 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go @@ -7,32 +7,32 @@ import ( "sync" "time" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/gocty" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // ResourceData is used to query and set the attributes of a resource. // // ResourceData is the primary argument received for CRUD operations on // a resource as well as configuration of a provider. It is a powerful -// structure that can be used to not only query data, but check for changes, -// define partial state updates, etc. +// structure that can be used to not only query data, but also check for changes // -// The most relevant methods to take a look at are Get, Set, and Partial. +// The most relevant methods to take a look at are Get and Set. type ResourceData struct { // Settable (internally) - schema map[string]*Schema - config *terraform.ResourceConfig - state *terraform.InstanceState - diff *terraform.InstanceDiff - meta map[string]interface{} - timeouts *ResourceTimeout + schema map[string]*Schema + config *terraform.ResourceConfig + state *terraform.InstanceState + diff *terraform.InstanceDiff + meta map[string]interface{} + timeouts *ResourceTimeout + providerMeta cty.Value // Don't set multiReader *MultiLevelFieldReader setWriter *MapFieldWriter newState *terraform.InstanceState - partial bool - partialMap map[string]struct{} once sync.Once isNew bool @@ -49,17 +49,6 @@ type getResult struct { Schema *Schema } -// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary -// values, bypassing schema. This MUST NOT be used in normal circumstances - -// it exists only to support the remote_state data source. -// -// Deprecated: Fully define schema attributes and use Set() instead. -func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { - d.once.Do(d.init) - - d.setWriter.unsafeWriteField(key, value) -} - // Get returns the data for the given key, or nil if the key doesn't exist // in the schema. // @@ -152,25 +141,6 @@ func (d *ResourceData) HasChange(key string) bool { return !reflect.DeepEqual(o, n) } -// Partial turns partial state mode on/off. -// -// When partial state mode is enabled, then only key prefixes specified -// by SetPartial will be in the final state. This allows providers to return -// partial states for partially applied resources (when errors occur). -// -// Deprecated: Partial state has very limited benefit given Terraform refreshes -// before operations by default. -func (d *ResourceData) Partial(on bool) { - d.partial = on - if on { - if d.partialMap == nil { - d.partialMap = make(map[string]struct{}) - } - } else { - d.partialMap = nil - } -} - // Set sets the value for the given key. 
// // If the key is invalid or the value is not a correct type, an error @@ -203,21 +173,6 @@ func (d *ResourceData) Set(key string, value interface{}) error { return err } -// SetPartial adds the key to the final state output while -// in partial state mode. The key must be a root key in the schema (i.e. -// it cannot be "list.0"). -// -// If partial state mode is disabled, then this has no effect. Additionally, -// whenever partial state mode is toggled, the partial data is cleared. -// -// Deprecated: Partial state has very limited benefit given Terraform refreshes -// before operations by default. -func (d *ResourceData) SetPartial(k string) { - if d.partial { - d.partialMap[k] = struct{}{} - } -} - func (d *ResourceData) MarkNewResource() { d.isNew = true } @@ -328,12 +283,6 @@ func (d *ResourceData) State() *terraform.InstanceState { rawMap := make(map[string]interface{}) for k := range d.schema { source := getSourceSet - if d.partial { - source = getSourceState - if _, ok := d.partialMap[k]; ok { - source = getSourceSet - } - } raw := d.get([]string{k}, source) if raw.Exists && !raw.Computed { @@ -560,3 +509,10 @@ func (d *ResourceData) get(addr []string, source getSource) getResult { Schema: schema, } } + +func (d *ResourceData) GetProviderMeta(dst interface{}) error { + if d.providerMeta.IsNull() { + return nil + } + return gocty.FromCtyValue(d.providerMeta, &dst) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go similarity index 99% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go index f55a66e1..984929df 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // newValueWriter is a minor re-implementation of MapFieldWriter to include diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go similarity index 61% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go index 5dada3ca..35c68ace 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go @@ -1,5 +1,11 @@ package schema +import ( + "context" + "errors" + "log" +) + // ResourceImporter defines how a resource is imported in Terraform. 
This // can be set onto a Resource struct to make it Importable. Not all resources // have to be importable; if a Resource doesn't have a ResourceImporter then @@ -9,15 +15,26 @@ package schema // resource and bringing it under Terraform management. This can include // updating Terraform state, generating Terraform configuration, etc. type ResourceImporter struct { - // The functions below must all be implemented for importing to work. - // State is called to convert an ID to one or more InstanceState to - // insert into the Terraform state. If this isn't specified, then - // the ID is passed straight through. + // insert into the Terraform state. + // + // Deprecated: State is deprecated in favor of StateContext. + // Only one of the two functions can bet set. State StateFunc + + // StateContext is called to convert an ID to one or more InstanceState to + // insert into the Terraform state. If this isn't specified, then + // the ID is passed straight through. This function receives a context + // that will cancel if Terraform sends a cancellation signal. + StateContext StateContextFunc } -// StateFunc is the function called to import a resource into the +// StateFunc is the function called to import a resource into the Terraform state. +// +// Deprecated: Please use the context aware equivalent StateContextFunc. +type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) + +// StateContextFunc is the function called to import a resource into the // Terraform state. It is given a ResourceData with only ID set. This // ID is going to be an arbitrary value given by the user and may not map // directly to the ID format that the resource expects, so that should @@ -31,7 +48,7 @@ type ResourceImporter struct { // // To create the ResourceData structures for other resource types (if // you have to), instantiate your resource and call the Data function. -type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) +type StateContextFunc func(context.Context, *ResourceData, interface{}) ([]*ResourceData, error) // InternalValidate should be called to validate the structure of this // importer. This should be called in a unit test. @@ -41,12 +58,27 @@ type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) // automatically called by Provider.InternalValidate(), so you only need // to internal validate the provider. func (r *ResourceImporter) InternalValidate() error { + if r.State != nil && r.StateContext != nil { + return errors.New("Both State and StateContext cannot be set.") + } + if r.State != nil && r.StateContext == nil { + log.Printf("[WARN] State is deprecated, please use StateContext") + } return nil } // ImportStatePassthrough is an implementation of StateFunc that can be +// used to simply pass the ID directly through. +// +// Deprecated: Please use the context aware ImportStatePassthroughContext instead +func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { + log.Printf("[WARN] ImportStatePassthrough is deprecated, please use ImportStatePassthroughContext") + return []*ResourceData{d}, nil +} + +// ImportStatePassthroughContext is an implementation of StateContextFunc that can be // used to simply pass the ID directly through. This should be used only // in the case that an ID-only refresh is possible. 
-func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { +func ImportStatePassthroughContext(ctx context.Context, d *ResourceData, m interface{}) ([]*ResourceData, error) { return []*ResourceData{d}, nil } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go similarity index 98% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go index f12bf725..cee0b678 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go @@ -5,9 +5,10 @@ import ( "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/mitchellh/copystructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go similarity index 75% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go index 69ab09b7..f39af0a9 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go @@ -14,47 +14,26 @@ package schema import ( "context" "fmt" + "log" "os" "reflect" "regexp" "sort" "strconv" "strings" - "sync" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/go-cty/cty" "github.com/mitchellh/copystructure" "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // Name of ENV variable which (if not empty) prefers panic over error const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" -// type used for schema package context keys -type contextKey string - -var ( - protoVersionMu sync.Mutex - protoVersion5 = false -) - -func isProto5() bool { - protoVersionMu.Lock() - defer protoVersionMu.Unlock() - return protoVersion5 - -} - -// SetProto5 enables a feature flag for any internal changes required required -// to work with the new plugin protocol. This should not be called by -// provider. -func SetProto5() { - protoVersionMu.Lock() - defer protoVersionMu.Unlock() - protoVersion5 = true -} - // Schema is used to describe the structure of a value. // // Read the documentation of the struct elements for important details. 
@@ -191,14 +170,6 @@ type Schema struct { MaxItems int MinItems int - // PromoteSingle originally allowed for a single element to be assigned - // where a primitive list was expected, but this no longer works from - // Terraform v0.12 onwards (Terraform Core will require a list to be set - // regardless of what this is set to) and so only applies to Terraform v0.11 - // and earlier, and so should be used only to retain this functionality - // for those still using v0.11 with a provider that formerly used this. - PromoteSingle bool - // The following fields are only valid for a TypeSet type. // // Set defines a function to determine the unique ID of an item so that @@ -223,9 +194,12 @@ type Schema struct { // // AtLeastOneOf is a set of schema keys that, when set, at least one of // the keys in that list must be specified. + // + // RequiredWith is a set of schema keys that must be set simultaneously. ConflictsWith []string ExactlyOneOf []string AtLeastOneOf []string + RequiredWith []string // When Deprecated is set, this attribute is deprecated. // @@ -234,14 +208,6 @@ type Schema struct { // how to address the deprecation. Deprecated string - // When Removed is set, this attribute has been removed from the schema - // - // Removed attributes can be left in the Schema to generate informative error - // messages for the user when they show up in resource configurations. - // This string is the message shown to the user with instructions on - // what do to about the removed attribute. - Removed string - // ValidateFunc allows individual fields to define arbitrary validation // logic. It is yielded the provided config value as an interface{} that is // guaranteed to be of the proper Schema type, and it can yield warnings or @@ -249,8 +215,33 @@ type Schema struct { // // ValidateFunc is honored only when the schema's Type is set to TypeInt, // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. + // + // Deprecated: please use ValidateDiagFunc ValidateFunc SchemaValidateFunc + // ValidateDiagFunc allows individual fields to define arbitrary validation + // logic. It is yielded the provided config value as an interface{} that is + // guaranteed to be of the proper Schema type, and it can yield diagnostics + // based on inspection of that value. + // + // ValidateDiagFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. + // + // ValidateDiagFunc is also yielded the cty.Path the SDK has built up to this + // attribute. The SDK will automatically set the AttributePath of any returned + // Diagnostics to this path. Therefore the developer does not need to set + // the AttributePath for primitive types. + // + // In the case of TypeMap to provide the most precise information, please + // set an AttributePath with the additional cty.IndexStep: + // + // AttributePath: cty.IndexStringPath("key_name") + // + // Or alternatively use the passed in path to create the absolute path: + // + // AttributePath: append(path, cty.IndexStep{Key: cty.StringVal("key_name")}) + ValidateDiagFunc SchemaValidateDiagFunc + // Sensitive ensures that the attribute's value does not get displayed in // logs or regular output. It should be used for passwords or other // secret fields. Future versions of Terraform may encrypt these @@ -318,8 +309,14 @@ type SchemaStateFunc func(interface{}) string // SchemaValidateFunc is a function used to validate a single field in the // schema. 
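A minimal sketch of the new ValidateDiagFunc, assuming the v2 diag and hashicorp/go-cty packages referenced in this hunk; the port-range rule is a hypothetical example, not part of this patch:

package example

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
)

// validatePort emits an error diagnostic, attached to the attribute's
// cty.Path, when the value is outside the TCP port range.
func validatePort(v interface{}, path cty.Path) diag.Diagnostics {
	var diags diag.Diagnostics
	port, ok := v.(int)
	if !ok || port < 1 || port > 65535 {
		diags = append(diags, diag.Diagnostic{
			Severity:      diag.Error,
			Summary:       "invalid port",
			Detail:        fmt.Sprintf("expected an integer between 1 and 65535, got %v", v),
			AttributePath: path,
		})
	}
	return diags
}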
+// +// Deprecated: please use SchemaValidateDiagFunc type SchemaValidateFunc func(interface{}, string) ([]string, []error) +// SchemaValidateDiagFunc is a function used to validate a single field in the +// schema and has Diagnostic support. +type SchemaValidateDiagFunc func(interface{}, cty.Path) diag.Diagnostics + func (s *Schema) GoString() string { return fmt.Sprintf("*%#v", *s) } @@ -434,6 +431,37 @@ func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *t return d } +func (s *Schema) validateFunc(decoded interface{}, k string, path cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + + if s.ValidateDiagFunc != nil { + diags = s.ValidateDiagFunc(decoded, path) + for i := range diags { + if !diags[i].AttributePath.HasPrefix(path) { + diags[i].AttributePath = append(path, diags[i].AttributePath...) + } + } + } else if s.ValidateFunc != nil { + ws, es := s.ValidateFunc(decoded, k) + for _, w := range ws { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: w, + AttributePath: path, + }) + } + for _, e := range es { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: e.Error(), + AttributePath: path, + }) + } + } + + return diags +} + // InternalMap is used to aid in the transition to the new schema types and // protocol. The name is not meant to convey any usefulness, as this is not to // be used directly by any providers. @@ -443,10 +471,18 @@ type InternalMap = schemaMap type schemaMap map[string]*Schema func (m schemaMap) panicOnError() bool { - if os.Getenv(PanicOnErr) != "" { + if env := os.Getenv(PanicOnErr); env == "" { + // default to true + return true + } else if b, err := strconv.ParseBool(env); err == nil { + // allow opt out + return b + } else { + // default to true for anything set, this is backwards compatible + // with the previous implementation + log.Printf("[WARN] %s=%s not parsable: %s, defaulting to true", PanicOnErr, env, err) return true } - return false } // Data returns a ResourceData for the given schema, state, and diff. @@ -476,6 +512,7 @@ func (m *schemaMap) DeepCopy() schemaMap { // Diff returns the diff for a resource given the schema map, // state, and configuration. func (m schemaMap) Diff( + ctx context.Context, s *terraform.InstanceState, c *terraform.ResourceConfig, customizeDiff CustomizeDiffFunc, @@ -515,7 +552,7 @@ func (m schemaMap) Diff( if !result.DestroyTainted && customizeDiff != nil { mc := m.DeepCopy() rd := newResourceDiff(mc, c, s, result) - if err := customizeDiff(rd, meta); err != nil { + if err := customizeDiff(ctx, rd, meta); err != nil { return nil, err } for _, k := range rd.UpdatedKeys() { @@ -556,7 +593,7 @@ func (m schemaMap) Diff( if !result2.DestroyTainted && customizeDiff != nil { mc := m.DeepCopy() rd := newResourceDiff(mc, c, d.state, result2) - if err := customizeDiff(rd, meta); err != nil { + if err := customizeDiff(ctx, rd, meta); err != nil { return nil, err } for _, k := range rd.UpdatedKeys() { @@ -619,71 +656,9 @@ func (m schemaMap) Diff( return result, nil } -// Input implements the terraform.ResourceProvider method by asking -// for input for required configuration keys that don't have a value. 
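As the customizeDiff(ctx, rd, meta) call sites above show, CustomizeDiffFunc now receives a context in v2. A sketch of the new signature; the "engine_version" attribute is hypothetical:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// customizeExample matches the context-aware CustomizeDiffFunc signature.
func customizeExample(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error {
	if d.HasChange("engine_version") {
		// Force replacement whenever the version changes.
		return d.ForceNew("engine_version")
	}
	return nil
}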
-func (m schemaMap) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := m[k] - - // Skip things that don't require config, if that is even valid - // for a provider schema. - // Required XOR Optional must always be true to validate, so we only - // need to check one. - if v.Optional { - continue - } - - // Deprecated fields should never prompt - if v.Deprecated != "" { - continue - } - - // Skip things that have a value of some sort already - if _, ok := c.Raw[k]; ok { - continue - } - - // Skip if it has a default value - defaultValue, err := v.DefaultValue() - if err != nil { - return nil, fmt.Errorf("%s: error loading default: %s", k, err) - } - if defaultValue != nil { - continue - } - - var value interface{} - switch v.Type { - case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList: - continue - case TypeString: - value, err = m.inputString(input, k, v) - default: - panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) - } - - if err != nil { - return nil, fmt.Errorf( - "%s: %s", k, err) - } - - c.Config[k] = value - } - - return c, nil -} - // Validate validates the configuration against this schema mapping. -func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { - return m.validateObject("", m, c) +func (m schemaMap) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + return m.validateObject("", m, c, cty.Path{}) } // InternalValidate validates the format of this schema. This should be called @@ -767,21 +742,28 @@ func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) erro } if len(v.ConflictsWith) > 0 { - err := checkKeysAgainstSchemaFlags(k, v.ConflictsWith, topSchemaMap) + err := checkKeysAgainstSchemaFlags(k, v.ConflictsWith, topSchemaMap, v, false) if err != nil { return fmt.Errorf("ConflictsWith: %+v", err) } } + if len(v.RequiredWith) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.RequiredWith, topSchemaMap, v, true) + if err != nil { + return fmt.Errorf("RequiredWith: %+v", err) + } + } + if len(v.ExactlyOneOf) > 0 { - err := checkKeysAgainstSchemaFlags(k, v.ExactlyOneOf, topSchemaMap) + err := checkKeysAgainstSchemaFlags(k, v.ExactlyOneOf, topSchemaMap, v, true) if err != nil { return fmt.Errorf("ExactlyOneOf: %+v", err) } } if len(v.AtLeastOneOf) > 0 { - err := checkKeysAgainstSchemaFlags(k, v.AtLeastOneOf, topSchemaMap) + err := checkKeysAgainstSchemaFlags(k, v.AtLeastOneOf, topSchemaMap, v, true) if err != nil { return fmt.Errorf("AtLeastOneOf: %+v", err) } @@ -820,44 +802,107 @@ func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) erro } } - // Computed-only field - if v.Computed && !v.Optional { - if v.ValidateFunc != nil { - return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ - "there's nothing to validate on computed-only field", k) + if v.Type == TypeMap && v.Elem != nil { + switch v.Elem.(type) { + case *Resource: + return fmt.Errorf("%s: TypeMap with Elem *Resource not supported,"+ + "use TypeList/TypeSet with Elem *Resource or TypeMap with Elem *Schema", k) + } + } + + if computedOnly { + if len(v.AtLeastOneOf) > 0 { + return fmt.Errorf("%s: AtLeastOneOf is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if len(v.ConflictsWith) > 0 { + return fmt.Errorf("%s: ConflictsWith is for configurable attributes,"+ + "there's nothing to 
configure on computed-only field", k) + } + if v.Default != nil { + return fmt.Errorf("%s: Default is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.DefaultFunc != nil { + return fmt.Errorf("%s: DefaultFunc is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) } if v.DiffSuppressFunc != nil { return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ " between config and state representation. "+ "There is no config for computed-only field, nothing to compare.", k) } + if len(v.ExactlyOneOf) > 0 { + return fmt.Errorf("%s: ExactlyOneOf is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.InputDefault != "" { + return fmt.Errorf("%s: InputDefault is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.MaxItems > 0 { + return fmt.Errorf("%s: MaxItems is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.MinItems > 0 { + return fmt.Errorf("%s: MinItems is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.StateFunc != nil { + return fmt.Errorf("%s: StateFunc is extraneous, "+ + "value should just be changed before setting on computed-only field", k) + } + if v.ValidateFunc != nil { + return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + if v.ValidateDiagFunc != nil { + return fmt.Errorf("%s: ValidateDiagFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } } - if v.ValidateFunc != nil { + if v.ValidateFunc != nil || v.ValidateDiagFunc != nil { switch v.Type { case TypeList, TypeSet: - return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k) + return fmt.Errorf("%s: ValidateFunc and ValidateDiagFunc are not yet supported on lists or sets.", k) } } - if v.Deprecated == "" && v.Removed == "" { + if v.ValidateFunc != nil && v.ValidateDiagFunc != nil { + return fmt.Errorf("%s: ValidateFunc and ValidateDiagFunc cannot both be set", k) + } + + if v.Deprecated == "" { if !isValidFieldName(k) { return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) } } + + // Warn of deprecations + if v.ValidateFunc != nil && v.ValidateDiagFunc == nil { + log.Printf("[WARN] ValidateFunc is deprecated, please use ValidateDiagFunc") + } + } return nil } -func checkKeysAgainstSchemaFlags(k string, keys []string, topSchemaMap schemaMap) error { +func checkKeysAgainstSchemaFlags(k string, keys []string, topSchemaMap schemaMap, self *Schema, allowSelfReference bool) error { for _, key := range keys { parts := strings.Split(key, ".") sm := topSchemaMap var target *Schema - for _, part := range parts { - // Skip index fields - if _, err := strconv.Atoi(part); err == nil { + for idx, part := range parts { + // Skip index fields if 0 + partInt, err := strconv.Atoi(part) + + if err == nil { + if partInt != 0 { + return fmt.Errorf("%s configuration block reference (%s) can only use the .0. 
index for TypeList and MaxItems: 1 configuration blocks", k, key) + } + continue } @@ -866,13 +911,28 @@ func checkKeysAgainstSchemaFlags(k string, keys []string, topSchemaMap schemaMap return fmt.Errorf("%s references unknown attribute (%s) at part (%s)", k, key, part) } - if subResource, ok := target.Elem.(*Resource); ok { - sm = schemaMap(subResource.Schema) + subResource, ok := target.Elem.(*Resource) + + if !ok { + continue + } + + // Skip Type/MaxItems check if not the last element + if (target.Type == TypeSet || target.MaxItems != 1) && idx+1 != len(parts) { + return fmt.Errorf("%s configuration block reference (%s) can only be used with TypeList and MaxItems: 1 configuration blocks", k, key) } + + sm = schemaMap(subResource.Schema) } + if target == nil { return fmt.Errorf("%s cannot find target attribute (%s), sm: %#v", k, key, sm) } + + if target == self && !allowSelfReference { + return fmt.Errorf("%s cannot reference self (%s)", k, key) + } + if target.Required { return fmt.Errorf("%s cannot contain Required attribute (%s)", k, key) } @@ -881,6 +941,7 @@ func checkKeysAgainstSchemaFlags(k string, keys []string, topSchemaMap schemaMap return fmt.Errorf("%s cannot contain Computed(When) attribute (%s)", k, key) } } + return nil } @@ -1358,32 +1419,26 @@ func (m schemaMap) diffString( return nil } -func (m schemaMap) inputString( - input terraform.UIInput, - k string, - schema *Schema) (interface{}, error) { - result, err := input.Input(context.Background(), &terraform.InputOpts{ - Id: k, - Query: k, - Description: schema.Description, - Default: schema.InputDefault, - }) - - return result, err -} - func (m schemaMap) validate( k string, schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + raw, ok := c.Get(k) if !ok && schema.DefaultFunc != nil { // We have a dynamic default. Check if we have a value. 
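A sketch of the reference form that checkKeysAgainstSchemaFlags now enforces: nested keys may only use the .0. index, and only through a TypeList block with MaxItems: 1. All attribute names are hypothetical:

package example

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// exampleConflicts shows a ConflictsWith reference into a single-item
// configuration block, the only nested form accepted by the check above.
var exampleConflicts = map[string]*schema.Schema{
	"password": {
		Type:          schema.TypeString,
		Optional:      true,
		ConflictsWith: []string{"auth.0.token"},
	},
	"auth": {
		Type:     schema.TypeList,
		Optional: true,
		MaxItems: 1,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"token": {Type: schema.TypeString, Optional: true},
			},
		},
	},
}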
var err error raw, err = schema.DefaultFunc() if err != nil { - return nil, []error{fmt.Errorf( - "%q, error loading default: %s", k, err)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Loading Default", + Detail: err.Error(), + AttributePath: path, + }) } // We're okay as long as we had a value set @@ -1392,26 +1447,52 @@ func (m schemaMap) validate( err := validateExactlyOneAttribute(k, schema, c) if err != nil { - return nil, []error{err} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "ExactlyOne", + Detail: err.Error(), + AttributePath: path, + }) } err = validateAtLeastOneAttribute(k, schema, c) if err != nil { - return nil, []error{err} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "AtLeastOne", + Detail: err.Error(), + AttributePath: path, + }) } if !ok { if schema.Required { - return nil, []error{fmt.Errorf( - "%q: required field is not set", k)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Required attribute is not set", + AttributePath: path, + }) } - return nil, nil + return diags } if !schema.Required && !schema.Optional { // This is a computed-only field - return nil, []error{fmt.Errorf( - "%q: this field cannot be set", k)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Computed attribute cannot be set", + AttributePath: path, + }) + } + + err = validateRequiredWithAttribute(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "RequiredWith", + Detail: err.Error(), + AttributePath: path, + }) } // If the value is unknown then we can't validate it yet. @@ -1421,17 +1502,27 @@ func (m schemaMap) validate( // Required fields set via an interpolated value are accepted. 
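The validateRequiredWithAttribute helper added above backs the new RequiredWith schema flag: all keys in the set must be present together. A sketch with hypothetical attribute names:

package example

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// exampleRequiredWith: setting either attribute without the other fails
// validation with "all of `client_id,client_secret` must be specified".
var exampleRequiredWith = map[string]*schema.Schema{
	"client_id": {
		Type:         schema.TypeString,
		Optional:     true,
		RequiredWith: []string{"client_secret"},
	},
	"client_secret": {
		Type:         schema.TypeString,
		Optional:     true,
		Sensitive:    true,
		RequiredWith: []string{"client_id"},
	},
}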
if !isWhollyKnown(raw) { if schema.Deprecated != "" { - return []string{fmt.Sprintf("%q: [DEPRECATED] %s", k, schema.Deprecated)}, nil + return append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Attribute is deprecated", + Detail: schema.Deprecated, + AttributePath: path, + }) } - return nil, nil + return diags } err = validateConflictingAttributes(k, schema, c) if err != nil { - return nil, []error{err} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "ConflictsWith", + Detail: err.Error(), + AttributePath: path, + }) } - return m.validateType(k, raw, schema, c) + return m.validateType(k, raw, schema, c, path) } // isWhollyKnown returns false if the argument contains an UnknownVariableValue @@ -1494,6 +1585,27 @@ func removeDuplicates(elements []string) []string { return result } +func validateRequiredWithAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.RequiredWith) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.RequiredWith, k)) + sort.Strings(allKeys) + + for _, key := range allKeys { + if _, ok := c.Get(key); !ok { + return fmt.Errorf("%q: all of `%s` must be specified", k, strings.Join(allKeys, ",")) + } + } + + return nil +} + func validateExactlyOneAttribute( k string, schema *Schema, @@ -1557,33 +1669,33 @@ func (m schemaMap) validateList( k string, raw interface{}, schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + // first check if the list is wholly unknown if s, ok := raw.(string); ok { if s == hcl2shim.UnknownVariableValue { - return nil, nil + return diags } } // schemaMap can't validate nil if raw == nil { - return nil, nil + return diags } // We use reflection to verify the slice because you can't // case to []interface{} unless the slice is exactly that type. rawV := reflect.ValueOf(raw) - // If we support promotion and the raw value isn't a slice, wrap - // it in []interface{} and check again. - if schema.PromoteSingle && rawV.Kind() != reflect.Slice { - raw = []interface{}{raw} - rawV = reflect.ValueOf(raw) - } - if rawV.Kind() != reflect.Slice { - return nil, []error{fmt.Errorf( - "%s: should be a list", k)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a list", + AttributePath: path, + }) } // We can't validate list length if this came from a dynamic block. @@ -1591,19 +1703,27 @@ func (m schemaMap) validateList( // at this point, we're going to skip validation in the new protocol if // there are any unknowns. Validate will eventually be called again once // all values are known. 
- if isProto5() && !isWhollyKnown(raw) { - return nil, nil + if !isWhollyKnown(raw) { + return diags } // Validate length if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { - return nil, []error{fmt.Errorf( - "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "List longer than MaxItems", + Detail: fmt.Sprintf("Attribute supports %d item maximum, config has %d declared", schema.MaxItems, rawV.Len()), + AttributePath: path, + }) } if schema.MinItems > 0 && rawV.Len() < schema.MinItems { - return nil, []error{fmt.Errorf( - "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "List shorter than MinItems", + Detail: fmt.Sprintf("Attribute supports %d item minimum, config has %d declared", schema.MinItems, rawV.Len()), + AttributePath: path, + }) } // Now build the []interface{} @@ -1612,8 +1732,6 @@ func (m schemaMap) validateList( raws[i] = rawV.Index(i).Interface() } - var ws []string - var es []error for i, raw := range raws { key := fmt.Sprintf("%s.%d", k, i) @@ -1624,42 +1742,40 @@ func (m schemaMap) validateList( raw = r } - var ws2 []string - var es2 []error + p := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + switch t := schema.Elem.(type) { case *Resource: // This is a sub-resource - ws2, es2 = m.validateObject(key, t.Schema, c) + diags = append(diags, m.validateObject(key, t.Schema, c, p)...) case *Schema: - ws2, es2 = m.validateType(key, raw, t, c) + diags = append(diags, m.validateType(key, raw, t, c, p)...) } - if len(ws2) > 0 { - ws = append(ws, ws2...) - } - if len(es2) > 0 { - es = append(es, es2...) - } } - return ws, es + return diags } func (m schemaMap) validateMap( k string, raw interface{}, schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + // first check if the list is wholly unknown if s, ok := raw.(string); ok { if s == hcl2shim.UnknownVariableValue { - return nil, nil + return diags } } // schemaMap can't validate nil if raw == nil { - return nil, nil + return diags } // We use reflection to verify the slice because you can't // case to []interface{} unless the slice is exactly that type. @@ -1670,26 +1786,33 @@ func (m schemaMap) validateMap( // be rejected. reified, reifiedOk := c.Get(k) if reifiedOk && raw == reified && !c.IsComputed(k) { - return nil, []error{fmt.Errorf("%s: should be a map", k)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a map", + AttributePath: path, + }) } // Otherwise it's likely raw is an interpolation. - return nil, nil + return diags case reflect.Map: case reflect.Slice: default: - return nil, []error{fmt.Errorf("%s: should be a map", k)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a map", + AttributePath: path, + }) } // If it is not a slice, validate directly if rawV.Kind() != reflect.Slice { mapIface := rawV.Interface() - if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { - return nil, errs - } - if schema.ValidateFunc != nil { - return schema.ValidateFunc(mapIface, k) + diags = append(diags, validateMapValues(k, mapIface.(map[string]interface{}), schema, path)...) 
+ if diags.HasError() { + return diags } - return nil, nil + + return schema.validateFunc(mapIface, k, path) } // It is a slice, verify that all the elements are maps @@ -1701,62 +1824,86 @@ func (m schemaMap) validateMap( for _, raw := range raws { v := reflect.ValueOf(raw) if v.Kind() != reflect.Map { - return nil, []error{fmt.Errorf( - "%s: should be a map", k)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a map", + AttributePath: path, + }) } mapIface := v.Interface() - if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { - return nil, errs + diags = append(diags, validateMapValues(k, mapIface.(map[string]interface{}), schema, path)...) + if diags.HasError() { + return diags } } - if schema.ValidateFunc != nil { - validatableMap := make(map[string]interface{}) - for _, raw := range raws { - for k, v := range raw.(map[string]interface{}) { - validatableMap[k] = v - } + validatableMap := make(map[string]interface{}) + for _, raw := range raws { + for k, v := range raw.(map[string]interface{}) { + validatableMap[k] = v } - - return schema.ValidateFunc(validatableMap, k) } - return nil, nil + return schema.validateFunc(validatableMap, k, path) } -func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) { +func validateMapValues(k string, m map[string]interface{}, schema *Schema, path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + for key, raw := range m { valueType, err := getValueType(k, schema) + p := append(path, cty.IndexStep{Key: cty.StringVal(key)}) if err != nil { - return nil, []error{err} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) } switch valueType { case TypeBool: var n bool if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) } case TypeInt: var n int if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) } case TypeFloat: var n float64 if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) } case TypeString: var n string if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) } default: panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) } } - return nil, nil + return diags } func getValueType(k string, schema *Schema) (ValueType, error) { @@ -1785,35 +1932,33 @@ func getValueType(k string, schema *Schema) (ValueType, error) { func (m schemaMap) validateObject( k string, schema map[string]*Schema, - c *terraform.ResourceConfig) ([]string, []error) { + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + raw, _ := c.Get(k) // schemaMap can't validate nil if raw == nil { - return nil, nil + return diags } if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { - return nil, []error{fmt.Errorf( - "%s: 
expected object, got %s", - k, reflect.ValueOf(raw).Kind())} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Expected Object Type", + Detail: fmt.Sprintf("Expected object, got %s", reflect.ValueOf(raw).Kind()), + AttributePath: path, + }) } - var ws []string - var es []error for subK, s := range schema { key := subK if k != "" { key = fmt.Sprintf("%s.%s", k, subK) } - - ws2, es2 := m.validate(key, s, c) - if len(ws2) > 0 { - ws = append(ws, ws2...) - } - if len(es2) > 0 { - es = append(es, es2...) - } + diags = append(diags, m.validate(key, s, c, append(path, cty.GetAttrStep{Name: subK}))...) } // Detect any extra/unknown keys and report those as errors. @@ -1823,26 +1968,32 @@ func (m schemaMap) validateObject( if subk == TimeoutsConfigKey { continue } - es = append(es, fmt.Errorf( - "%s: invalid or unknown key: %s", k, subk)) + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Invalid or unknown key", + AttributePath: append(path, cty.GetAttrStep{Name: subk}), + }) } } } - return ws, es + return diags } func (m schemaMap) validatePrimitive( k string, raw interface{}, schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics // a nil value shouldn't happen in the old protocol, and in the new // protocol the types have already been validated. Either way, we can't // reflect on nil, so don't panic. if raw == nil { - return nil, nil + return diags } // Catch if the user gave a complex type where a primitive was @@ -1850,20 +2001,24 @@ func (m schemaMap) validatePrimitive( // doesn't contain Go type system terminology. switch reflect.ValueOf(raw).Type().Kind() { case reflect.Slice: - return nil, []error{ - fmt.Errorf("%s must be a single value, not a list", k), - } + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a single value, not a list", + AttributePath: path, + }) case reflect.Map: - return nil, []error{ - fmt.Errorf("%s must be a single value, not a map", k), - } + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a single value, not a map", + AttributePath: path, + }) default: // ok } if c.IsComputed(k) { // If the key is being computed, then it is not an error as // long as it's not a slice or map. - return nil, nil + return diags } var decoded interface{} @@ -1872,81 +2027,91 @@ func (m schemaMap) validatePrimitive( // Verify that we can parse this as the correct type var n bool if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) } decoded = n case TypeInt: - switch { - case isProto5(): - // We need to verify the type precisely, because WeakDecode will - // decode a float as an integer. - - // the config shims only use int for integral number values - if v, ok := raw.(int); ok { - decoded = v - } else { - return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)} - } - default: - // Verify that we can parse this as an int - var n int - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n + // We need to verify the type precisely, because WeakDecode will + // decode a float as an integer. 
+ + // the config shims only use int for integral number values + if v, ok := raw.(int); ok { + decoded = v + } else { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("Attribute must be a whole number, got %v", raw), + AttributePath: path, + }) } case TypeFloat: // Verify that we can parse this as an int var n float64 if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) } decoded = n case TypeString: // Verify that we can parse this as a string var n string if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) } decoded = n default: panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) } - if schema.ValidateFunc != nil { - return schema.ValidateFunc(decoded, k) - } - - return nil, nil + return append(diags, schema.validateFunc(decoded, k, path)...) } func (m schemaMap) validateType( k string, raw interface{}, schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - var ws []string - var es []error + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics switch schema.Type { - case TypeSet, TypeList: - ws, es = m.validateList(k, raw, schema, c) + case TypeList: + diags = m.validateList(k, raw, schema, c, path) + case TypeSet: + // indexing into sets is not representable in the current protocol + // best we can do is associate the path up to this attribute. + diags = m.validateList(k, raw, schema, c, path) + log.Printf("[WARN] Truncating attribute path of %d diagnostics for TypeSet", len(diags)) + for i := range diags { + diags[i].AttributePath = path + } case TypeMap: - ws, es = m.validateMap(k, raw, schema, c) + diags = m.validateMap(k, raw, schema, c, path) default: - ws, es = m.validatePrimitive(k, raw, schema, c) + diags = m.validatePrimitive(k, raw, schema, c, path) } if schema.Deprecated != "" { - ws = append(ws, fmt.Sprintf( - "%q: [DEPRECATED] %s", k, schema.Deprecated)) - } - - if schema.Removed != "" { - es = append(es, fmt.Errorf( - "%q: [REMOVED] %s", k, schema.Removed)) + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Deprecated Attribute", + Detail: schema.Deprecated, + AttributePath: path, + }) } - return ws, es + return diags } // Zero returns the zero value for a type. 
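The Schema.validateFunc bridge above converts legacy ValidateFunc output into diagnostics. A provider can apply the same translation to migrate incrementally; this wrapper is a sketch (it passes an empty key string, since the cty.Path carries the location), not an API from this patch:

package example

import (
	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// toDiagFunc wraps a legacy SchemaValidateFunc as a SchemaValidateDiagFunc,
// mirroring the warning/error translation in Schema.validateFunc above.
func toDiagFunc(fn schema.SchemaValidateFunc) schema.SchemaValidateDiagFunc {
	return func(v interface{}, path cty.Path) diag.Diagnostics {
		var diags diag.Diagnostics
		ws, es := fn(v, "")
		for _, w := range ws {
			diags = append(diags, diag.Diagnostic{Severity: diag.Warning, Summary: w, AttributePath: path})
		}
		for _, e := range es {
			diags = append(diags, diag.Diagnostic{Severity: diag.Error, Summary: e.Error(), AttributePath: path})
		}
		return diags
	}
}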
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go similarity index 94% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go index fe6d7504..0e0e3cca 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go @@ -91,7 +91,12 @@ func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Reso sm := resource.Schema m := val.(map[string]interface{}) var keys []string - for k := range sm { + allComputed := true + for k, v := range sm { + if v.Optional || v.Required { + allComputed = false + } + keys = append(keys, k) } sort.Strings(keys) @@ -100,7 +105,7 @@ func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Reso // Skip attributes that are not user-provided. Computed attributes // do not contribute to the hash since their ultimate value cannot // be known at plan/diff time. - if !(innerSchema.Required || innerSchema.Optional) { + if !allComputed && !(innerSchema.Required || innerSchema.Optional) { continue } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go similarity index 85% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go index daa431dd..1518391e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go @@ -8,7 +8,7 @@ import ( "strconv" "sync" - "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/hashcode" ) // HashString hashes strings. 
If you want a Set of strings, this is the @@ -150,13 +150,46 @@ func (s *Set) Union(other *Set) *Set { return result } +func checkSetMapEqual(m1, m2 map[string]interface{}) bool { + if (m1 == nil) != (m2 == nil) { + return false + } + if len(m1) != len(m2) { + return false + } + for k := range m1 { + v1 := m1[k] + v2, ok := m2[k] + if !ok { + return false + } + switch v1.(type) { + case map[string]interface{}: + same := checkSetMapEqual(v1.(map[string]interface{}), v2.(map[string]interface{})) + if !same { + return false + } + case *Set: + same := v1.(*Set).Equal(v2) + if !same { + return false + } + default: + same := reflect.DeepEqual(v1, v2) + if !same { + return false + } + } + } + return true +} + func (s *Set) Equal(raw interface{}) bool { other, ok := raw.(*Set) if !ok { return false } - - return reflect.DeepEqual(s.m, other.m) + return checkSetMapEqual(s.m, other.m) } // HashEqual simply checks to the keys the top-level map to the keys in the @@ -198,16 +231,13 @@ func (s *Set) add(item interface{}, computed bool) string { code := s.hash(item) if computed { code = "~" + code - - if isProto5() { - tmpCode := code - count := 0 - for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { - count++ - tmpCode = fmt.Sprintf("%s%d", code, count) - } - code = tmpCode + tmpCode := code + count := 0 + for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { + count++ + tmpCode = fmt.Sprintf("%s%d", code, count) } + code = tmpCode } if _, ok := s.m[code]; !ok { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go similarity index 81% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go index 93c601f8..40a5abc0 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go @@ -1,29 +1,30 @@ package schema import ( + "context" "encoding/json" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/hashicorp/go-cty/cty" + ctyjson "github.com/hashicorp/go-cty/cty/json" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // DiffFromValues takes the current state and desired state as cty.Values and // derives a terraform.InstanceDiff to give to the legacy providers. This is // used to take the states provided by the new ApplyResourceChange method and // convert them to a state+diff required for the legacy Apply method. -func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) { - return diffFromValues(prior, planned, res, nil) +func DiffFromValues(ctx context.Context, prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) { + return diffFromValues(ctx, prior, planned, res, nil) } // diffFromValues takes an additional CustomizeDiffFunc, so we can generate our // test fixtures from the legacy tests. 
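A small sketch of Set.Equal after the checkSetMapEqual change above: comparison is element-wise and now recurses into nested maps and nested *Set values rather than relying on reflect.DeepEqual alone:

package example

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// setsEqual returns true: the sets hold the same elements, and equality is
// independent of insertion order.
func setsEqual() bool {
	a := schema.NewSet(schema.HashString, []interface{}{"x", "y"})
	b := schema.NewSet(schema.HashString, []interface{}{"y", "x"})
	return a.Equal(b)
}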
In the new provider protocol the diff // only needs to be created for the apply operation, and any customizations // have already been done. -func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { +func diffFromValues(ctx context.Context, prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { instanceState, err := res.ShimInstanceStateFromValue(prior) if err != nil { return nil, err @@ -35,7 +36,7 @@ func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffF removeConfigUnknowns(cfg.Config) removeConfigUnknowns(cfg.Raw) - diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false) + diff, err := schemaMap(res.Schema).Diff(ctx, instanceState, cfg, cust, nil, false) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go similarity index 56% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go index 4d0fd736..27f9e882 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go @@ -1,20 +1,22 @@ package schema import ( - "testing" + "context" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // TestResourceDataRaw creates a ResourceData from a raw configuration map. func TestResourceDataRaw( - t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + t testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { t.Helper() c := terraform.NewResourceConfigRaw(raw) sm := schemaMap(schema) - diff, err := sm.Diff(nil, c, nil, nil, true) + diff, err := sm.Diff(context.Background(), nil, c, nil, nil, true) if err != nil { t.Fatalf("err: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go 
b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go new file mode 100644 index 00000000..064aeda2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go @@ -0,0 +1,47 @@ +package addrs + +import ( + "fmt" +) + +// instanceKey represents the key of an instance within an object that +// contains multiple instances due to using "count" or "for_each" arguments +// in configuration. +// +// intKey and stringKey are the two implementations of this type. No other +// implementations are allowed. The single instance of an object that _isn't_ +// using "count" or "for_each" is represented by NoKey, which is a nil +// InstanceKey. +type instanceKey interface { + instanceKeySigil() + String() string +} + +// NoKey represents the absense of an instanceKey, for the single instance +// of a configuration object that does not use "count" or "for_each" at all. +var NoKey instanceKey + +// intKey is the InstanceKey representation representing integer indices, as +// used when the "count" argument is specified or if for_each is used with +// a sequence type. +type intKey int + +func (k intKey) instanceKeySigil() { +} + +func (k intKey) String() string { + return fmt.Sprintf("[%d]", int(k)) +} + +// stringKey is the InstanceKey representation representing string indices, as +// used when the "for_each" argument is specified with a map or object type. +type stringKey string + +func (k stringKey) instanceKeySigil() { +} + +func (k stringKey) String() string { + // FIXME: This isn't _quite_ right because Go's quoted string syntax is + // slightly different than HCL's, but we'll accept it for now. + return fmt.Sprintf("[%q]", string(k)) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go new file mode 100644 index 00000000..98699f46 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go @@ -0,0 +1,13 @@ +package addrs + +// Module is an address for a module call within configuration. This is +// the static counterpart of ModuleInstance, representing a traversal through +// the static module call tree in configuration and does not take into account +// the potentially-multiple instances of a module that might be created by +// "count" and "for_each" arguments within those calls. +// +// This type should be used only in very specialized cases when working with +// the static module call tree. Type ModuleInstance is appropriate in more cases. +// +// Although Module is a slice, it should be treated as immutable after creation. +type Module []string diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go new file mode 100644 index 00000000..f31d833d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go @@ -0,0 +1,241 @@ +package addrs + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags" +) + +// ModuleInstance is an address for a particular module instance within the +// dynamic module tree. 
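A standalone sketch mirroring the instance-key String methods above: count indices render as [N] and for_each string keys as ["name"]. The addrs package itself is internal to the SDK, so this only reproduces the format:

package example

import "fmt"

// keyExamples returns "[3]" and `["prod"]`, matching intKey and stringKey.
func keyExamples() (string, string) {
	return fmt.Sprintf("[%d]", 3), fmt.Sprintf("[%q]", "prod")
}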
This is an extension of the static traversals +// represented by type Module that deals with the possibility of a single +// module call producing multiple instances via the "count" and "for_each" +// arguments. +// +// Although ModuleInstance is a slice, it should be treated as immutable after +// creation. +type ModuleInstance []ModuleInstanceStep + +func parseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { + mi, remain, diags := parseModuleInstancePrefix(traversal) + if len(remain) != 0 { + if len(remain) == len(traversal) { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid module instance address", + "A module instance address must begin with \"module.\".", + )) + } else { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid module instance address", + "The module instance address is followed by additional invalid content.", + )) + } + } + return mi, diags +} + +// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseProviderConfigCompact. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid. +func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + for _, err := range parseDiags.Errs() { + // ignore warnings, they don't matter in this case + diags = append(diags, tfdiags.FromError(err)) + } + if parseDiags.HasErrors() { + return nil, diags + } + + addr, addrDiags := parseModuleInstance(traversal) + diags = append(diags, addrDiags...) + return addr, diags +} + +func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) { + remain := traversal + var mi ModuleInstance + var diags tfdiags.Diagnostics + + for len(remain) > 0 { + var next string + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + next = tt.Name + case hcl.TraverseAttr: + next = tt.Name + default: + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Module address prefix must be followed by dot and then a name.", + )) + break + } + + if next != "module" { + break + } + + remain = remain[1:] + // If we have the prefix "module" then we should be followed by an + // module call name, as an attribute, and then optionally an index step + // giving the instance key. 
+ if len(remain) == 0 { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Prefix \"module.\" must be followed by a module name.", + )) + break + } + + var moduleName string + switch tt := remain[0].(type) { + case hcl.TraverseAttr: + moduleName = tt.Name + default: + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Prefix \"module.\" must be followed by a module name.", + )) + break + } + remain = remain[1:] + step := ModuleInstanceStep{ + Name: moduleName, + } + + if len(remain) > 0 { + if idx, ok := remain[0].(hcl.TraverseIndex); ok { + remain = remain[1:] + + switch idx.Key.Type() { + case cty.String: + step.InstanceKey = stringKey(idx.Key.AsString()) + case cty.Number: + var idxInt int + err := gocty.FromCtyValue(idx.Key, &idxInt) + if err == nil { + step.InstanceKey = intKey(idxInt) + } else { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + fmt.Sprintf("Invalid module index: %s.", err), + )) + } + default: + // Should never happen, because no other types are allowed in traversal indices. + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Invalid module key: must be either a string or an integer.", + )) + } + } + } + + mi = append(mi, step) + } + + var retRemain hcl.Traversal + if len(remain) > 0 { + retRemain = make(hcl.Traversal, len(remain)) + copy(retRemain, remain) + // The first element here might be either a TraverseRoot or a + // TraverseAttr, depending on whether we had a module address on the + // front. To make life easier for callers, we'll normalize to always + // start with a TraverseRoot. + if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { + retRemain[0] = hcl.TraverseRoot{ + Name: tt.Name, + SrcRange: tt.SrcRange, + } + } + } + + return mi, retRemain, diags +} + +// UnkeyedInstanceShim is a shim method for converting a Module address to the +// equivalent ModuleInstance address that assumes that no modules have +// keyed instances. +// +// This is a temporary allowance for the fact that Terraform does not presently +// support "count" and "for_each" on modules, and thus graph building code that +// derives graph nodes from configuration must just assume unkeyed modules +// in order to construct the graph. At a later time when "count" and "for_each" +// support is added for modules, all callers of this method will need to be +// reworked to allow for keyed module instances. +func (m Module) UnkeyedInstanceShim() ModuleInstance { + path := make(ModuleInstance, len(m)) + for i, name := range m { + path[i] = ModuleInstanceStep{Name: name} + } + return path +} + +// ModuleInstanceStep is a single traversal step through the dynamic module +// tree. It is used only as part of ModuleInstance. +type ModuleInstanceStep struct { + Name string + InstanceKey instanceKey +} + +// RootModuleInstance is the module instance address representing the root +// module, which is also the zero value of ModuleInstance. +var RootModuleInstance ModuleInstance + +// Child returns the address of a child module instance of the receiver, +// identified by the given name and key. +func (m ModuleInstance) Child(name string, key instanceKey) ModuleInstance { + ret := make(ModuleInstance, 0, len(m)+1) + ret = append(ret, m...) + return append(ret, ModuleInstanceStep{ + Name: name, + InstanceKey: key, + }) +} + +// String returns a string representation of the receiver, in the format used +// within e.g. user-provided resource addresses. 
+// +// The address of the root module has the empty string as its representation. +func (m ModuleInstance) String() string { + var buf bytes.Buffer + sep := "" + for _, step := range m { + buf.WriteString(sep) + buf.WriteString("module.") + buf.WriteString(step.Name) + if step.InstanceKey != NoKey { + buf.WriteString(step.InstanceKey.String()) + } + sep = "." + } + return buf.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go new file mode 100644 index 00000000..f56032b5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go @@ -0,0 +1,130 @@ +package addrs + +import ( + "fmt" +) + +// resource is an address for a resource block within configuration, which +// contains potentially-multiple resource instances if that configuration +// block uses "count" or "for_each". +type resource struct { + Mode resourceMode + Type string + Name string +} + +func (r resource) String() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + // Should never happen, but we'll return a string here rather than + // crashing just in case it does. + return fmt.Sprintf(".%s.%s", r.Type, r.Name) + } +} + +// resourceInstance is an address for a specific instance of a resource. +// When a resource is defined in configuration with "count" or "for_each" it +// produces zero or more instances, which can be addressed using this type. +type resourceInstance struct { + Resource resource + Key instanceKey +} + +func (r resourceInstance) ContainingResource() resource { + return r.Resource +} + +func (r resourceInstance) String() string { + if r.Key == NoKey { + return r.Resource.String() + } + return r.Resource.String() + r.Key.String() +} + +// absResource is an absolute address for a resource under a given module path. +type absResource struct { + Module ModuleInstance + Resource resource +} + +// Resource returns the address of a particular resource within the receiver. +func (m ModuleInstance) Resource(mode resourceMode, typeName string, name string) absResource { + return absResource{ + Module: m, + Resource: resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + } +} + +func (r absResource) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +// absResourceInstance is an absolute address for a resource instance under a +// given module path. +type absResourceInstance struct { + Module ModuleInstance + Resource resourceInstance +} + +// ResourceInstance returns the address of a particular resource instance within the receiver. +func (m ModuleInstance) ResourceInstance(mode resourceMode, typeName string, name string, key instanceKey) absResourceInstance { + return absResourceInstance{ + Module: m, + Resource: resourceInstance{ + Resource: resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + Key: key, + }, + } +} + +// ContainingResource returns the address of the resource that contains the +// receving resource instance. In other words, it discards the key portion +// of the address to produce an absResource value. 
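For orientation, a sketch of the rendered address forms produced by the String methods above; the resource names are hypothetical and the addrs types are internal, so this only shows the output format:

package example

import "fmt"

func addressForms() {
	fmt.Println("aws_instance.web")                    // ManagedResourceMode
	fmt.Println("data.aws_ami.ubuntu")                 // DataResourceMode
	fmt.Println(`module.network["prod"].aws_vpc.main`) // instance under a keyed module
}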
+func (r absResourceInstance) ContainingResource() absResource { + return absResource{ + Module: r.Module, + Resource: r.Resource.ContainingResource(), + } +} + +func (r absResourceInstance) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +// resourceMode defines which lifecycle applies to a given resource. Each +// resource lifecycle has a slightly different address format. +type resourceMode rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type resourceMode + +const ( + // InvalidResourceMode is the zero value of ResourceMode and is not + // a valid resource mode. + InvalidResourceMode resourceMode = 0 + + // ManagedResourceMode indicates a managed resource, as defined by + // "resource" blocks in configuration. + ManagedResourceMode resourceMode = 'M' + + // DataResourceMode indicates a data resource, as defined by + // "data" blocks in configuration. + DataResourceMode resourceMode = 'D' +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go new file mode 100644 index 00000000..3b2e65c3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go @@ -0,0 +1,33 @@ +// Code generated by "stringer -type resourceMode"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidResourceMode-0] + _ = x[ManagedResourceMode-77] + _ = x[DataResourceMode-68] +} + +const ( + _resourceMode_name_0 = "InvalidResourceMode" + _resourceMode_name_1 = "DataResourceMode" + _resourceMode_name_2 = "ManagedResourceMode" +) + +func (i resourceMode) String() string { + switch { + case i == 0: + return _resourceMode_name_0 + case i == 68: + return _resourceMode_name_1 + case i == 77: + return _resourceMode_name_2 + default: + return "resourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go similarity index 98% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go index 41a53374..48278abe 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go @@ -3,8 +3,8 @@ package configschema import ( "fmt" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" ) // CoerceValue attempts to force the given value to conform to the type diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go 
rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go
similarity index 98%
rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go
rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go
index 005da56b..51b8c5d2 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go
@@ -1,7 +1,7 @@
 package configschema
 
 import (
-	"github.com/zclconf/go-cty/cty"
+	"github.com/hashicorp/go-cty/cty"
 )
 
 // EmptyValue returns the "empty value" for the receiving block, which for
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go
new file mode 100644
index 00000000..edc9dadc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go
@@ -0,0 +1,68 @@
+package configschema
+
+import (
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// ImpliedType returns the cty.Type that would result from decoding a
+// configuration block using the receiving block schema.
+//
+// ImpliedType always returns a result, even if the given schema is
+// inconsistent.
+func (b *Block) ImpliedType() cty.Type {
+	if b == nil {
+		return cty.EmptyObject
+	}
+
+	atys := make(map[string]cty.Type)
+
+	for name, attrS := range b.Attributes {
+		atys[name] = attrS.Type
+	}
+
+	for name, blockS := range b.BlockTypes {
+		if _, exists := atys[name]; exists {
+			panic("invalid schema, blocks and attributes cannot have the same name")
+		}
+
+		childType := blockS.Block.ImpliedType()
+
+		switch blockS.Nesting {
+		case NestingSingle, NestingGroup:
+			atys[name] = childType
+		case NestingList:
+			// We prefer to use a list where possible, since it makes our
+			// implied type more complete, but if there are any
+			// dynamically-typed attributes inside we must use a tuple
+			// instead, which means our type _constraint_ must be
+			// cty.DynamicPseudoType to allow the tuple type to be decided
+			// separately for each value.
+			if childType.HasDynamicTypes() {
+				atys[name] = cty.DynamicPseudoType
+			} else {
+				atys[name] = cty.List(childType)
+			}
+		case NestingSet:
+			if childType.HasDynamicTypes() {
+				panic("can't use cty.DynamicPseudoType inside a block type with NestingSet")
+			}
+			atys[name] = cty.Set(childType)
+		case NestingMap:
+			// We prefer to use a map where possible, since it makes our
+			// implied type more complete, but if there are any
+			// dynamically-typed attributes inside we must use an object
+			// instead, which means our type _constraint_ must be
+			// cty.DynamicPseudoType to allow the object type to be decided
+			// separately for each value.
+ if childType.HasDynamicTypes() { + atys[name] = cty.DynamicPseudoType + } else { + atys[name] = cty.Map(childType) + } + default: + panic("invalid nesting type") + } + } + + return cty.Object(atys) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/nestingmode_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/nestingmode_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/nestingmode_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go similarity index 99% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go index b41a3096..6cddb9c6 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go @@ -1,7 +1,7 @@ package configschema import ( - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" ) // StringKind represents the format a string is in. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go similarity index 99% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go index bb4228d9..e620e76a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go @@ -5,9 +5,8 @@ import ( "strconv" "strings" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" ) // FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go similarity index 99% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go index 3403c026..e557845a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" ) // RequiresReplace takes a list of flatmapped paths from a diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go 
b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go similarity index 98% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go index a074c749..91e91547 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go @@ -4,9 +4,9 @@ import ( "fmt" "math/big" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" ) // UnknownVariableValue is a sentinel value that can be used diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go similarity index 99% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go index 92f0213d..87638b4e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go @@ -1,7 +1,7 @@ package hcl2shim import ( - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" ) // ValuesSDKEquivalent returns true if both of the given values seem equivalent diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/diagutils/diagutils.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/diagutils/diagutils.go new file mode 100644 index 00000000..6b69e2f1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/diagutils/diagutils.go @@ -0,0 +1,45 @@ +package diagutils + +import ( + "errors" + "fmt" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" +) + +type ErrorDiags diag.Diagnostics + +func (diags ErrorDiags) Errors() []error { + var es []error + for i := range diags { + if diags[i].Severity == diag.Error { + s := fmt.Sprintf("Error: %s", diags[i].Summary) + if diags[i].Detail != "" { + s = fmt.Sprintf("%s: %s", s, diags[i].Detail) + } + es = append(es, errors.New(s)) + } + } + return es +} + +func (diags ErrorDiags) Error() string { + return multierror.ListFormatFunc(diags.Errors()) +} + +type WarningDiags diag.Diagnostics + +func (diags WarningDiags) Warnings() []string { + var ws []string + for i := range diags { + if diags[i].Severity == diag.Warning { + s := fmt.Sprintf("Warning: %s", diags[i].Summary) + if diags[i].Detail != "" { + s = fmt.Sprintf("%s: %s", s, diags[i].Detail) + } + ws = append(ws, s) + } + } + 
return ws +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/doc.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/doc.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/grpc_provider.go similarity index 85% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/grpc_provider.go index 388f1ed5..b013fce6 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/grpc_provider.go @@ -5,46 +5,59 @@ import ( "fmt" "log" "strconv" + "sync" - "github.com/zclconf/go-cty/cty" - ctyconvert "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/msgpack" + "github.com/hashicorp/go-cty/cty" + ctyconvert "github.com/hashicorp/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty/msgpack" context "golang.org/x/net/context" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert" + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) const newExtraKey = "_new_extra_shim" -// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a -// proto.ProviderServer implementation. If the provided provider is not a -// *schema.Provider, this will return nil, -func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer { - sp, ok := p.(*schema.Provider) - if !ok { - return nil - } - +func NewGRPCProviderServer(p *schema.Provider) *GRPCProviderServer { return &GRPCProviderServer{ - provider: sp, + provider: p, + stopCh: make(chan struct{}), } } // GRPCProviderServer handles the server, or plugin side of the rpc connection. type GRPCProviderServer struct { provider *schema.Provider + stopCh chan struct{} + stopMu sync.Mutex +} + +func mergeStop(stopCh chan struct{}, cancel context.CancelFunc) { + <-stopCh + cancel() +} + +// StopContext derives a new context from the passed in grpc context. +// It creates a goroutine to wait for the server stop and propagates +// cancellation to the derived grpc context. 
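The comment above describes a small but subtle pattern: bridging a closable stop channel into context cancellation. Reduced to a stdlib-only sketch (the 50ms delay simply simulates a Stop RPC arriving):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// mergeStop cancels the derived context once stopCh is closed, mirroring
// what the provider server does when a plugin Stop request arrives.
func mergeStop(stopCh chan struct{}, cancel context.CancelFunc) {
	<-stopCh
	cancel()
}

func main() {
	stopCh := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go mergeStop(stopCh, cancel)

	// Simulate a Stop RPC arriving shortly after startup.
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(stopCh)
	}()

	// Long-running provider work would select on ctx.Done() like this.
	<-ctx.Done()
	fmt.Println("context cancelled:", ctx.Err())
}
```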
+func (s *GRPCProviderServer) StopContext(ctx context.Context) context.Context { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + stoppable, cancel := context.WithCancel(ctx) + // It's important to pass the reference to the current stopCh. + // Closing over with an anonymous function and referencing s.stopCh + // across a goroutine is unsafe, given a new stopCh is set in Stop() + go mergeStop(s.stopCh, cancel) + return stoppable } func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) { - // Here we are certain that the provider is being called through grpc, so - // make sure the feature flag for helper/schema is set - schema.SetProto5() resp := &proto.GetProviderSchema_Response{ ResourceSchemas: make(map[string]*proto.Schema), @@ -55,6 +68,10 @@ func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProvider Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()), } + resp.ProviderMeta = &proto.Schema{ + Block: convert.ConfigSchemaToProto(s.getProviderMetaSchemaBlock()), + } + for typ, res := range s.provider.ResourcesMap { resp.ResourceSchemas[typ] = &proto.Schema{ Version: int64(res.SchemaVersion), @@ -76,6 +93,10 @@ func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block { return schema.InternalMap(s.provider.Schema).CoreConfigSchema() } +func (s *GRPCProviderServer) getProviderMetaSchemaBlock() *configschema.Block { + return schema.InternalMap(s.provider.ProviderMetaSchema).CoreConfigSchema() +} + func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block { res := s.provider.ResourcesMap[name] return res.CoreConfigSchema() @@ -124,7 +145,7 @@ func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto } // this is deprecated, so don't set it - if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + if attrSchema.Deprecated != "" { return val, nil } @@ -176,8 +197,7 @@ func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - warns, errs := s.provider.Validate(config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.Validate(config)) preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) if err != nil { @@ -203,8 +223,7 @@ func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req * config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - warns, errs := s.provider.ValidateResource(req.TypeName, config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.ValidateResource(req.TypeName, config)) return resp, nil } @@ -228,16 +247,19 @@ func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *pr config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - warns, errs := s.provider.ValidateDataSource(req.TypeName, config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.ValidateDataSource(req.TypeName, config)) return resp, nil } -func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) 
(*proto.UpgradeResourceState_Response, error) { +func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) { resp := &proto.UpgradeResourceState_Response{} - res := s.provider.ResourcesMap[req.TypeName] + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } schemaBlock := s.getResourceSchemaBlock(req.TypeName) version := int(req.Version) @@ -249,7 +271,7 @@ func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto. // We first need to upgrade a flatmap state if it exists. // There should never be both a JSON and Flatmap state in the request. case len(req.RawState.Flatmap) > 0: - jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res) + jsonMap, version, err = s.upgradeFlatmapState(ctx, version, req.RawState.Flatmap, res) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) return resp, nil @@ -267,7 +289,7 @@ func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto. } // complete the upgrade of the JSON states - jsonMap, err = s.upgradeJSONState(version, jsonMap, res) + jsonMap, err = s.upgradeJSONState(ctx, version, jsonMap, res) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) return resp, nil @@ -311,7 +333,7 @@ func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto. // map[string]interface{}. // upgradeFlatmapState returns the json map along with the corresponding schema // version. -func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) { +func (s *GRPCProviderServer) upgradeFlatmapState(ctx context.Context, version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) { // this will be the version we've upgraded so, defaulting to the given // version in case no migration was called. 
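The context parameter now threaded through these upgrade helpers ultimately reaches each v2 StateUpgradeFunc as its first argument (see upgradeJSONState just below). A sketch of a provider-side upgrader under that signature; the resource shape and attribute names are invented for the example:

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// upgradeExampleV0 renames a hypothetical "name" attribute to "display_name".
// In SDK v2 the upgrade func receives the request context, so long-running
// migrations can observe cancellation.
func upgradeExampleV0(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
	if v, ok := rawState["name"]; ok {
		rawState["display_name"] = v
		delete(rawState, "name")
	}
	return rawState, nil
}

func exampleResource() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		Schema: map[string]*schema.Schema{
			"display_name": {Type: schema.TypeString, Optional: true},
		},
		StateUpgraders: []schema.StateUpgrader{{
			Version: 0,
			// Implied type of the version-0 schema, declared inline here
			// for brevity.
			Type:    cty.Object(map[string]cty.Type{"name": cty.String}),
			Upgrade: upgradeExampleV0,
		}},
	}
}

func main() {
	out, err := upgradeExampleV0(context.Background(), map[string]interface{}{"name": "web"}, nil)
	fmt.Println(out, err) // map[display_name:web] <nil>
}
```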
upgradedVersion := version @@ -343,7 +365,6 @@ func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]strin "schema_version": strconv.Itoa(version), }, } - is, err := res.MigrateState(version, is, s.provider.Meta()) if err != nil { return nil, 0, err @@ -384,7 +405,7 @@ func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]strin return jsonMap, upgradedVersion, err } -func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) { +func (s *GRPCProviderServer) upgradeJSONState(ctx context.Context, version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) { var err error for _, upgrader := range res.StateUpgraders { @@ -392,7 +413,7 @@ func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interfac continue } - m, err = upgrader.Upgrade(m, s.provider.Meta()) + m, err = upgrader.Upgrade(ctx, m, s.provider.Meta()) if err != nil { return nil, err } @@ -454,17 +475,18 @@ func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { } func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) { - resp := &proto.Stop_Response{} + s.stopMu.Lock() + defer s.stopMu.Unlock() - err := s.provider.Stop() - if err != nil { - resp.Error = err.Error() - } + // stop + close(s.stopCh) + // reset the stop signal + s.stopCh = make(chan struct{}) - return resp, nil + return &proto.Stop_Response{}, nil } -func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) { +func (s *GRPCProviderServer) Configure(ctx context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) { resp := &proto.Configure_Response{} schemaBlock := s.getProviderSchemaBlock() @@ -484,13 +506,13 @@ func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_R } config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - err = s.provider.Configure(config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + diags := s.provider.Configure(ctx, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) return resp, nil } -func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { +func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { resp := &proto.ReadResource_Response{ // helper/schema did previously handle private data during refresh, but // core is now going to expect this to be maintained in order to @@ -498,7 +520,11 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso Private: req.Private, } - res := s.provider.ResourcesMap[req.TypeName] + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } schemaBlock := s.getResourceSchemaBlock(req.TypeName) stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType()) @@ -522,9 +548,19 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso } instanceState.Meta = private - newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + 
pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.Msgpack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + instanceState.ProviderMeta = providerSchemaVal + } + + newInstanceState, diags := res.RefreshWithoutUpgrade(ctx, instanceState, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + if diags.HasError() { return resp, nil } @@ -567,7 +603,7 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso return resp, nil } -func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { +func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { resp := &proto.PlanResourceChange_Response{} // This is a signal to Terraform Core that we're doing the best we can to @@ -578,7 +614,11 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl // confusing downstream errors. resp.LegacyTypeSystem = true - res := s.provider.ResourcesMap[req.TypeName] + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } schemaBlock := s.getResourceSchemaBlock(req.TypeName) priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) @@ -602,10 +642,6 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl return resp, nil } - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - priorState, err := res.ShimInstanceStateFromValue(priorStateVal) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) @@ -621,6 +657,16 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl priorState.Meta = priorPrivate + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.Msgpack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorState.ProviderMeta = providerSchemaVal + } + // Ensure there are no nulls that will cause helper/schema to panic. 
if err := validateConfigNulls(proposedNewStateVal, nil); err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) @@ -630,7 +676,7 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl // turn the proposed state into a legacy configuration cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock) - diff, err := s.provider.SimpleDiff(info, priorState, cfg) + diff, err := res.SimpleDiff(ctx, priorState, cfg, s.provider.Meta()) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) return resp, nil @@ -784,13 +830,17 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl return resp, nil } -func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) { +func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) { resp := &proto.ApplyResourceChange_Response{ // Start with the existing state as a fallback NewState: req.PriorState, } - res := s.provider.ResourcesMap[req.TypeName] + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } schemaBlock := s.getResourceSchemaBlock(req.TypeName) priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) @@ -805,10 +855,6 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A return resp, nil } - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - priorState, err := res.ShimInstanceStateFromValue(priorStateVal) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) @@ -835,7 +881,7 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A Destroy: true, } } else { - diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res)) + diff, err = schema.DiffFromValues(ctx, priorStateVal, plannedStateVal, stripResourceModifiers(res)) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) return resp, nil @@ -882,11 +928,19 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A } } - newInstanceState, err := s.provider.Apply(info, priorState, diff) - // we record the error here, but continue processing any returned state. - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.Msgpack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorState.ProviderMeta = providerSchemaVal } + + newInstanceState, diags := res.Apply(ctx, priorState, diff, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + newStateVal := cty.NullVal(schemaBlock.ImpliedType()) // Always return a null value for destroy. 
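Both handlers above move state, config, and provider_meta payloads over the wire as msgpack encoded against the schema block's implied cty type. Here is that round trip in isolation, with an arbitrary object type standing in for a real schema:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/msgpack"
)

func main() {
	// Stand-in for a schema block's ImpliedType result.
	ty := cty.Object(map[string]cty.Type{
		"endpoint": cty.String,
		"retries":  cty.Number,
	})
	val := cty.ObjectVal(map[string]cty.Value{
		"endpoint": cty.StringVal("https://example.test"),
		"retries":  cty.NumberIntVal(3),
	})

	// Terraform Core marshals values against the type before the RPC...
	b, err := msgpack.Marshal(val, ty)
	if err != nil {
		panic(err)
	}

	// ...and the provider server unmarshals with the same type to recover
	// fully-typed values, as the handlers above do for each payload.
	got, err := msgpack.Unmarshal(b, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.GetAttr("endpoint").AsString())
}
```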
@@ -943,14 +997,14 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A return resp, nil } -func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) { +func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) { resp := &proto.ImportResourceState_Response{} info := &terraform.InstanceInfo{ Type: req.TypeName, } - newInstanceStates, err := s.provider.ImportState(info, req.Id) + newInstanceStates, err := s.provider.ImportState(ctx, info, req.Id) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) return resp, nil @@ -1001,7 +1055,7 @@ func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.I return resp, nil } -func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) { +func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) { resp := &proto.ReadDataSource_Response{} schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) @@ -1012,10 +1066,6 @@ func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDa return resp, nil } - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - // Ensure there are no nulls that will cause helper/schema to panic. if err := validateConfigNulls(configVal, nil); err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) @@ -1026,16 +1076,21 @@ func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDa // we need to still build the diff separately with the Read method to match // the old behavior - diff, err := s.provider.ReadDataDiff(info, config) + res, ok := s.provider.DataSourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown data source: %s", req.TypeName)) + return resp, nil + } + diff, err := res.Diff(ctx, nil, config, s.provider.Meta()) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) return resp, nil } // now we can get the new complete data source - newInstanceState, err := s.provider.ReadDataApply(info, diff) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + newInstanceState, diags := res.ReadDataApply(ctx, diff, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + if diags.HasError() { return resp, nil } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/unknown.go similarity index 96% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/unknown.go index a22a264f..fcf156b2 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/unknown.go @@ -3,8 +3,9 @@ package plugin import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" ) // SetUnknowns takes a cty.Value, and compares it to the schema setting any null diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go similarity index 97% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go index a8629046..0189190e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go @@ -1,8 +1,9 @@ package objchange import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" ) // NormalizeObjectFromLegacySDK takes an object that may have been generated diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go similarity index 64% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go index f20f0507..d7bfb067 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go @@ -1,24 +1,13 @@ package convert import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/zclconf/go-cty/cty" -) + "fmt" -// WarnsAndErrorsToProto converts the warnings and errors return by the legacy -// provider to protobuf diagnostics. -func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) { - for _, w := range warns { - diags = AppendProtoDiag(diags, w) - } + "github.com/hashicorp/go-cty/cty" - for _, e := range errs { - diags = AppendProtoDiag(diags, e) - } - - return diags -} + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" +) // AppendProtoDiag appends a new diagnostic from a warning string or an error. // This panics if d is not a string or error. @@ -31,6 +20,8 @@ func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnost Summary: d.Error(), Attribute: ap, }) + case diag.Diagnostics: + diags = append(diags, DiagsToProto(d)...) case error: diags = append(diags, &proto.Diagnostic{ Severity: proto.Diagnostic_ERROR, @@ -49,38 +40,57 @@ func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnost return diags } -// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics. -func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics +// ProtoToDiags converts a list of proto.Diagnostics to a diag.Diagnostics. 
+func ProtoToDiags(ds []*proto.Diagnostic) diag.Diagnostics { + var diags diag.Diagnostics for _, d := range ds { - var severity tfdiags.Severity + var severity diag.Severity switch d.Severity { case proto.Diagnostic_ERROR: - severity = tfdiags.Error + severity = diag.Error case proto.Diagnostic_WARNING: - severity = tfdiags.Warning - } - - var newDiag tfdiags.Diagnostic - - // if there's an attribute path, we need to create a AttributeValue diagnostic - if d.Attribute != nil { - path := AttributePathToPath(d.Attribute) - newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path) - } else { - newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail) + severity = diag.Warning } - diags = diags.Append(newDiag) + diags = append(diags, diag.Diagnostic{ + Severity: severity, + Summary: d.Summary, + Detail: d.Detail, + AttributePath: AttributePathToPath(d.Attribute), + }) } return diags } +func DiagsToProto(diags diag.Diagnostics) []*proto.Diagnostic { + var ds []*proto.Diagnostic + for _, d := range diags { + if err := d.Validate(); err != nil { + panic(fmt.Errorf("Invalid diagnostic: %s. This is always a bug in the provider implementation", err)) + } + protoDiag := &proto.Diagnostic{ + Summary: d.Summary, + Detail: d.Detail, + Attribute: PathToAttributePath(d.AttributePath), + } + if d.Severity == diag.Error { + protoDiag.Severity = proto.Diagnostic_ERROR + } else if d.Severity == diag.Warning { + protoDiag.Severity = proto.Diagnostic_WARNING + } + ds = append(ds, protoDiag) + } + return ds +} + // AttributePathToPath takes the proto encoded path and converts it to a cty.Path func AttributePathToPath(ap *proto.AttributePath) cty.Path { var p cty.Path + if ap == nil { + return p + } for _, step := range ap.Steps { switch selector := step.Selector.(type) { case *proto.AttributePath_Step_AttributeName: @@ -94,7 +104,7 @@ func AttributePathToPath(ap *proto.AttributePath) cty.Path { return p } -// AttributePathToPath takes a cty.Path and converts it to a proto-encoded path. +// PathToAttributePath takes a cty.Path and converts it to a proto-encoded path. func PathToAttributePath(p cty.Path) *proto.AttributePath { ap := &proto.AttributePath{} for _, step := range p { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go similarity index 91% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go index 88b8a9a6..4921ebab 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go @@ -6,9 +6,8 @@ import ( "reflect" "sort" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" ) // ConfigSchemaToProto takes a *configschema.Block and converts it to a @@ -89,14 +88,6 @@ func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Sch } } -// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema. 
-func ProtoToProviderSchema(s *proto.Schema) providers.Schema {
-	return providers.Schema{
-		Version: s.Version,
-		Block:   ProtoToConfigSchema(s.Block),
-	}
-}
-
 // ProtoToConfigSchema takes the GetSchema_Block from a grpc response and converts it
 // to a terraform *configschema.Block.
 func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block {
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go
similarity index 73%
rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go
rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go
index 8e41f46e..2201d0c0 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"strconv"
 
-	"github.com/zclconf/go-cty/cty"
+	"github.com/hashicorp/go-cty/cty"
 )
 
 // FormatCtyPath is a helper function to produce a user-friendly string
@@ -54,15 +54,3 @@ func FormatError(err error) string {
 
 	return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error())
 }
-
-// FormatErrorPrefixed is like FormatError except that it presents any path
-// information after the given prefix string, which is assumed to contain
-// an HCL syntax representation of the value that errors are relative to.
-func FormatErrorPrefixed(err error, prefix string) string {
-	perr, ok := err.(cty.PathError)
-	if !ok || len(perr.Path) == 0 {
-		return fmt.Sprintf("%s: %s", prefix, err.Error())
-	}
-
-	return fmt.Sprintf("%s%s: %s", prefix, FormatCtyPath(perr.Path), perr.Error())
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go
new file mode 100644
index 00000000..b063bc1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go
@@ -0,0 +1,81 @@
+package tfdiags
+
+import (
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// AttributeValue returns a diagnostic about an attribute value in an implied current
+// configuration context. This should be returned only from functions whose
+// interface specifies a clear configuration context that this will be
+// resolved in.
+//
+// The given path is relative to the implied configuration context. To describe
+// a top-level attribute, it should be a single-element cty.Path with a
+// cty.GetAttrStep. It's assumed that the path is returning into a structure
+// that would be produced by our conventions in the configschema package; it
+// may return unexpected results for structures that can't be represented by
+// configschema.
+//
+// Since mapping attribute paths back onto configuration is an imprecise
+// operation (e.g. dynamic block generation may cause the same block to be
+// evaluated multiple times) the diagnostic detail should include the attribute
+// name and other context required to help the user understand what is being
+// referenced in case the identified source range is not unique.
+//
+// The returned attribute will not have source location information until
+// context is applied to the containing diagnostics using diags.InConfigBody.
+// After context is applied, the source location is the value assigned to the +// named attribute, or the containing body's "missing item range" if no +// value is present. +func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { + return &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + attrPath: attrPath, + } +} + +// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains +// one. Normally this is not accessed directly, and instead the config body is +// added to the Diagnostic to create a more complete message for the user. In +// some cases however, we may want to know just the name of the attribute that +// generated the Diagnostic message. +// This returns a nil cty.Path if it does not exist in the Diagnostic. +func GetAttribute(d Diagnostic) cty.Path { + if d, ok := d.(*attributeDiagnostic); ok { + return d.attrPath + } + return nil +} + +type attributeDiagnostic struct { + diagnosticBase + attrPath cty.Path +} + +// WholeContainingBody returns a diagnostic about the body that is an implied +// current configuration context. This should be returned only from +// functions whose interface specifies a clear configuration context that this +// will be resolved in. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is currently the missing item +// range of the body. In future, this may change to some other suitable +// part of the containing body. +func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { + return &wholeBodyDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + } +} + +type wholeBodyDiagnostic struct { + diagnosticBase +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go new file mode 100644 index 00000000..a36da370 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go @@ -0,0 +1,20 @@ +package tfdiags + +type Diagnostic interface { + Severity() Severity + Description() Description +} + +type Severity rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity + +const ( + Error Severity = 'E' + Warning Severity = 'W' +) + +type Description struct { + Summary string + Detail string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go similarity index 73% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go index 50bf9d8e..34816208 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go @@ -2,7 +2,7 @@ package tfdiags // diagnosticBase can be embedded in other diagnostic structs to get // default implementations of Severity and Description. 
This type also -// has default implementations of Source and FromExpr that return no source +// has default implementations of Source that return no source // location or expression-related information, so embedders should generally // override those method to return more useful results where possible. type diagnosticBase struct { @@ -22,10 +22,10 @@ func (d diagnosticBase) Description() Description { } } -func (d diagnosticBase) Source() Source { - return Source{} -} - -func (d diagnosticBase) FromExpr() *FromExpr { - return nil +func Diag(sev Severity, summary, detail string) Diagnostic { + return &diagnosticBase{ + severity: sev, + summary: summary, + detail: detail, + } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go similarity index 64% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go index a19fa80c..925c14fa 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go @@ -3,13 +3,7 @@ package tfdiags import ( "bytes" "fmt" - "path/filepath" "sort" - "strings" - - "github.com/hashicorp/errwrap" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl/v2" ) // Diagnostics is a list of diagnostics. Diagnostics is intended to be used @@ -21,81 +15,6 @@ import ( // diagnostics to report at all. type Diagnostics []Diagnostic -// Append is the main interface for constructing Diagnostics lists, taking -// an existing list (which may be nil) and appending the new objects to it -// after normalizing them to be implementations of Diagnostic. -// -// The usual pattern for a function that natively "speaks" diagnostics is: -// -// // Create a nil Diagnostics at the start of the function -// var diags diag.Diagnostics -// -// // At later points, build on it if errors / warnings occur: -// foo, err := DoSomethingRisky() -// if err != nil { -// diags = diags.Append(err) -// } -// -// // Eventually return the result and diagnostics in place of error -// return result, diags -// -// Append accepts a variety of different diagnostic-like types, including -// native Go errors and HCL diagnostics. It also knows how to unwrap -// a multierror.Error into separate error diagnostics. It can be passed -// another Diagnostics to concatenate the two lists. If given something -// it cannot handle, this function will panic. -func (diags Diagnostics) Append(new ...interface{}) Diagnostics { - for _, item := range new { - if item == nil { - continue - } - - switch ti := item.(type) { - case Diagnostic: - diags = append(diags, ti) - case Diagnostics: - diags = append(diags, ti...) 
// flatten - case diagnosticsAsError: - diags = diags.Append(ti.Diagnostics) // unwrap - case NonFatalError: - diags = diags.Append(ti.Diagnostics) // unwrap - case hcl.Diagnostics: - for _, hclDiag := range ti { - diags = append(diags, hclDiagnostic{hclDiag}) - } - case *hcl.Diagnostic: - diags = append(diags, hclDiagnostic{ti}) - case *multierror.Error: - for _, err := range ti.Errors { - diags = append(diags, nativeError{err}) - } - case error: - switch { - case errwrap.ContainsType(ti, Diagnostics(nil)): - // If we have an errwrap wrapper with a Diagnostics hiding - // inside then we'll unpick it here to get access to the - // individual diagnostics. - diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil))) - case errwrap.ContainsType(ti, hcl.Diagnostics(nil)): - // Likewise, if we have HCL diagnostics we'll unpick that too. - diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil))) - default: - diags = append(diags, nativeError{ti}) - } - default: - panic(fmt.Errorf("can't construct diagnostic(s) from %T", item)) - } - } - - // Given the above, we should never end up with a non-nil empty slice - // here, but we'll make sure of that so callers can rely on empty == nil - if len(diags) == 0 { - return nil - } - - return diags -} - // HasErrors returns true if any of the diagnostics in the list have // a severity of Error. func (diags Diagnostics) HasErrors() bool { @@ -274,36 +193,10 @@ func (sd sortDiagnostics) Len() int { func (sd sortDiagnostics) Less(i, j int) bool { iD, jD := sd[i], sd[j] iSev, jSev := iD.Severity(), jD.Severity() - iSrc, jSrc := iD.Source(), jD.Source() switch { - case iSev != jSev: return iSev == Warning - - case (iSrc.Subject == nil) != (jSrc.Subject == nil): - return iSrc.Subject == nil - - case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject: - iSubj := iSrc.Subject - jSubj := jSrc.Subject - switch { - case iSubj.Filename != jSubj.Filename: - // Path with fewer segments goes first if they are different lengths - sep := string(filepath.Separator) - iCount := strings.Count(iSubj.Filename, sep) - jCount := strings.Count(jSubj.Filename, sep) - if iCount != jCount { - return iCount < jCount - } - return iSubj.Filename < jSubj.Filename - case iSubj.Start.Byte != jSubj.Start.Byte: - return iSubj.Start.Byte < jSubj.Start.Byte - case iSubj.End.Byte != jSubj.End.Byte: - return iSubj.End.Byte < jSubj.End.Byte - } - fallthrough - default: // The remaining properties do not have a defined ordering, so // we'll leave it unspecified. 
Since we use sort.Stable in diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go similarity index 60% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go index 13f7a714..b63c5d74 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go @@ -17,12 +17,8 @@ func (e nativeError) Description() Description { } } -func (e nativeError) Source() Source { - // No source information available for a native error - return Source{} -} - -func (e nativeError) FromExpr() *FromExpr { - // Native errors are not expression-related - return nil +func FromError(err error) Diagnostic { + return &nativeError{ + err: err, + } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go similarity index 66% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go index 485063b0..1b78887e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go @@ -8,8 +8,6 @@ type rpcFriendlyDiag struct { Severity_ Severity Summary_ string Detail_ string - Subject_ *SourceRange - Context_ *SourceRange } // rpcFriendlyDiag transforms a given diagnostic so that is more friendly to @@ -20,13 +18,10 @@ type rpcFriendlyDiag struct { // serializations later. func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { desc := diag.Description() - source := diag.Source() return &rpcFriendlyDiag{ Severity_: diag.Severity(), Summary_: desc.Summary, Detail_: desc.Detail, - Subject_: source.Subject, - Context_: source.Context, } } @@ -41,19 +36,6 @@ func (d *rpcFriendlyDiag) Description() Description { } } -func (d *rpcFriendlyDiag) Source() Source { - return Source{ - Subject: d.Subject_, - Context: d.Context_, - } -} - -func (d rpcFriendlyDiag) FromExpr() *FromExpr { - // RPC-friendly diagnostics cannot preserve expression information because - // expressions themselves are not RPC-friendly. 
- return nil -} - func init() { gob.Register((*rpcFriendlyDiag)(nil)) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/severity_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/severity_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/severity_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go similarity index 63% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go index b0f1ecd4..09191704 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go @@ -18,13 +18,3 @@ func (e simpleWarning) Description() Description { Summary: string(e), } } - -func (e simpleWarning) Source() Source { - // No source information available for a simple warning - return Source{} -} - -func (e simpleWarning) FromExpr() *FromExpr { - // Simple warnings are not expression-related - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/generate.sh similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/generate.sh rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/generate.sh diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.pb.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.pb.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.proto similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.proto rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.proto diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/grpc_provider.go new file mode 100644 index 00000000..4a605ce3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/grpc_provider.go @@ -0,0 +1,41 @@ +package plugin + +import ( + "context" + "errors" + "net/rpc" + + plugin "github.com/hashicorp/go-plugin" + "google.golang.org/grpc" + + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" +) + +var ( + _ plugin.GRPCPlugin = (*gRPCProviderPlugin)(nil) + _ plugin.Plugin = (*gRPCProviderPlugin)(nil) +) + +// gRPCProviderPlugin implements plugin.GRPCPlugin and plugin.Plugin for the go-plugin package. 
+// the only real implementation is GRPCServer, the other methods are only satisfied
+// for compatibility with go-plugin
+type gRPCProviderPlugin struct {
+	GRPCProvider func() proto.ProviderServer
+}
+
+func (p *gRPCProviderPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
+	return nil, errors.New("terraform-plugin-sdk only implements grpc servers")
+}
+
+func (p *gRPCProviderPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) {
+	return nil, errors.New("terraform-plugin-sdk only implements grpc servers")
+}
+
+func (p *gRPCProviderPlugin) GRPCClient(context.Context, *plugin.GRPCBroker, *grpc.ClientConn) (interface{}, error) {
+	return nil, errors.New("terraform-plugin-sdk only implements grpc servers")
+}
+
+func (p *gRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
+	proto.RegisterProviderServer(s, p.GRPCProvider())
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go
new file mode 100644
index 00000000..ca49542b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go
@@ -0,0 +1,70 @@
+package plugin
+
+import (
+	"context"
+
+	"github.com/hashicorp/go-plugin"
+	"google.golang.org/grpc"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	grpcplugin "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin"
+	proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5"
+)
+
+const (
+	// The constants below are the names of the plugins that can be dispensed
+	// from the plugin server.
+	ProviderPluginName = "provider"
+)
+
+// Handshake is the HandshakeConfig used to configure clients and servers.
+var Handshake = plugin.HandshakeConfig{
+	// The magic cookie values should NEVER be changed.
+	MagicCookieKey:   "TF_PLUGIN_MAGIC_COOKIE",
+	MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2",
+}
+
+type ProviderFunc func() *schema.Provider
+type GRPCProviderFunc func() proto.ProviderServer
+
+// ServeOpts are the configurations to serve a plugin.
+type ServeOpts struct {
+	ProviderFunc ProviderFunc
+
+	// Wrapped versions of the above plugins will automatically be shimmed and
+	// added to the GRPC functions when possible.
+	GRPCProviderFunc GRPCProviderFunc
+}
+
+// Serve serves a plugin. This function never returns and should be the final
+// function called in the main function of the plugin.
+func Serve(opts *ServeOpts) {
+	// since the plugins may not yet be aware of the new protocol, we
+	// automatically wrap the plugins in the grpc shims.
+	if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil {
+		opts.GRPCProviderFunc = func() proto.ProviderServer {
+			return grpcplugin.NewGRPCProviderServer(opts.ProviderFunc())
+		}
+	}
+
+	provider := opts.GRPCProviderFunc()
+
+	plugin.Serve(&plugin.ServeConfig{
+		HandshakeConfig: Handshake,
+		VersionedPlugins: map[int]plugin.PluginSet{
+			5: {
+				ProviderPluginName: &gRPCProviderPlugin{
+					GRPCProvider: func() proto.ProviderServer {
+						return provider
+					},
+				},
+			},
+		},
+		GRPCServer: func(opts []grpc.ServerOption) *grpc.Server {
+			return grpc.NewServer(append(opts, grpc.UnaryInterceptor(func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+				ctx = provider.(*grpcplugin.GRPCProviderServer).StopContext(ctx)
+				return handler(ctx, req)
+			}))...)
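// A usage sketch, not part of this patch: with the v2 Serve entry point above,
// a provider binary wires its *schema.Provider in from package main. The
// pingaccess import path below is an assumption based on this repository's
// layout, not something the patch itself pins down.
//
//	package main
//
//	import (
//		"github.com/hashicorp/terraform-plugin-sdk/v2/plugin"
//
//		"github.com/iwarapter/terraform-provider-pingaccess/pingaccess"
//	)
//
//	func main() {
//		plugin.Serve(&plugin.ServeOpts{
//			ProviderFunc: pingaccess.Provider,
//		})
//	}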
+ }, + }) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go similarity index 70% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go index fd5b389b..c7d82cf3 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go @@ -1,8 +1,6 @@ package terraform import ( - "bufio" - "bytes" "fmt" "log" "reflect" @@ -12,375 +10,27 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" - "github.com/mitchellh/copystructure" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" ) -// DiffChangeType is an enum with the kind of changes a diff has planned. -type DiffChangeType byte +// diffChangeType is an enum with the kind of changes a diff has planned. +type diffChangeType byte const ( - DiffInvalid DiffChangeType = iota - DiffNone - DiffCreate - DiffUpdate - DiffDestroy - DiffDestroyCreate - - // DiffRefresh is only used in the UI for displaying diffs. - // Managed resource reads never appear in plan, and when data source - // reads appear they are represented as DiffCreate in core before - // transforming to DiffRefresh in the UI layer. - DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion + diffInvalid diffChangeType = iota + diffNone + diffCreate + diffUpdate + diffDestroy + diffDestroyCreate ) // multiVal matches the index key to a flatmapped set, list or map var multiVal = regexp.MustCompile(`\.(#|%)$`) -// Diff tracks the changes that are necessary to apply a configuration -// to an existing infrastructure. -type Diff struct { - // Modules contains all the modules that have a diff - Modules []*ModuleDiff -} - -// Prune cleans out unused structures in the diff without affecting -// the behavior of the diff at all. -// -// This is not safe to call concurrently. This is safe to call on a -// nil Diff. -func (d *Diff) Prune() { - if d == nil { - return - } - - // Prune all empty modules - newModules := make([]*ModuleDiff, 0, len(d.Modules)) - for _, m := range d.Modules { - // If the module isn't empty, we keep it - if !m.Empty() { - newModules = append(newModules, m) - } - } - if len(newModules) == 0 { - newModules = nil - } - d.Modules = newModules -} - -// AddModule adds the module with the given path to the diff. -// -// This should be the preferred method to add module diffs since it -// allows us to optimize lookups later as well as control sorting. -func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { - // Lower the new-style address into a legacy-style address. - // This requires that none of the steps have instance keys, which is - // true for all addresses at the time of implementing this because - // "count" and "for_each" are not yet implemented for modules. 
- legacyPath := make([]string, len(path)) - for i, step := range path { - if step.InstanceKey != addrs.NoKey { - // FIXME: Once the rest of Terraform is ready to use count and - // for_each, remove all of this and just write the addrs.ModuleInstance - // value itself into the ModuleState. - panic("diff cannot represent modules with count or for_each keys") - } - - legacyPath[i] = step.Name - } - - m := &ModuleDiff{Path: legacyPath} - m.init() - d.Modules = append(d.Modules, m) - return m -} - -// ModuleByPath is used to lookup the module diff for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff { - if d == nil { - return nil - } - for _, mod := range d.Modules { - if mod.Path == nil { - panic("missing module path") - } - modPath := normalizeModulePath(mod.Path) - if modPath.String() == path.String() { - return mod - } - } - return nil -} - -// RootModule returns the ModuleState for the root module -func (d *Diff) RootModule() *ModuleDiff { - root := d.ModuleByPath(addrs.RootModuleInstance) - if root == nil { - panic("missing root module") - } - return root -} - -// Empty returns true if the diff has no changes. -func (d *Diff) Empty() bool { - if d == nil { - return true - } - - for _, m := range d.Modules { - if !m.Empty() { - return false - } - } - - return true -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. -func (d *Diff) Equal(d2 *Diff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Sort the modules - sort.Sort(moduleDiffSort(d.Modules)) - sort.Sort(moduleDiffSort(d2.Modules)) - - // Copy since we have to modify the module destroy flag to false so - // we don't compare that. TODO: delete this when we get rid of the - // destroy flag on modules. - dCopy := d.DeepCopy() - d2Copy := d2.DeepCopy() - for _, m := range dCopy.Modules { - m.Destroy = false - } - for _, m := range d2Copy.Modules { - m.Destroy = false - } - - // Use DeepEqual - return reflect.DeepEqual(dCopy, d2Copy) -} - -// DeepCopy performs a deep copy of all parts of the Diff, making the -// resulting Diff safe to use without modifying this one. -func (d *Diff) DeepCopy() *Diff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*Diff) -} - -func (d *Diff) String() string { - var buf bytes.Buffer - - keys := make([]string, 0, len(d.Modules)) - lookup := make(map[string]*ModuleDiff) - for _, m := range d.Modules { - addr := normalizeModulePath(m.Path) - key := addr.String() - keys = append(keys, key) - lookup[key] = m - } - sort.Strings(keys) - - for _, key := range keys { - m := lookup[key] - mStr := m.String() - - // If we're the root module, we just write the output directly. - if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("%s:\n", key)) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return strings.TrimSpace(buf.String()) -} - -// ModuleDiff tracks the differences between resources to apply within -// a single module. 
-type ModuleDiff struct { - Path []string - Resources map[string]*InstanceDiff - Destroy bool // Set only by the destroy plan -} - -func (d *ModuleDiff) init() { - if d.Resources == nil { - d.Resources = make(map[string]*InstanceDiff) - } - for _, r := range d.Resources { - r.init() - } -} - -// ChangeType returns the type of changes that the diff for this -// module includes. -// -// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or -// DiffCreate. If an instance within the module has a DiffDestroyCreate -// then this will register as a DiffCreate for a module. -func (d *ModuleDiff) ChangeType() DiffChangeType { - result := DiffNone - for _, r := range d.Resources { - change := r.ChangeType() - switch change { - case DiffCreate, DiffDestroy: - if result == DiffNone { - result = change - } - case DiffDestroyCreate, DiffUpdate: - result = DiffUpdate - } - } - - return result -} - -// Empty returns true if the diff has no changes within this module. -func (d *ModuleDiff) Empty() bool { - if d.Destroy { - return false - } - - if len(d.Resources) == 0 { - return true - } - - for _, rd := range d.Resources { - if !rd.Empty() { - return false - } - } - - return true -} - -// Instances returns the instance diffs for the id given. This can return -// multiple instance diffs if there are counts within the resource. -func (d *ModuleDiff) Instances(id string) []*InstanceDiff { - var result []*InstanceDiff - for k, diff := range d.Resources { - if k == id || strings.HasPrefix(k, id+".") { - if !diff.Empty() { - result = append(result, diff) - } - } - } - - return result -} - -// IsRoot says whether or not this module diff is for the root module. -func (d *ModuleDiff) IsRoot() bool { - return reflect.DeepEqual(d.Path, rootModulePath) -} - -// String outputs the diff in a long but command-line friendly output -// format that users can read to quickly inspect a diff. -func (d *ModuleDiff) String() string { - var buf bytes.Buffer - - names := make([]string, 0, len(d.Resources)) - for name := range d.Resources { - names = append(names, name) - } - sort.Strings(names) - - for _, name := range names { - rdiff := d.Resources[name] - - crud := "UPDATE" - switch { - case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): - crud = "DESTROY/CREATE" - case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): - crud = "DESTROY" - case rdiff.RequiresNew(): - crud = "CREATE" - } - - extra := "" - if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { - extra = " (deposed only)" - } - - buf.WriteString(fmt.Sprintf( - "%s: %s%s\n", - crud, - name, - extra)) - - keyLen := 0 - rdiffAttrs := rdiff.CopyAttributes() - keys := make([]string, 0, len(rdiffAttrs)) - for key := range rdiffAttrs { - if key == "id" { - continue - } - - keys = append(keys, key) - if len(key) > keyLen { - keyLen = len(key) - } - } - sort.Strings(keys) - - for _, attrK := range keys { - attrDiff, _ := rdiff.GetAttribute(attrK) - - v := attrDiff.New - u := attrDiff.Old - if attrDiff.NewComputed { - v = "" - } - - if attrDiff.Sensitive { - u = "" - v = "" - } - - updateMsg := "" - if attrDiff.RequiresNew { - updateMsg = " (forces new resource)" - } else if attrDiff.Sensitive { - updateMsg = " (attribute changed)" - } - - buf.WriteString(fmt.Sprintf( - " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, - v, - updateMsg)) - } - } - - return buf.String() -} - // InstanceDiff is the diff of a resource from some state to another. 
type InstanceDiff struct { mu sync.Mutex @@ -955,12 +605,7 @@ type ResourceAttrDiff struct { NewExtra interface{} // Extra information for the provider RequiresNew bool // True if change requires new resource Sensitive bool // True if the data should not be displayed in UI output - Type DiffAttrType -} - -// Empty returns true if the diff for this attr is neutral -func (d *ResourceAttrDiff) Empty() bool { - return d.Old == d.New && !d.NewComputed && !d.NewRemoved + Type diffAttrType } func (d *ResourceAttrDiff) GoString() string { @@ -972,57 +617,32 @@ func (d *ResourceAttrDiff) GoString() string { // output attribute (comes as a result of applying the configuration). An // example input would be "ami" for AWS and an example output would be // "private_ip". -type DiffAttrType byte - -const ( - DiffAttrUnknown DiffAttrType = iota - DiffAttrInput - DiffAttrOutput -) - -func (d *InstanceDiff) init() { - if d.Attributes == nil { - d.Attributes = make(map[string]*ResourceAttrDiff) - } -} +type diffAttrType byte func NewInstanceDiff() *InstanceDiff { return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} } -func (d *InstanceDiff) Copy() (*InstanceDiff, error) { - if d == nil { - return nil, nil - } - - dCopy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - return nil, err - } - - return dCopy.(*InstanceDiff), nil -} - -// ChangeType returns the DiffChangeType represented by the diff +// ChangeType returns the diffChangeType represented by the diff // for this single instance. -func (d *InstanceDiff) ChangeType() DiffChangeType { +func (d *InstanceDiff) ChangeType() diffChangeType { if d.Empty() { - return DiffNone + return diffNone } if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { - return DiffDestroyCreate + return diffDestroyCreate } if d.GetDestroy() || d.GetDestroyDeposed() { - return DiffDestroy + return diffDestroy } if d.RequiresNew() { - return DiffCreate + return diffCreate } - return DiffUpdate + return diffUpdate } // Empty returns true if this diff encapsulates no changes. @@ -1044,6 +664,7 @@ func (d *InstanceDiff) Empty() bool { // This is different from the Same comparison that is supported which // checks for operation equality taking into account computed values. Equal // instead checks for exact equality. +// TODO: investigate why removing this unused method causes panic in tests func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { // If one is nil, they must both be nil if d == nil || d2 == nil { @@ -1054,16 +675,6 @@ func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { return reflect.DeepEqual(d, d2) } -// DeepCopy performs a deep copy of all parts of the InstanceDiff -func (d *InstanceDiff) DeepCopy() *InstanceDiff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*InstanceDiff) -} - func (d *InstanceDiff) GoString() string { return fmt.Sprintf("*%#v", InstanceDiff{ Attributes: d.Attributes, @@ -1111,23 +722,6 @@ func (d *InstanceDiff) GetDestroyDeposed() bool { return d.DestroyDeposed } -func (d *InstanceDiff) SetDestroyDeposed(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.DestroyDeposed = b -} - -// These methods are properly locked, for use outside other InstanceDiff -// methods but everywhere else within the terraform package. 
-// TODO refactor the locking scheme
-func (d *InstanceDiff) SetTainted(b bool) {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	d.DestroyTainted = b
-}
-
 func (d *InstanceDiff) GetDestroyTainted() bool {
 	d.mu.Lock()
 	defer d.mu.Unlock()
@@ -1135,13 +729,6 @@ func (d *InstanceDiff) GetDestroyTainted() bool {
 	return d.DestroyTainted
 }
 
-func (d *InstanceDiff) SetDestroy(b bool) {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	d.Destroy = b
-}
-
 func (d *InstanceDiff) GetDestroy() bool {
 	d.mu.Lock()
 	defer d.mu.Unlock()
@@ -1149,20 +736,6 @@ func (d *InstanceDiff) GetDestroy() bool {
 	return d.Destroy
 }
 
-func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	d.Attributes[key] = attr
-}
-
-func (d *InstanceDiff) DelAttribute(key string) {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	delete(d.Attributes, key)
-}
-
 func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
 	d.mu.Lock()
 	defer d.mu.Unlock()
@@ -1170,12 +743,6 @@ func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
 	attr, ok := d.Attributes[key]
 	return attr, ok
 }
-func (d *InstanceDiff) GetAttributesLen() int {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	return len(d.Attributes)
-}
 
 // Safely copies the Attributes map
 func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
@@ -1421,21 +988,3 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
 
 	return true, ""
 }
-
-// moduleDiffSort implements sort.Interface to sort module diffs by path.
-type moduleDiffSort []*ModuleDiff
-
-func (s moduleDiffSort) Len() int { return len(s) }
-func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s moduleDiffSort) Less(i, j int) bool {
-	a := s[i]
-	b := s[j]
-
-	// If the lengths are different, then the shorter one always wins
-	if len(a.Path) != len(b.Path) {
-		return len(a.Path) < len(b.Path)
-	}
-
-	// Otherwise, compare lexically
-	return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go
new file mode 100644
index 00000000..b01e5a48
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go
@@ -0,0 +1,13 @@
+package terraform
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=instanceType instancetype.go
+
+// instanceType is an enum of the various types of instances stored in the State
+type instanceType int
+
+const (
+	typeInvalid instanceType = iota
+	typePrimary
+	typeTainted
+	typeDeposed
+)
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go
new file mode 100644
index 00000000..782ef90c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go
@@ -0,0 +1,26 @@
+// Code generated by "stringer -type=instanceType instancetype.go"; DO NOT EDIT.
+
+package terraform
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[typeInvalid-0] + _ = x[typePrimary-1] + _ = x[typeTainted-2] + _ = x[typeDeposed-3] +} + +const _instanceType_name = "typeInvalidtypePrimarytypeTaintedtypeDeposed" + +var _instanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i instanceType) String() string { + if i < 0 || i >= instanceType(len(_instanceType_index)-1) { + return "instanceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _instanceType_name[_instanceType_index[i]:_instanceType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go similarity index 60% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go index bd577460..11b63de8 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go @@ -7,66 +7,14 @@ import ( "strconv" "strings" + "github.com/hashicorp/go-cty/cty" "github.com/mitchellh/copystructure" "github.com/mitchellh/reflectwalk" - "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" ) -// Resource is a legacy way to identify a particular resource instance. -// -// New code should use addrs.ResourceInstance instead. This is still here -// only for codepaths that haven't been updated yet. -type Resource struct { - // These are all used by the new EvalNode stuff. - Name string - Type string - CountIndex int - - // These aren't really used anymore anywhere, but we keep them around - // since we haven't done a proper cleanup yet. - Id string - Info *InstanceInfo - Config *ResourceConfig - Dependencies []string - Diff *InstanceDiff - Provider ResourceProvider - State *InstanceState - Flags ResourceFlag -} - -// NewResource constructs a legacy Resource object from an -// addrs.ResourceInstance value. -// -// This is provided to shim to old codepaths that haven't been updated away -// from this type yet. Since this old type is not able to represent instances -// that have string keys, this function will panic if given a resource address -// that has a string key. -func NewResource(addr addrs.ResourceInstance) *Resource { - ret := &Resource{ - Name: addr.Resource.Name, - Type: addr.Resource.Type, - } - - if addr.Key != addrs.NoKey { - switch tk := addr.Key.(type) { - case addrs.IntKey: - ret.CountIndex = int(tk) - default: - panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key)) - } - } - - return ret -} - -// ResourceKind specifies what kind of instance we're working with, whether -// its a primary instance, a tainted instance, or an orphan. -type ResourceFlag byte - // InstanceInfo is used to hold information about the instance and/or // resource being modified. type InstanceInfo struct { @@ -82,95 +30,6 @@ type InstanceInfo struct { Type string } -// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance. 
-// -// InstanceInfo is a legacy type, and uses of it should be gradually replaced -// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as -// appropriate. -// -// The legacy InstanceInfo type cannot represent module instances with instance -// keys, so this function will panic if given such a path. Uses of this type -// should all be removed or replaced before implementing "count" and "for_each" -// arguments on modules in order to avoid such panics. -// -// This legacy type also cannot represent resource instances with string -// instance keys. It will panic if the given key is not either NoKey or an -// IntKey. -func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { - // We need an old-style []string module path for InstanceInfo. - path := make([]string, len(addr.Module)) - for i, step := range addr.Module { - if step.InstanceKey != addrs.NoKey { - panic("NewInstanceInfo cannot convert module instance with key") - } - path[i] = step.Name - } - - // This is a funny old meaning of "id" that is no longer current. It should - // not be used for anything users might see. Note that it does not include - // a representation of the resource mode, and so it's impossible to - // determine from an InstanceInfo alone whether it is a managed or data - // resource that is being referred to. - id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) - if addr.Resource.Resource.Mode == addrs.DataResourceMode { - id = "data." + id - } - if addr.Resource.Key != addrs.NoKey { - switch k := addr.Resource.Key.(type) { - case addrs.IntKey: - id = id + fmt.Sprintf(".%d", int(k)) - default: - panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) - } - } - - return &InstanceInfo{ - Id: id, - ModulePath: path, - Type: addr.Resource.Resource.Type, - } -} - -// ResourceAddress returns the address of the resource that the receiver is describing. -func (i *InstanceInfo) ResourceAddress() *ResourceAddress { - // GROSS: for tainted and deposed instances, their status gets appended - // to i.Id to create a unique id for the graph node. Historically these - // ids were displayed to the user, so it's designed to be human-readable: - // "aws_instance.bar.0 (deposed #0)" - // - // So here we detect such suffixes and try to interpret them back to - // their original meaning so we can then produce a ResourceAddress - // with a suitable InstanceType. - id := i.Id - instanceType := TypeInvalid - if idx := strings.Index(id, " ("); idx != -1 { - remain := id[idx:] - id = id[:idx] - - switch { - case strings.Contains(remain, "tainted"): - instanceType = TypeTainted - case strings.Contains(remain, "deposed"): - instanceType = TypeDeposed - } - } - - addr, err := parseResourceAddressInternal(id) - if err != nil { - // should never happen, since that would indicate a bug in the - // code that constructed this InstanceInfo. - panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) - } - if len(i.ModulePath) > 1 { - addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied - } - if instanceType != TypeInvalid { - addr.InstanceTypeSet = true - addr.InstanceType = instanceType - } - return addr -} - // ResourceConfig is a legacy type that was formerly used to represent // interpolatable configuration blocks. It is now only used to shim to old // APIs that still use this type, via NewResourceConfigShimmed. 
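// A short sketch of the ResourceConfig accessors that survive this file; Get
// and IsComputed appear in the hunks just below, and NewResourceConfigShimmed
// is the constructor named in the comment above (its cty.Value/configschema
// signature is assumed here, as if written inside this terraform package).
// The attribute keys are purely illustrative.
//
//	func describeConfig(val cty.Value, block *configschema.Block) {
//		c := NewResourceConfigShimmed(val, block)
//		if v, ok := c.Get("name"); ok {
//			log.Printf("configured name: %v", v)
//		}
//		if c.IsComputed("policy") {
//			log.Println("policy is not known until apply")
//		}
//	}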
@@ -335,22 +194,6 @@ func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { return true } -// CheckSet checks that the given list of configuration keys is -// properly set. If not, errors are returned for each unset key. -// -// This is useful to be called in the Validate method of a ResourceProvider. -func (c *ResourceConfig) CheckSet(keys []string) []error { - var errs []error - - for _, k := range keys { - if !c.IsSet(k) { - errs = append(errs, fmt.Errorf("%s must be set", k)) - } - } - - return errs -} - // Get looks up a configuration value by key and returns the value. // // The second return value is true if the get was successful. Get will @@ -399,28 +242,6 @@ func (c *ResourceConfig) IsComputed(k string) bool { return w.Unknown } -// IsSet checks if the key in the configuration is set. A key is set if -// it has a value or the value is being computed (is unknown currently). -// -// This function should be used rather than checking the keys of the -// raw configuration itself, since a key may be omitted from the raw -// configuration if it is being computed. -func (c *ResourceConfig) IsSet(k string) bool { - if c == nil { - return false - } - - if c.IsComputed(k) { - return true - } - - if _, ok := c.Get(k); ok { - return true - } - - return false -} - func (c *ResourceConfig) get( k string, raw map[string]interface{}) (interface{}, bool) { parts := strings.Split(k, ".") @@ -501,6 +322,8 @@ type unknownCheckWalker struct { Unknown bool } +// TODO: investigate why deleting this causes odd runtime test failures +// must be some kind of interface implementation func (w *unknownCheckWalker) Primitive(v reflect.Value) error { if v.Interface() == hcl2shim.UnknownVariableValue { w.Unknown = true diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go new file mode 100644 index 00000000..ec2665d3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go @@ -0,0 +1,226 @@ +package terraform + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// resourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. +type resourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType instanceType + InstanceTypeSet bool + Name string + Type string + Mode ResourceMode // significant only if InstanceTypeSet +} + +// String outputs the address that parses into this address. 
+func (r *resourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case ManagedResourceMode: + // nothing to do + case DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case typePrimary: + name += ".primary" + case typeDeposed: + name += ".deposed" + case typeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +func parseResourceAddress(s string) (*resourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := ManagedResourceMode + if matches["data_prefix"] != "" { + mode = DataResourceMode + } + resourceIndex, err := parseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := parseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := parseResourcePath(matches["path"]) + + // not allowed to say "data." without a type following + if mode == DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf( + "invalid resource address %q: must target specific data instance", + s, + ) + } + + return &resourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +// Less returns true if and only if the receiver should be sorted before +// the given address when presenting a list of resource addresses to +// an end-user. +// +// This sort uses lexicographic sorting for most components, but uses +// numeric sort for indices, thus causing index 10 to sort after +// index 9, rather than after index 1. +func (addr *resourceAddress) Less(other *resourceAddress) bool { + + switch { + + case len(addr.Path) != len(other.Path): + return len(addr.Path) < len(other.Path) + + case !reflect.DeepEqual(addr.Path, other.Path): + // If the two paths are the same length but don't match, we'll just + // cheat and compare the string forms since it's easier than + // comparing all of the path segments in turn, and lexicographic + // comparison is correct for the module path portion. + addrStr := addr.String() + otherStr := other.String() + return addrStr < otherStr + + case addr.Mode != other.Mode: + return addr.Mode == DataResourceMode + + case addr.Type != other.Type: + return addr.Type < other.Type + + case addr.Name != other.Name: + return addr.Name < other.Name + + case addr.Index != other.Index: + // Since "Index" is -1 for an un-indexed address, this also conveniently + // sorts unindexed addresses before indexed ones, should they both + // appear for some reason. + return addr.Index < other.Index + + case addr.InstanceTypeSet != other.InstanceTypeSet: + return !addr.InstanceTypeSet + + case addr.InstanceType != other.InstanceType: + // InstanceType is actually an enum, so this is just an arbitrary + // sort based on the enum numeric values, and thus not particularly + // meaningful. 
+		return addr.InstanceType < other.InstanceType
+
+	default:
+		return false
+
+	}
+}
+
+func parseResourceIndex(s string) (int, error) {
+	if s == "" {
+		return -1, nil
+	}
+	return strconv.Atoi(s)
+}
+
+func parseResourcePath(s string) []string {
+	if s == "" {
+		return nil
+	}
+	parts := strings.Split(s, ".")
+	path := make([]string, 0, len(parts))
+	for _, s := range parts {
+		// Due to the limitations of the regexp match below, the path match has
+		// some noise in it we have to filter out :|
+		if s == "" || s == "module" {
+			continue
+		}
+		path = append(path, s)
+	}
+	return path
+}
+
+func parseInstanceType(s string) (instanceType, error) {
+	switch s {
+	case "", "primary":
+		return typePrimary, nil
+	case "deposed":
+		return typeDeposed, nil
+	case "tainted":
+		return typeTainted, nil
+	default:
+		return typeInvalid, fmt.Errorf("Unexpected value for instanceType field: %q", s)
+	}
+}
+
+func tokenizeResourceAddress(s string) (map[string]string, error) {
+	// Example of portions of the regexp below using the
+	// string "aws_instance.web.tainted[1]"
+	re := regexp.MustCompile(`\A` +
+		// "module.foo.module.bar" (optional)
+		`(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
+		// possibly "data.", if targeting is a data resource
+		`(?P<data_prefix>(?:data\.)?)` +
+		// "aws_instance.web" (optional when module path specified)
+		`(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
+		// "tainted" (optional, omission implies: "primary")
+		`(?:\.(?P<instance_type>\w+))?` +
+		// "1" (optional, omission implies: "0")
+		`(?:\[(?P<index>\d+)\])?` +
+		`\z`)
+
+	groupNames := re.SubexpNames()
+	rawMatches := re.FindAllStringSubmatch(s, -1)
+	if len(rawMatches) != 1 {
+		return nil, fmt.Errorf("invalid resource address %q", s)
+	}
+
+	matches := make(map[string]string)
+	for i, m := range rawMatches[0] {
+		matches[groupNames[i]] = m
+	}
+
+	return matches, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go
similarity index 100%
rename from vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go
rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode_string.go
similarity index 100%
rename from vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode_string.go
rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode_string.go
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go
new file mode 100644
index 00000000..ece8fc66
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go
@@ -0,0 +1,26 @@
+package terraform
+
+// ResourceType is a type of resource that a resource provider can manage.
+type ResourceType struct {
+	Name       string // Name of the resource, example "instance" (no provider prefix)
+	Importable bool   // Whether this resource supports importing
+
+	// SchemaAvailable is set if the provider supports the ProviderSchema,
+	// ResourceTypeSchema and DataSourceSchema methods.
Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// DataSource is a data source that a resource provider implements. +type DataSource struct { + Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go new file mode 100644 index 00000000..07e5a84f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go @@ -0,0 +1,26 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. 
+type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go similarity index 72% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go index b2b308e7..56e974ac 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go @@ -4,39 +4,26 @@ import ( "bufio" "bytes" "encoding/json" - "errors" "fmt" - "io" - "io/ioutil" "log" - "os" "reflect" "sort" "strconv" "strings" "sync" - "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-cty/cty" multierror "github.com/hashicorp/go-multierror" uuid "github.com/hashicorp/go-uuid" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" "github.com/mitchellh/copystructure" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" ) const ( // StateVersion is the current version for our state file - StateVersion = 3 + stateVersion = 3 ) // rootModulePath is the path of the root module @@ -112,6 +99,10 @@ type State struct { Modules []*ModuleState `json:"modules"` mu sync.Mutex + + // IsBinaryDrivenTest is a special flag that assists with a binary driver + // heuristic, it should not be set externally + IsBinaryDrivenTest bool } func (s *State) Lock() { s.mu.Lock() } @@ -337,8 +328,8 @@ func (s *State) Remove(addr ...string) error { defer s.Unlock() // Filter out what we need to delete - filter := &StateFilter{State: s} - results, err := filter.Filter(addr...) + filter := &stateFilter{State: s} + results, err := filter.filter(addr...) if err != nil { return err } @@ -496,43 +487,6 @@ func (s *State) equal(other *State) bool { return true } -// MarshalEqual is similar to Equal but provides a stronger definition of -// "equal", where two states are equal if and only if their serialized form -// is byte-for-byte identical. -// -// This is primarily useful for callers that are trying to save snapshots -// of state to persistent storage, allowing them to detect when a new -// snapshot must be taken. -// -// Note that the serial number and lineage are included in the serialized form, -// so it's the caller's responsibility to properly manage these attributes -// so that this method is only called on two states that have the same -// serial and lineage, unless detecting such differences is desired. 
-func (s *State) MarshalEqual(other *State) bool { - if s == nil && other == nil { - return true - } else if s == nil || other == nil { - return false - } - - recvBuf := &bytes.Buffer{} - otherBuf := &bytes.Buffer{} - - err := WriteState(s, recvBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - err = WriteState(other, otherBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) -} - type StateAgeComparison int const ( @@ -615,21 +569,6 @@ func (s *State) DeepCopy() *State { return copy.(*State) } -// FromFutureTerraform checks if this state was written by a Terraform -// version from the future. -func (s *State) FromFutureTerraform() bool { - s.Lock() - defer s.Unlock() - - // No TF version means it is certainly from the past - if s.TFVersion == "" { - return false - } - - v := version.Must(version.NewVersion(s.TFVersion)) - return tfversion.SemVer.LessThan(v) -} - func (s *State) Init() { s.Lock() defer s.Unlock() @@ -638,7 +577,7 @@ func (s *State) Init() { func (s *State) init() { if s.Version == 0 { - s.Version = StateVersion + s.Version = stateVersion } if s.moduleByPath(addrs.RootModuleInstance) == nil { @@ -776,57 +715,6 @@ type BackendState struct { Hash uint64 `json:"hash"` // Hash of portion of configuration from config files } -// Empty returns true if BackendState has no state. -func (s *BackendState) Empty() bool { - return s == nil || s.Type == "" -} - -// Config decodes the type-specific configuration object using the provided -// schema and returns the result as a cty.Value. -// -// An error is returned if the stored configuration does not conform to the -// given schema. -func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) { - ty := schema.ImpliedType() - if s == nil { - return cty.NullVal(ty), nil - } - return ctyjson.Unmarshal(s.ConfigRaw, ty) -} - -// SetConfig replaces (in-place) the type-specific configuration object using -// the provided value and associated schema. -// -// An error is returned if the given value does not conform to the implied -// type of the schema. -func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error { - ty := schema.ImpliedType() - buf, err := ctyjson.Marshal(val, ty) - if err != nil { - return err - } - s.ConfigRaw = buf - return nil -} - -// ForPlan produces an alternative representation of the reciever that is -// suitable for storing in a plan. The current workspace must additionally -// be provided, to be stored alongside the backend configuration. -// -// The backend configuration schema is required in order to properly -// encode the backend-specific configuration settings. -func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) { - if s == nil { - return nil, nil - } - - configVal, err := s.Config(schema) - if err != nil { - return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err) - } - return plans.NewBackend(s.Type, configVal, schema, workspaceName) -} - // RemoteState is used to track the information about a remote // state store that we push/pull state to. 
type RemoteState struct { @@ -862,24 +750,6 @@ func (r *RemoteState) Empty() bool { return r.Type == "" } -func (r *RemoteState) Equals(other *RemoteState) bool { - r.Lock() - defer r.Unlock() - - if r.Type != other.Type { - return false - } - if len(r.Config) != len(other.Config) { - return false - } - for k, v := range r.Config { - if other.Config[k] != v { - return false - } - } - return true -} - // OutputState is used to track the state relevant to a single output. type OutputState struct { // Sensitive describes whether the output is considered sensitive, @@ -1024,103 +894,6 @@ func (m *ModuleState) Equal(other *ModuleState) bool { return true } -// IsRoot says whether or not this module diff is for the root module. -func (m *ModuleState) IsRoot() bool { - m.Lock() - defer m.Unlock() - return reflect.DeepEqual(m.Path, rootModulePath) -} - -// IsDescendent returns true if other is a descendent of this module. -func (m *ModuleState) IsDescendent(other *ModuleState) bool { - m.Lock() - defer m.Unlock() - - i := len(m.Path) - return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) -} - -// Orphans returns a list of keys of resources that are in the State -// but aren't present in the configuration itself. Hence, these keys -// represent the state of resources that are orphans. -func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance { - m.Lock() - defer m.Unlock() - - inConfig := make(map[string]struct{}) - if c != nil { - for _, r := range c.ManagedResources { - inConfig[r.Addr().String()] = struct{}{} - } - for _, r := range c.DataResources { - inConfig[r.Addr().String()] = struct{}{} - } - } - - var result []addrs.ResourceInstance - for k := range m.Resources { - // Since we've not yet updated state to use our new address format, - // we need to do some shimming here. - legacyAddr, err := parseResourceAddressInternal(k) - if err != nil { - // Suggests that the user tampered with the state, since we always - // generate valid internal addresses. - log.Printf("ModuleState has invalid resource key %q. Ignoring.", k) - continue - } - - addr := legacyAddr.AbsResourceInstanceAddr().Resource - compareKey := addr.Resource.String() // compare by resource address, ignoring instance key - if _, exists := inConfig[compareKey]; !exists { - result = append(result, addr) - } - } - return result -} - -// RemovedOutputs returns a list of outputs that are in the State but aren't -// present in the configuration itself. -func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue { - if outputs == nil { - // If we got no output map at all then we'll just treat our set of - // configured outputs as empty, since that suggests that they've all - // been removed by removing their containing module. - outputs = make(map[string]*configs.Output) - } - - s.Lock() - defer s.Unlock() - - var ret []addrs.OutputValue - for n := range s.Outputs { - if _, declared := outputs[n]; !declared { - ret = append(ret, addrs.OutputValue{ - Name: n, - }) - } - } - - return ret -} - -// View returns a view with the given resource prefix. 
-func (m *ModuleState) View(id string) *ModuleState { - if m == nil { - return m - } - - r := m.deepcopy() - for k := range r.Resources { - if id == k || strings.HasPrefix(k, id+".") { - continue - } - - delete(r.Resources, k) - } - - return r -} - func (m *ModuleState) init() { m.Lock() defer m.Unlock() @@ -1144,19 +917,6 @@ func (m *ModuleState) init() { } } -func (m *ModuleState) deepcopy() *ModuleState { - if m == nil { - return nil - } - - stateCopy, err := copystructure.Config{Lock: true}.Copy(m) - if err != nil { - panic(err) - } - - return stateCopy.(*ModuleState) -} - // prune is used to remove any resources that are no longer required func (m *ModuleState) prune() { m.Lock() @@ -1304,10 +1064,6 @@ func (m *ModuleState) String() string { return buf.String() } -func (m *ModuleState) Empty() bool { - return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 -} - // ResourceStateKey is a structured representation of the key used for the // ModuleState.Resources mapping type ResourceStateKey struct { @@ -1360,7 +1116,7 @@ func (rsk *ResourceStateKey) String() string { // ModuleState.Resources and returns a resource name and resource index. In the // state, a resource has the format "type.name.index" or "type.name". In the // latter case, the index is returned as -1. -func ParseResourceStateKey(k string) (*ResourceStateKey, error) { +func parseResourceStateKey(k string) (*ResourceStateKey, error) { parts := strings.Split(k, ".") mode := ManagedResourceMode if len(parts) > 0 && parts[0] == "data" { @@ -1503,24 +1259,6 @@ func (s *ResourceState) Untaint() { } } -// ProviderAddr returns the provider address for the receiver, by parsing the -// string representation saved in state. An error can be returned if the -// value in state is corrupt. -func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) { - var diags tfdiags.Diagnostics - - str := s.Provider - traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(travDiags) - if travDiags.HasErrors() { - return addrs.AbsProviderConfig{}, diags.Err() - } - - addr, addrDiags := addrs.ParseAbsProviderConfig(traversal) - diags = diags.Append(addrDiags) - return addr, diags.Err() -} - func (s *ResourceState) init() { s.Lock() defer s.Unlock() @@ -1598,6 +1336,8 @@ type InstanceState struct { // and collections. Meta map[string]interface{} `json:"meta"` + ProviderMeta cty.Value + // Tainted is used to mark a resource for recreation. Tainted bool `json:"tainted"` @@ -1859,283 +1599,6 @@ func (e *EphemeralState) init() { } } -func (e *EphemeralState) DeepCopy() *EphemeralState { - copy, err := copystructure.Config{Lock: true}.Copy(e) - if err != nil { - panic(err) - } - - return copy.(*EphemeralState) -} - -type jsonStateVersionIdentifier struct { - Version int `json:"version"` -} - -// Check if this is a V0 format - the magic bytes at the start of the file -// should be "tfstate" if so. We no longer support upgrading this type of -// state but return an error message explaining to a user how they can -// upgrade via the 0.6.x series. -func testForV0State(buf *bufio.Reader) error { - start, err := buf.Peek(len("tfstate")) - if err != nil { - return fmt.Errorf("Failed to check for magic bytes: %v", err) - } - if string(start) == "tfstate" { - return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + - "format which was used prior to Terraform 0.3. 
Please upgrade\n" + - "this state file using Terraform 0.6.16 prior to using it with\n" + - "Terraform 0.7.") - } - - return nil -} - -// ErrNoState is returned by ReadState when the io.Reader contains no data -var ErrNoState = errors.New("no state") - -// ReadState reads a state structure out of a reader in the format that -// was written by WriteState. -func ReadState(src io.Reader) (*State, error) { - // check for a nil file specifically, since that produces a platform - // specific error if we try to use it in a bufio.Reader. - if f, ok := src.(*os.File); ok && f == nil { - return nil, ErrNoState - } - - buf := bufio.NewReader(src) - - if _, err := buf.Peek(1); err != nil { - if err == io.EOF { - return nil, ErrNoState - } - return nil, err - } - - if err := testForV0State(buf); err != nil { - return nil, err - } - - // If we are JSON we buffer the whole thing in memory so we can read it twice. - // This is suboptimal, but will work for now. - jsonBytes, err := ioutil.ReadAll(buf) - if err != nil { - return nil, fmt.Errorf("Reading state file failed: %v", err) - } - - versionIdentifier := &jsonStateVersionIdentifier{} - if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { - return nil, fmt.Errorf("Decoding state file version failed: %v", err) - } - - var result *State - switch versionIdentifier.Version { - case 0: - return nil, fmt.Errorf("State version 0 is not supported as JSON.") - case 1: - v1State, err := ReadStateV1(jsonBytes) - if err != nil { - return nil, err - } - - v2State, err := upgradeStateV1ToV2(v1State) - if err != nil { - return nil, err - } - - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - // increment the Serial whenever we upgrade state - v3State.Serial++ - result = v3State - case 2: - v2State, err := ReadStateV2(jsonBytes) - if err != nil { - return nil, err - } - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - v3State.Serial++ - result = v3State - case 3: - v3State, err := ReadStateV3(jsonBytes) - if err != nil { - return nil, err - } - - result = v3State - default: - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), versionIdentifier.Version) - } - - // If we reached this place we must have a result set - if result == nil { - panic("resulting state in load not set, assertion failed") - } - - // Prune the state when read it. Its possible to write unpruned states or - // for a user to make a state unpruned (nil-ing a module state for example). 
- result.prune() - - // Validate the state file is valid - if err := result.Validate(); err != nil { - return nil, err - } - - return result, nil -} - -func ReadStateV1(jsonBytes []byte) (*stateV1, error) { - v1State := &stateV1{} - if err := json.Unmarshal(jsonBytes, v1State); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - if v1State.Version != 1 { - return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ - "read %d, expected 1", v1State.Version) - } - - return v1State, nil -} - -func ReadStateV2(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version, this to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "means that this is invalid of the state becoming corrupted through\n"+ - "some external means. Please manually modify the Terraform version\n"+ - "field to be a proper semantic version.", - state.TFVersion) - } - } - - // catch any unitialized fields in the state - state.init() - - // Sort it - state.sort() - - return state, nil -} - -func ReadStateV3(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version, this to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "means that this is invalid of the state becoming corrupted through\n"+ - "some external means. Please manually modify the Terraform version\n"+ - "field to be a proper semantic version.", - state.TFVersion) - } - } - - // catch any unitialized fields in the state - state.init() - - // Sort it - state.sort() - - // Now we write the state back out to detect any changes in normaliztion. - // If our state is now written out differently, bump the serial number to - // prevent conflicts. - var buf bytes.Buffer - err := WriteState(state, &buf) - if err != nil { - return nil, err - } - - if !bytes.Equal(jsonBytes, buf.Bytes()) { - log.Println("[INFO] state modified during read or write. incrementing serial number") - state.Serial++ - } - - return state, nil -} - -// WriteState writes a state somewhere in a binary format. -func WriteState(d *State, dst io.Writer) error { - // writing a nil state is a noop. - if d == nil { - return nil - } - - // make sure we have no uninitialized fields - d.init() - - // Make sure it is sorted - d.sort() - - // Ensure the version is set - d.Version = StateVersion - - // If the TFVersion is set, verify it. 
We used to just set the version - // here, but this isn't safe since it changes the MD5 sum on some remote - // state storage backends such as Atlas. We now leave it be if needed. - if d.TFVersion != "" { - if _, err := version.NewVersion(d.TFVersion); err != nil { - return fmt.Errorf( - "Error writing state, invalid version: %s\n\n"+ - "The Terraform version when writing the state must be a semantic\n"+ - "version.", - d.TFVersion) - } - } - - // Encode the data in a human-friendly way - data, err := json.MarshalIndent(d, "", " ") - if err != nil { - return fmt.Errorf("Failed to encode state: %s", err) - } - - // We append a newline to the data because MarshalIndent doesn't - data = append(data, '\n') - - // Write the data out to the dst - if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil { - return fmt.Errorf("Failed to write state: %v", err) - } - - return nil -} - // resourceNameSort implements the sort.Interface to sort name parts lexically for // strings and numerically for integer indexes. type resourceNameSort []string diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go similarity index 77% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go index 2dcb11b7..01d03927 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go @@ -5,7 +5,7 @@ import ( "sort" ) -// StateFilter is responsible for filtering and searching a state. +// stateFilter is responsible for filtering and searching a state. // // This is a separate struct from State rather than a method on State // because StateFilter might create sidecar data structures to optimize @@ -15,18 +15,18 @@ import ( // Reset should be called or a new one should be allocated. StateFilter // will not watch State for changes and do this for you. If you filter after // changing the State without calling Reset, the behavior is not defined. -type StateFilter struct { +type stateFilter struct { State *State } // Filter takes the addresses specified by fs and finds all the matches. // The values of fs are resource addressing syntax that can be parsed by -// ParseResourceAddress. -func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { +// parseResourceAddress. +func (f *stateFilter) filter(fs ...string) ([]*stateFilterResult, error) { // Parse all the addresses - as := make([]*ResourceAddress, len(fs)) + as := make([]*resourceAddress, len(fs)) for i, v := range fs { - a, err := ParseResourceAddress(v) + a, err := parseResourceAddress(v) if err != nil { return nil, fmt.Errorf("Error parsing address '%s': %s", v, err) } @@ -36,12 +36,12 @@ func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { // If we weren't given any filters, then we list all if len(fs) == 0 { - as = append(as, &ResourceAddress{Index: -1}) + as = append(as, &resourceAddress{Index: -1}) } // Filter each of the address. We keep track of this in a map to // strip duplicates. 
- resultSet := make(map[string]*StateFilterResult) + resultSet := make(map[string]*stateFilterResult) for _, a := range as { for _, r := range f.filterSingle(a) { resultSet[r.String()] = r @@ -49,19 +49,19 @@ func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { } // Make the result list - results := make([]*StateFilterResult, 0, len(resultSet)) + results := make([]*stateFilterResult, 0, len(resultSet)) for _, v := range resultSet { results = append(results, v) } // Sort them and return - sort.Sort(StateFilterResultSlice(results)) + sort.Sort(stateFilterResultSlice(results)) return results, nil } -func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { +func (f *stateFilter) filterSingle(a *resourceAddress) []*stateFilterResult { // The slice to keep track of results - var results []*StateFilterResult + var results []*stateFilterResult // Go through modules first. modules := make([]*ModuleState, 0, len(f.State.Modules)) @@ -72,9 +72,9 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { // Only add the module to the results if we haven't specified a type. // We also ignore the root module. if a.Type == "" && len(m.Path) > 1 { - results = append(results, &StateFilterResult{ + results = append(results, &stateFilterResult{ Path: m.Path[1:], - Address: (&ResourceAddress{Path: m.Path[1:]}).String(), + Address: (&resourceAddress{Path: m.Path[1:]}).String(), Value: m, }) } @@ -86,7 +86,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { for _, m := range modules { for n, r := range m.Resources { // The name in the state contains valuable information. Parse. - key, err := ParseResourceStateKey(n) + key, err := parseResourceStateKey(n) if err != nil { // If we get an error parsing, then just ignore it // out of the state. @@ -116,7 +116,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { } // Build the address for this resource - addr := &ResourceAddress{ + addr := &resourceAddress{ Path: m.Path[1:], Name: key.Name, Type: key.Type, @@ -124,7 +124,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { } // Add the resource level result - resourceResult := &StateFilterResult{ + resourceResult := &stateFilterResult{ Path: addr.Path, Address: addr.String(), Value: r, @@ -135,9 +135,9 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { // Add the instances if r.Primary != nil { - addr.InstanceType = TypePrimary + addr.InstanceType = typePrimary addr.InstanceTypeSet = false - results = append(results, &StateFilterResult{ + results = append(results, &stateFilterResult{ Path: addr.Path, Address: addr.String(), Parent: resourceResult, @@ -147,9 +147,9 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { for _, instance := range r.Deposed { if f.relevant(a, instance) { - addr.InstanceType = TypeDeposed + addr.InstanceType = typeDeposed addr.InstanceTypeSet = true - results = append(results, &StateFilterResult{ + results = append(results, &stateFilterResult{ Path: addr.Path, Address: addr.String(), Parent: resourceResult, @@ -165,7 +165,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { } // relevant checks for relevance of this address against the given value. 
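// For illustration only, a minimal sketch of driving the now-unexported
// filter; st is assumed to be a populated *State, and the address string
// "aws_instance.web" is hypothetical.
func exampleFilterUsage(st *State) {
	f := &stateFilter{State: st}
	results, err := f.filter("aws_instance.web")
	if err != nil {
		fmt.Printf("invalid address: %s\n", err)
		return
	}
	for _, r := range results {
		// Value must be type-switched: depending on what the address
		// matched it holds a *ModuleState, *ResourceState, or
		// *InstanceState.
		fmt.Printf("%s: %T\n", r.Address, r.Value)
	}
}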
-func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { +func (f *stateFilter) relevant(addr *resourceAddress, raw interface{}) bool { switch v := raw.(type) { case *ModuleState: path := v.Path[1:] @@ -202,10 +202,10 @@ func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { } } -// StateFilterResult is a single result from a filter operation. Filter +// stateFilterResult is a single result from a filter operation. Filter // can match multiple things within a state (module, resource, instance, etc.) // and this unifies that. -type StateFilterResult struct { +type stateFilterResult struct { // Module path of the result Path []string @@ -215,7 +215,7 @@ type StateFilterResult struct { // Parent, if non-nil, is a parent of this result. For instances, the // parent would be a resource. For resources, the parent would be // a module. For modules, this is currently nil. - Parent *StateFilterResult + Parent *stateFilterResult // Value is the actual value. This must be type switched on. It can be // any data structures that `State` can hold: `ModuleState`, @@ -223,11 +223,11 @@ type StateFilterResult struct { Value interface{} } -func (r *StateFilterResult) String() string { +func (r *stateFilterResult) String() string { return fmt.Sprintf("%T: %s", r.Value, r.Address) } -func (r *StateFilterResult) sortedType() int { +func (r *stateFilterResult) sortedType() int { switch r.Value.(type) { case *ModuleState: return 0 @@ -240,19 +240,19 @@ func (r *StateFilterResult) sortedType() int { } } -// StateFilterResultSlice is a slice of results that implements +// stateFilterResultSlice is a slice of results that implements // sort.Interface. The sorting goal is what is most appealing to // human output. -type StateFilterResultSlice []*StateFilterResult +type stateFilterResultSlice []*stateFilterResult -func (s StateFilterResultSlice) Len() int { return len(s) } -func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s StateFilterResultSlice) Less(i, j int) bool { +func (s stateFilterResultSlice) Len() int { return len(s) } +func (s stateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s stateFilterResultSlice) Less(i, j int) bool { a, b := s[i], s[j] // if these address contain an index, we want to sort by index rather than name - addrA, errA := ParseResourceAddress(a.Address) - addrB, errB := ParseResourceAddress(b.Address) + addrA, errA := parseResourceAddress(a.Address) + addrB, errB := parseResourceAddress(b.Address) if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index { return addrA.Index < addrB.Index } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go new file mode 100644 index 00000000..01ac810f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go @@ -0,0 +1,22 @@ +package terraform + +import ( + "sort" +) + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/helper.go b/vendor/github.com/hashicorp/terraform-plugin-test/helper.go index d4de4468..8a519aaf 100644 --- 
a/vendor/github.com/hashicorp/terraform-plugin-test/helper.go +++ b/vendor/github.com/hashicorp/terraform-plugin-test/helper.go @@ -196,6 +196,11 @@ func symlinkAuxiliaryProviders(pluginDir string) error { return nil } +// GetPluginName returns the configured plugin name. +func (h *Helper) GetPluginName() string { + return h.pluginName +} + // Close cleans up temporary files and directories created to support this // helper, returning an error if any of the cleanup fails. // @@ -218,7 +223,7 @@ func (h *Helper) NewWorkingDir() (*WorkingDir, error) { } // symlink the provider source files into the base directory - err = symlinkDir(h.sourceDir, dir) + err = symlinkDirectoriesOnly(h.sourceDir, dir) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/util.go b/vendor/github.com/hashicorp/terraform-plugin-test/util.go index 0732c82d..57bc84f2 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-test/util.go +++ b/vendor/github.com/hashicorp/terraform-plugin-test/util.go @@ -53,3 +53,43 @@ func symlinkDir(srcDir string, destDir string) (err error) { } return } + +// symlinkDirectoriesOnly finds only the first-level child directories in srcDir +// and symlinks them into destDir. +// Unlike symlinkDir, this is done non-recursively in order to limit the number +// of file descriptors used. +func symlinkDirectoriesOnly(srcDir string, destDir string) (err error) { + srcInfo, err := os.Stat(srcDir) + if err != nil { + return err + } + + err = os.MkdirAll(destDir, srcInfo.Mode()) + if err != nil { + return err + } + + directory, err := os.Open(srcDir) + if err != nil { + return err + } + defer directory.Close() + objects, err := directory.Readdir(-1) + if err != nil { + return err + } + + for _, obj := range objects { + srcPath := filepath.Join(srcDir, obj.Name()) + destPath := filepath.Join(destDir, obj.Name()) + + if obj.IsDir() { + err = symlinkFile(srcPath, destPath) + if err != nil { + return err + } + } + + } + return +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go deleted file mode 100644 index 0dae567d..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go +++ /dev/null @@ -1,61 +0,0 @@ -package auth - -import ( - "github.com/hashicorp/terraform-svchost" -) - -// CachingCredentialsSource creates a new credentials source that wraps another -// and caches its results in memory, on a per-hostname basis. -// -// No means is provided for expiration of cached credentials, so a caching -// credentials source should have a limited lifetime (one Terraform operation, -// for example) to ensure that time-limited credentials don't expire before -// their cache entries do. -func CachingCredentialsSource(source CredentialsSource) CredentialsSource { - return &cachingCredentialsSource{ - source: source, - cache: map[svchost.Hostname]HostCredentials{}, - } -} - -type cachingCredentialsSource struct { - source CredentialsSource - cache map[svchost.Hostname]HostCredentials -} - -// ForHost passes the given hostname on to the wrapped credentials source and -// caches the result to return for future requests with the same hostname. -// -// Both credentials and non-credentials (nil) responses are cached. -// -// No cache entry is created if the wrapped source returns an error, to allow -// the caller to retry the failing operation. 
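// For illustration only, a minimal sketch of how this (removed) caching
// wrapper was composed around another source; the underlying source may be
// any CredentialsSource value, and the hostname below is just an example.
func exampleCachingSource(underlying CredentialsSource) {
	cached := CachingCredentialsSource(underlying)
	host, err := svchost.ForComparison("app.terraform.io")
	if err != nil {
		return
	}
	// The first lookup consults the wrapped source; repeat lookups for the
	// same hostname are then served from the in-memory cache.
	if creds, err := cached.ForHost(host); err == nil && creds != nil {
		_ = creds.Token()
	}
}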
-func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - if cache, cached := s.cache[host]; cached { - return cache, nil - } - - result, err := s.source.ForHost(host) - if err != nil { - return result, err - } - - s.cache[host] = result - return result, nil -} - -func (s *cachingCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - // We'll delete the cache entry even if the store fails, since that just - // means that the next read will go to the real store and get a chance to - // see which object (old or new) is actually present. - delete(s.cache, host) - return s.source.StoreForHost(host, credentials) -} - -func (s *cachingCredentialsSource) ForgetForHost(host svchost.Hostname) error { - // We'll delete the cache entry even if the store fails, since that just - // means that the next read will go to the real store and get a chance to - // see if the object is still present. - delete(s.cache, host) - return s.source.ForgetForHost(host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go deleted file mode 100644 index 36441cd1..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go +++ /dev/null @@ -1,118 +0,0 @@ -// Package auth contains types and functions to manage authentication -// credentials for service hosts. -package auth - -import ( - "fmt" - "net/http" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-svchost" -) - -// Credentials is a list of CredentialsSource objects that can be tried in -// turn until one returns credentials for a host, or one returns an error. -// -// A Credentials is itself a CredentialsSource, wrapping its members. -// In principle one CredentialsSource can be nested inside another, though -// there is no good reason to do so. -// -// The write operations on a Credentials are tried only on the first object, -// under the assumption that it is the primary store. -type Credentials []CredentialsSource - -// NoCredentials is an empty CredentialsSource that always returns nil -// when asked for credentials. -var NoCredentials CredentialsSource = Credentials{} - -// A CredentialsSource is an object that may be able to provide credentials -// for a given host. -// -// Credentials lookups are not guaranteed to be concurrency-safe. Callers -// using these facilities in concurrent code must use external concurrency -// primitives to prevent race conditions. -type CredentialsSource interface { - // ForHost returns a non-nil HostCredentials if the source has credentials - // available for the host, and a nil HostCredentials if it does not. - // - // If an error is returned, progress through a list of CredentialsSources - // is halted and the error is returned to the user. - ForHost(host svchost.Hostname) (HostCredentials, error) - - // StoreForHost takes a HostCredentialsWritable and saves it as the - // credentials for the given host. - // - // If credentials are already stored for the given host, it will try to - // replace those credentials but may produce an error if such replacement - // is not possible. - StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error - - // ForgetForHost discards any stored credentials for the given host. It - // does nothing and returns successfully if no credentials are saved - // for that host. 
- ForgetForHost(host svchost.Hostname) error -} - -// HostCredentials represents a single set of credentials for a particular -// host. -type HostCredentials interface { - // PrepareRequest modifies the given request in-place to apply the - // receiving credentials. The usual behavior of this method is to - // add some sort of Authorization header to the request. - PrepareRequest(req *http.Request) - - // Token returns the authentication token. - Token() string -} - -// HostCredentialsWritable is an extension of HostCredentials for credentials -// objects that can be serialized as a JSON-compatible object value for -// storage. -type HostCredentialsWritable interface { - HostCredentials - - // ToStore returns a cty.Value, always of an object type, - // representing data that can be serialized to represent this object - // in persistent storage. - // - // The resulting value may uses only cty values that can be accepted - // by the cty JSON encoder, though the caller may elect to instead store - // it in some other format that has a JSON-compatible type system. - ToStore() cty.Value -} - -// ForHost iterates over the contained CredentialsSource objects and -// tries to obtain credentials for the given host from each one in turn. -// -// If any source returns either a non-nil HostCredentials or a non-nil error -// then this result is returned. Otherwise, the result is nil, nil. -func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) { - for _, source := range c { - creds, err := source.ForHost(host) - if creds != nil || err != nil { - return creds, err - } - } - return nil, nil -} - -// StoreForHost passes the given arguments to the same operation on the -// first CredentialsSource in the receiver. -func (c Credentials) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - if len(c) == 0 { - return fmt.Errorf("no credentials store is available") - } - - return c[0].StoreForHost(host, credentials) -} - -// ForgetForHost passes the given arguments to the same operation on the -// first CredentialsSource in the receiver. -func (c Credentials) ForgetForHost(host svchost.Hostname) error { - if len(c) == 0 { - return fmt.Errorf("no credentials store is available") - } - - return c[0].ForgetForHost(host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go b/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go deleted file mode 100644 index 7198c674..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go +++ /dev/null @@ -1,48 +0,0 @@ -package auth - -import ( - "github.com/zclconf/go-cty/cty" -) - -// HostCredentialsFromMap converts a map of key-value pairs from a credentials -// definition provided by the user (e.g. in a config file, or via a credentials -// helper) into a HostCredentials object if possible, or returns nil if -// no credentials could be extracted from the map. -// -// This function ignores map keys it is unfamiliar with, to allow for future -// expansion of the credentials map format for new credential types. -func HostCredentialsFromMap(m map[string]interface{}) HostCredentials { - if m == nil { - return nil - } - if token, ok := m["token"].(string); ok { - return HostCredentialsToken(token) - } - return nil -} - -// HostCredentialsFromObject converts a cty.Value of an object type into a -// HostCredentials object if possible, or returns nil if no credentials could -// be extracted from the map. 
-// -// This function ignores object attributes it is unfamiliar with, to allow for -// future expansion of the credentials object structure for new credential types. -// -// If the given value is not of an object type, this function will panic. -func HostCredentialsFromObject(obj cty.Value) HostCredentials { - if !obj.Type().HasAttribute("token") { - return nil - } - - tokenV := obj.GetAttr("token") - if tokenV.IsNull() || !tokenV.IsKnown() { - return nil - } - if !cty.String.Equals(tokenV.Type()) { - // Weird, but maybe some future Terraform version accepts an object - // here for some reason, so we'll be resilient. - return nil - } - - return HostCredentialsToken(tokenV.AsString()) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go deleted file mode 100644 index 76505f20..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go +++ /dev/null @@ -1,149 +0,0 @@ -package auth - -import ( - "bytes" - "encoding/json" - "fmt" - "os/exec" - "path/filepath" - - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-svchost" -) - -type helperProgramCredentialsSource struct { - executable string - args []string -} - -// HelperProgramCredentialsSource returns a CredentialsSource that runs the -// given program with the given arguments in order to obtain credentials. -// -// The given executable path must be an absolute path; it is the caller's -// responsibility to validate and process a relative path or other input -// provided by an end-user. If the given path is not absolute, this -// function will panic. -// -// When credentials are requested, the program will be run in a child process -// with the given arguments along with two additional arguments added to the -// end of the list: the literal string "get", followed by the requested -// hostname in ASCII compatibility form (punycode form). 
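// For illustration only, a sketch of decoding a helper program's "get"
// response: the helper prints a JSON object on stdout, and a token
// credential uses the shape HostCredentialsFromMap recognizes, e.g.
// {"token":"example-token"} (the token value here is hypothetical).
func exampleDecodeHelperOutput(stdout []byte) (HostCredentials, error) {
	var m map[string]interface{}
	if err := json.Unmarshal(stdout, &m); err != nil {
		return nil, err
	}
	// HostCredentialsFromMap returns nil (no credentials) if the map
	// contains no recognized keys.
	return HostCredentialsFromMap(m), nil
}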
-func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource { - if !filepath.IsAbs(executable) { - panic("NewCredentialsSourceHelperProgram requires absolute path to executable") - } - - fullArgs := make([]string, len(args)+1) - fullArgs[0] = executable - copy(fullArgs[1:], args) - - return &helperProgramCredentialsSource{ - executable: executable, - args: fullArgs, - } -} - -func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "get") - args = append(args, string(host)) - - outBuf := bytes.Buffer{} - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: nil, - Stdout: &outBuf, - Stderr: &errBuf, - } - err := cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return nil, fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return nil, fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - var m map[string]interface{} - err = json.Unmarshal(outBuf.Bytes(), &m) - if err != nil { - return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err) - } - - return HostCredentialsFromMap(m), nil -} - -func (s *helperProgramCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "store") - args = append(args, string(host)) - - toStore := credentials.ToStore() - toStoreRaw, err := ctyjson.Marshal(toStore, toStore.Type()) - if err != nil { - return fmt.Errorf("can't serialize credentials to store: %s", err) - } - - inReader := bytes.NewReader(toStoreRaw) - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: inReader, - Stderr: &errBuf, - Stdout: nil, - } - err = cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - return nil -} - -func (s *helperProgramCredentialsSource) ForgetForHost(host svchost.Hostname) error { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "forget") - args = append(args, string(host)) - - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: nil, - Stderr: &errBuf, - Stdout: nil, - } - err := cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go b/vendor/github.com/hashicorp/terraform-svchost/auth/static.go deleted file mode 100644 index 
f8b0b076..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go +++ /dev/null @@ -1,38 +0,0 @@ -package auth - -import ( - "fmt" - - "github.com/hashicorp/terraform-svchost" -) - -// StaticCredentialsSource is a credentials source that retrieves credentials -// from the provided map. It returns nil if a requested hostname is not -// present in the map. -// -// The caller should not modify the given map after passing it to this function. -func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource { - return staticCredentialsSource(creds) -} - -type staticCredentialsSource map[svchost.Hostname]map[string]interface{} - -func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - if s == nil { - return nil, nil - } - - if m, exists := s[host]; exists { - return HostCredentialsFromMap(m), nil - } - - return nil, nil -} - -func (s staticCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - return fmt.Errorf("can't store new credentials in a static credentials source") -} - -func (s staticCredentialsSource) ForgetForHost(host svchost.Hostname) error { - return fmt.Errorf("can't discard credentials from a static credentials source") -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go deleted file mode 100644 index 1d36553a..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go +++ /dev/null @@ -1,43 +0,0 @@ -package auth - -import ( - "net/http" - - "github.com/zclconf/go-cty/cty" -) - -// HostCredentialsToken is a HostCredentials implementation that represents a -// single "bearer token", to be sent to the server via an Authorization header -// with the auth type set to "Bearer". -// -// To save a token as the credentials for a host, convert the token string to -// this type and use the result as a HostCredentialsWritable implementation. -type HostCredentialsToken string - -// Interface implementation assertions. Compilation will fail here if -// HostCredentialsToken does not fully implement these interfaces. -var _ HostCredentials = HostCredentialsToken("") -var _ HostCredentialsWritable = HostCredentialsToken("") - -// PrepareRequest alters the given HTTP request by setting its Authorization -// header to the string "Bearer " followed by the encapsulated authentication -// token. -func (tc HostCredentialsToken) PrepareRequest(req *http.Request) { - if req.Header == nil { - req.Header = http.Header{} - } - req.Header.Set("Authorization", "Bearer "+string(tc)) -} - -// Token returns the authentication token. -func (tc HostCredentialsToken) Token() string { - return string(tc) -} - -// ToStore returns a credentials object with a single attribute "token" whose -// value is the token string. -func (tc HostCredentialsToken) ToStore() cty.Value { - return cty.ObjectVal(map[string]cty.Value{ - "token": cty.StringVal(string(tc)), - }) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go deleted file mode 100644 index 97831363..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go +++ /dev/null @@ -1,275 +0,0 @@ -// Package disco handles Terraform's remote service discovery protocol. 
-// -// This protocol allows mapping from a service hostname, as produced by the -// svchost package, to a set of services supported by that host and the -// endpoint information for each supported service. -package disco - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "mime" - "net/http" - "net/url" - "time" - - "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/auth" -) - -const ( - // Fixed path to the discovery manifest. - discoPath = "/.well-known/terraform.json" - - // Arbitrary-but-small number to prevent runaway redirect loops. - maxRedirects = 3 - - // Arbitrary-but-small time limit to prevent UI "hangs" during discovery. - discoTimeout = 11 * time.Second - - // 1MB - to prevent abusive services from using loads of our memory. - maxDiscoDocBytes = 1 * 1024 * 1024 -) - -// httpTransport is overridden during tests, to skip TLS verification. -var httpTransport = defaultHttpTransport() - -// Disco is the main type in this package, which allows discovery on given -// hostnames and caches the results by hostname to avoid repeated requests -// for the same information. -type Disco struct { - hostCache map[svchost.Hostname]*Host - credsSrc auth.CredentialsSource - - // Transport is a custom http.RoundTripper to use. - Transport http.RoundTripper -} - -// New returns a new initialized discovery object. -func New() *Disco { - return NewWithCredentialsSource(nil) -} - -// NewWithCredentialsSource returns a new discovery object initialized with -// the given credentials source. -func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco { - return &Disco{ - hostCache: make(map[svchost.Hostname]*Host), - credsSrc: credsSrc, - Transport: httpTransport, - } -} - -func (d *Disco) SetUserAgent(uaString string) { - d.Transport = &userAgentRoundTripper{ - innerRt: d.Transport, - userAgent: uaString, - } -} - -// SetCredentialsSource provides a credentials source that will be used to -// add credentials to outgoing discovery requests, where available. -// -// If this method is never called, no outgoing discovery requests will have -// credentials. -func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { - d.credsSrc = src -} - -// CredentialsSource returns the credentials source associated with the receiver, -// or an empty credentials source if none is associated. -func (d *Disco) CredentialsSource() auth.CredentialsSource { - if d.credsSrc == nil { - // We'll return an empty one just to save the caller from having to - // protect against the nil case, since this interface already allows - // for the possibility of there being no credentials at all. - return auth.StaticCredentialsSource(nil) - } - return d.credsSrc -} - -// CredentialsForHost returns a non-nil HostCredentials if the embedded source has -// credentials available for the host, and a nil HostCredentials if it does not. -func (d *Disco) CredentialsForHost(hostname svchost.Hostname) (auth.HostCredentials, error) { - if d.credsSrc == nil { - return nil, nil - } - return d.credsSrc.ForHost(hostname) -} - -// ForceHostServices provides a pre-defined set of services for a given -// host, which prevents the receiver from attempting network-based discovery -// for the given host. Instead, the given services map will be returned -// verbatim. 
-// -// When providing "forced" services, any relative URLs are resolved against -// the initial discovery URL that would have been used for network-based -// discovery, yielding the same results as if the given map were published -// at the host's default discovery URL, though using absolute URLs is strongly -// recommended to make the configured behavior more explicit. -func (d *Disco) ForceHostServices(hostname svchost.Hostname, services map[string]interface{}) { - if services == nil { - services = map[string]interface{}{} - } - - d.hostCache[hostname] = &Host{ - discoURL: &url.URL{ - Scheme: "https", - Host: string(hostname), - Path: discoPath, - }, - hostname: hostname.ForDisplay(), - services: services, - transport: d.Transport, - } -} - -// Discover runs the discovery protocol against the given hostname (which must -// already have been validated and prepared with svchost.ForComparison) and -// returns an object describing the services available at that host. -// -// If a given hostname supports no Terraform services at all, a non-nil but -// empty Host object is returned. When giving feedback to the end user about -// such situations, we say "host does not provide a service", -// regardless of whether that is due to that service specifically being absent -// or due to the host not providing Terraform services at all, since we don't -// wish to expose the detail of whole-host discovery to an end-user. -func (d *Disco) Discover(hostname svchost.Hostname) (*Host, error) { - if host, cached := d.hostCache[hostname]; cached { - return host, nil - } - - host, err := d.discover(hostname) - if err != nil { - return nil, err - } - d.hostCache[hostname] = host - - return host, nil -} - -// DiscoverServiceURL is a convenience wrapper for discovery on a given -// hostname and then looking up a particular service in the result. -func (d *Disco) DiscoverServiceURL(hostname svchost.Hostname, serviceID string) (*url.URL, error) { - host, err := d.Discover(hostname) - if err != nil { - return nil, err - } - return host.ServiceURL(serviceID) -} - -// discover implements the actual discovery process, with its result cached -// by the public-facing Discover method. -func (d *Disco) discover(hostname svchost.Hostname) (*Host, error) { - discoURL := &url.URL{ - Scheme: "https", - Host: hostname.String(), - Path: discoPath, - } - - client := &http.Client{ - Transport: d.Transport, - Timeout: discoTimeout, - - CheckRedirect: func(req *http.Request, via []*http.Request) error { - log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) - if len(via) > maxRedirects { - return errors.New("too many redirects") // this error will never actually be seen - } - return nil - }, - } - - req := &http.Request{ - Header: make(http.Header), - Method: "GET", - URL: discoURL, - } - req.Header.Set("Accept", "application/json") - - creds, err := d.CredentialsForHost(hostname) - if err != nil { - log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", hostname, err) - } - if creds != nil { - // Update the request to include credentials. - creds.PrepareRequest(req) - } - - log.Printf("[DEBUG] Service discovery for %s at %s", hostname, discoURL) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Failed to request discovery document: %v", err) - } - defer resp.Body.Close() - - host := &Host{ - // Use the discovery URL from resp.Request in - // case the client followed any redirects. 
- discoURL: resp.Request.URL, - hostname: hostname.ForDisplay(), - transport: d.Transport, - } - - // Return the host without any services. - if resp.StatusCode == 404 { - return host, nil - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("Failed to request discovery document: %s", resp.Status) - } - - contentType := resp.Header.Get("Content-Type") - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, fmt.Errorf("Discovery URL has a malformed Content-Type %q", contentType) - } - if mediaType != "application/json" { - return nil, fmt.Errorf("Discovery URL returned an unsupported Content-Type %q", mediaType) - } - - // This doesn't catch chunked encoding, because ContentLength is -1 in that case. - if resp.ContentLength > maxDiscoDocBytes { - // Size limit here is not a contractual requirement and so we may - // adjust it over time if we find a different limit is warranted. - return nil, fmt.Errorf( - "Discovery doc response is too large (got %d bytes; limit %d)", - resp.ContentLength, maxDiscoDocBytes, - ) - } - - // If the response is using chunked encoding then we can't predict its - // size, but we'll at least prevent reading the entire thing into memory. - lr := io.LimitReader(resp.Body, maxDiscoDocBytes) - - servicesBytes, err := ioutil.ReadAll(lr) - if err != nil { - return nil, fmt.Errorf("Error reading discovery document body: %v", err) - } - - var services map[string]interface{} - err = json.Unmarshal(servicesBytes, &services) - if err != nil { - return nil, fmt.Errorf("Failed to decode discovery document as a JSON object: %v", err) - } - host.services = services - - return host, nil -} - -// Forget invalidates any cached record of the given hostname. If the host -// has no cache entry then this is a no-op. -func (d *Disco) Forget(hostname svchost.Hostname) { - delete(d.hostCache, hostname) -} - -// ForgetAll is like Forget, but for all of the hostnames that have cache entries. -func (d *Disco) ForgetAll() { - d.hostCache = make(map[svchost.Hostname]*Host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/host.go b/vendor/github.com/hashicorp/terraform-svchost/disco/host.go deleted file mode 100644 index 2d0fc9f1..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/host.go +++ /dev/null @@ -1,412 +0,0 @@ -package disco - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-version" -) - -const versionServiceID = "versions.v1" - -// Host represents a service discovered host. -type Host struct { - discoURL *url.URL - hostname string - services map[string]interface{} - transport http.RoundTripper -} - -// Constraints represents the version constraints of a service. -type Constraints struct { - Service string `json:"service"` - Product string `json:"product"` - Minimum string `json:"minimum"` - Maximum string `json:"maximum"` - Excluding []string `json:"excluding"` -} - -// ErrServiceNotProvided is returned when the service is not provided. -type ErrServiceNotProvided struct { - hostname string - service string -} - -// Error returns a customized error message. -func (e *ErrServiceNotProvided) Error() string { - if e.hostname == "" { - return fmt.Sprintf("host does not provide a %s service", e.service) - } - return fmt.Sprintf("host %s does not provide a %s service", e.hostname, e.service) -} - -// ErrVersionNotSupported is returned when the version is not supported. 
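// For illustration only, a minimal sketch of a discovery round-trip with
// this (removed) API, distinguishing the two not-found cases; the service
// ID "modules.v1" is just an example, and hostname is assumed to have been
// prepared with svchost.ForComparison.
func exampleDiscoverService(d *Disco, hostname svchost.Hostname) (*url.URL, error) {
	host, err := d.Discover(hostname)
	if err != nil {
		return nil, err
	}
	u, err := host.ServiceURL("modules.v1")
	switch err.(type) {
	case *ErrServiceNotProvided:
		// The host answered discovery but offers no such service at all.
	case *ErrVersionNotSupported:
		// The host offers the service, but not this protocol version.
	}
	return u, err
}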
-type ErrVersionNotSupported struct { - hostname string - service string - version string -} - -// Error returns a customized error message. -func (e *ErrVersionNotSupported) Error() string { - if e.hostname == "" { - return fmt.Sprintf("host does not support %s version %s", e.service, e.version) - } - return fmt.Sprintf("host %s does not support %s version %s", e.hostname, e.service, e.version) -} - -// ErrNoVersionConstraints is returned when checkpoint was disabled -// or the endpoint to query for version constraints was unavailable. -type ErrNoVersionConstraints struct { - disabled bool -} - -// Error returns a customized error message. -func (e *ErrNoVersionConstraints) Error() string { - if e.disabled { - return "checkpoint disabled" - } - return "unable to contact versions service" -} - -// ServiceURL returns the URL associated with the given service identifier, -// which should be of the form "servicename.vN". -// -// A non-nil result is always an absolute URL with a scheme of either HTTPS -// or HTTP. -func (h *Host) ServiceURL(id string) (*url.URL, error) { - svc, ver, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - urlStr, ok := h.services[id].(string) - if !ok { - // See if we have a matching service as that would indicate - // the service is supported, but not the requested version. - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - return nil, &ErrVersionNotSupported{ - hostname: h.hostname, - service: svc, - version: ver.Original(), - } - } - } - - // No discovered services match the requested service. - return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse service URL: %v", err) - } - - return u, nil -} - -// ServiceOAuthClient returns the OAuth client configuration associated with the -// given service identifier, which should be of the form "servicename.vN". -// -// This is an alternative to ServiceURL for unusual services that require -// a full OAuth2 client definition rather than just a URL. Use this only -// for services whose specification calls for this sort of definition. -func (h *Host) ServiceOAuthClient(id string) (*OAuthClient, error) { - svc, ver, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - if _, ok := h.services[id]; !ok { - // See if we have a matching service as that would indicate - // the service is supported, but not the requested version. - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - return nil, &ErrVersionNotSupported{ - hostname: h.hostname, - service: svc, - version: ver.Original(), - } - } - } - - // No discovered services match the requested service. - return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - var raw map[string]interface{} - switch v := h.services[id].(type) { - case map[string]interface{}: - raw = v // Great! - case []map[string]interface{}: - // An absolutely infuriating legacy HCL ambiguity. - raw = v[0] - default: - // Debug message because raw Go types don't belong in our UI. 
- log.Printf("[DEBUG] The definition for %s has Go type %T", id, h.services[id]) - return nil, fmt.Errorf("Service %s must be declared with an object value in the service discovery document", id) - } - - var grantTypes OAuthGrantTypeSet - if rawGTs, ok := raw["grant_types"]; ok { - if gts, ok := rawGTs.([]interface{}); ok { - var kws []string - for _, gtI := range gts { - gt, ok := gtI.(string) - if !ok { - // We'll ignore this so that we can potentially introduce - // other types into this array later if we need to. - continue - } - kws = append(kws, gt) - } - grantTypes = NewOAuthGrantTypeSet(kws...) - } else { - return nil, fmt.Errorf("Service %s is defined with invalid grant_types property: must be an array of grant type strings", id) - } - } else { - grantTypes = NewOAuthGrantTypeSet("authz_code") - } - - ret := &OAuthClient{ - SupportedGrantTypes: grantTypes, - } - if clientIDStr, ok := raw["client"].(string); ok { - ret.ID = clientIDStr - } else { - return nil, fmt.Errorf("Service %s definition is missing required property \"client\"", id) - } - if urlStr, ok := raw["authz"].(string); ok { - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse authorization URL: %v", err) - } - ret.AuthorizationURL = u - } else { - if grantTypes.RequiresAuthorizationEndpoint() { - return nil, fmt.Errorf("Service %s definition is missing required property \"authz\"", id) - } - } - if urlStr, ok := raw["token"].(string); ok { - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse token URL: %v", err) - } - ret.TokenURL = u - } else { - if grantTypes.RequiresTokenEndpoint() { - return nil, fmt.Errorf("Service %s definition is missing required property \"token\"", id) - } - } - if portsRaw, ok := raw["ports"].([]interface{}); ok { - if len(portsRaw) != 2 { - return nil, fmt.Errorf("Invalid \"ports\" definition for service %s: must be a two-element array", id) - } - invalidPortsErr := fmt.Errorf("Invalid \"ports\" definition for service %s: both ports must be whole numbers between 1024 and 65535", id) - ports := make([]uint16, 2) - for i := range ports { - switch v := portsRaw[i].(type) { - case float64: - // JSON unmarshaling always produces float64. HCL 2 might, if - // an invalid fractional number were given. - if float64(uint16(v)) != v || v < 1024 { - return nil, invalidPortsErr - } - ports[i] = uint16(v) - case int: - // Legacy HCL produces int. HCL 2 will too, if the given number - // is a whole number. - if v < 1024 || v > 65535 { - return nil, invalidPortsErr - } - ports[i] = uint16(v) - default: - // Debug message because raw Go types don't belong in our UI. - log.Printf("[DEBUG] Port value %d has Go type %T", i, portsRaw[i]) - return nil, invalidPortsErr - } - } - if ports[1] < ports[0] { - return nil, fmt.Errorf("Invalid \"ports\" definition for service %s: minimum port cannot be greater than maximum port", id) - } - ret.MinPort = ports[0] - ret.MaxPort = ports[1] - } else { - // Default is to accept any port in the range, for a client that is - // able to call back to any localhost port. - ret.MinPort = 1024 - ret.MaxPort = 65535 - } - - return ret, nil -} - -func (h *Host) parseURL(urlStr string) (*url.URL, error) { - u, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - - // Make relative URLs absolute using our discovery URL. 
- if !u.IsAbs() { - u = h.discoURL.ResolveReference(u) - } - - if u.Scheme != "https" && u.Scheme != "http" { - return nil, fmt.Errorf("unsupported scheme %s", u.Scheme) - } - if u.User != nil { - return nil, fmt.Errorf("embedded username/password information is not permitted") - } - - // Fragment part is irrelevant, since we're not a browser. - u.Fragment = "" - - return u, nil -} - -// VersionConstraints returns the contraints for a given service identifier -// (which should be of the form "servicename.vN") and product. -// -// When an exact (service and version) match is found, the constraints for -// that service are returned. -// -// When the requested version is not provided but the service is, we will -// search for all alternative versions. If mutliple alternative versions -// are found, the contrains of the latest available version are returned. -// -// When a service is not provided at all an error will be returned instead. -// -// When checkpoint is disabled or when a 404 is returned after making the -// HTTP call, an ErrNoVersionConstraints error will be returned. -func (h *Host) VersionConstraints(id, product string) (*Constraints, error) { - svc, _, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // Return early if checkpoint is disabled. - if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { - return nil, &ErrNoVersionConstraints{disabled: true} - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - // Try to get the service URL for the version service and - // return early if the service isn't provided by the host. - u, err := h.ServiceURL(versionServiceID) - if err != nil { - return nil, err - } - - // Check if we have an exact (service and version) match. - if _, ok := h.services[id].(string); !ok { - // If we don't have an exact match, we search for all matching - // services and then use the service ID of the latest version. - var services []string - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - services = append(services, serviceID) - } - } - - if len(services) == 0 { - // No discovered services match the requested service. - return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - // Set id to the latest service ID we found. - var latest *version.Version - for _, serviceID := range services { - if _, ver, err := parseServiceID(serviceID); err == nil { - if latest == nil || latest.LessThan(ver) { - id = serviceID - latest = ver - } - } - } - } - - // Set a default timeout of 1 sec for the versions request (in milliseconds) - timeout := 1000 - if v, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { - timeout = v - } - - client := &http.Client{ - Transport: h.transport, - Timeout: time.Duration(timeout) * time.Millisecond, - } - - // Prepare the service URL by setting the service and product. - v := u.Query() - v.Set("product", product) - u.Path += id - u.RawQuery = v.Encode() - - // Create a new request. 
- req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, fmt.Errorf("Failed to create version constraints request: %v", err) - } - req.Header.Set("Accept", "application/json") - - log.Printf("[DEBUG] Retrieve version constraints for service %s and product %s", id, product) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Failed to request version constraints: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode == 404 { - return nil, &ErrNoVersionConstraints{disabled: false} - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("Failed to request version constraints: %s", resp.Status) - } - - // Parse the constraints from the response body. - result := &Constraints{} - if err := json.NewDecoder(resp.Body).Decode(result); err != nil { - return nil, fmt.Errorf("Error parsing version constraints: %v", err) - } - - return result, nil -} - -func parseServiceID(id string) (string, *version.Version, error) { - parts := strings.SplitN(id, ".", 2) - if len(parts) != 2 { - return "", nil, fmt.Errorf("Invalid service ID format (i.e. service.vN): %s", id) - } - - version, err := version.NewVersion(parts[1]) - if err != nil { - return "", nil, fmt.Errorf("Invalid service version: %v", err) - } - - return parts[0], version, nil -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go b/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go deleted file mode 100644 index 7e4a3856..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go +++ /dev/null @@ -1,30 +0,0 @@ -package disco - -import ( - "net/http" - - "github.com/hashicorp/go-cleanhttp" -) - -const DefaultUserAgent = "terraform-svchost/1.0" - -func defaultHttpTransport() http.RoundTripper { - t := cleanhttp.DefaultPooledTransport() - return &userAgentRoundTripper{ - innerRt: t, - userAgent: DefaultUserAgent, - } -} - -type userAgentRoundTripper struct { - innerRt http.RoundTripper - userAgent string -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", rt.userAgent) - } - - return rt.innerRt.RoundTrip(req) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go b/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go deleted file mode 100644 index 9308bbf7..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go +++ /dev/null @@ -1,178 +0,0 @@ -package disco - -import ( - "fmt" - "net/url" - "strings" - - "golang.org/x/oauth2" -) - -// OAuthClient represents an OAuth client configuration, which is used for -// unusual services that require an entire OAuth client configuration as part -// of their service discovery, rather than just a URL. -type OAuthClient struct { - // ID is the identifier for the client, to be used as "client_id" in - // OAuth requests. - ID string - - // Authorization URL is the URL of the authorization endpoint that must - // be used for this OAuth client, as defined in the OAuth2 specifications. - // - // Not all grant types use the authorization endpoint, so it may be omitted - // if none of the grant types in SupportedGrantTypes require it. - AuthorizationURL *url.URL - - // Token URL is the URL of the token endpoint that must be used for this - // OAuth client, as defined in the OAuth2 specifications. 
- // - // Not all grant types use the token endpoint, so it may be omitted - // if none of the grant types in SupportedGrantTypes require it. - TokenURL *url.URL - - // MinPort and MaxPort define a range of TCP ports on localhost that this - // client is able to use as redirect_uri in an authorization request. - // Terraform will select a port from this range for the temporary HTTP - // server it creates to receive the authorization response, giving - // a URL like http://localhost:NNN/ where NNN is the selected port number. - // - // Terraform will reject any port numbers in this range less than 1024, - // to respect the common convention (enforced on some operating systems) - // that lower port numbers are reserved for "privileged" services. - MinPort, MaxPort uint16 - - // SupportedGrantTypes is a set of the grant types that the client may - // choose from. This includes an entry for each distinct type advertised - // by the server, even if a particular keyword is not supported by the - // current version of Terraform. - SupportedGrantTypes OAuthGrantTypeSet -} - -// Endpoint returns an oauth2.Endpoint value ready to be used with the oauth2 -// library, representing the URLs from the receiver. -func (c *OAuthClient) Endpoint() oauth2.Endpoint { - ep := oauth2.Endpoint{ - // We don't actually auth because we're not a server-based OAuth client, - // so this instead just means that we include client_id as an argument - // in our requests. - AuthStyle: oauth2.AuthStyleInParams, - } - - if c.AuthorizationURL != nil { - ep.AuthURL = c.AuthorizationURL.String() - } - if c.TokenURL != nil { - ep.TokenURL = c.TokenURL.String() - } - - return ep -} - -// OAuthGrantType is an enumeration of grant type strings that a host can -// advertise support for. -// -// Values of this type don't necessarily match with a known constant of the -// type, because they may represent grant type keywords defined in a later -// version of Terraform which this version doesn't yet know about. -type OAuthGrantType string - -const ( - // OAuthAuthzCodeGrant represents an authorization code grant, as - // defined in IETF RFC 6749 section 4.1. - OAuthAuthzCodeGrant = OAuthGrantType("authz_code") - - // OAuthOwnerPasswordGrant represents a resource owner password - // credentials grant, as defined in IETF RFC 6749 section 4.3. - OAuthOwnerPasswordGrant = OAuthGrantType("password") -) - -// UsesAuthorizationEndpoint returns true if the receiving grant type makes -// use of the authorization endpoint from the client configuration, and thus -// if the authorization endpoint ought to be required. -func (t OAuthGrantType) UsesAuthorizationEndpoint() bool { - switch t { - case OAuthAuthzCodeGrant: - return true - case OAuthOwnerPasswordGrant: - return false - default: - // We'll default to false so that we don't impose any requirements - // on any grant type keywords that might be defined for future - // versions of Terraform. - return false - } -} - -// UsesTokenEndpoint returns true if the receiving grant type makes -// use of the token endpoint from the client configuration, and thus -// if the authorization endpoint ought to be required. -func (t OAuthGrantType) UsesTokenEndpoint() bool { - switch t { - case OAuthAuthzCodeGrant: - return true - case OAuthOwnerPasswordGrant: - return true - default: - // We'll default to false so that we don't impose any requirements - // on any grant type keywords that might be defined for future - // versions of Terraform. 
- return false - } -} - -// OAuthGrantTypeSet represents a set of OAuthGrantType values. -type OAuthGrantTypeSet map[OAuthGrantType]struct{} - -// NewOAuthGrantTypeSet constructs a new grant type set from the given list -// of grant type keyword strings. Any duplicates in the list are ignored. -func NewOAuthGrantTypeSet(keywords ...string) OAuthGrantTypeSet { - ret := make(OAuthGrantTypeSet, len(keywords)) - for _, kw := range keywords { - ret[OAuthGrantType(kw)] = struct{}{} - } - return ret -} - -// Has returns true if the given grant type is in the receiving set. -func (s OAuthGrantTypeSet) Has(t OAuthGrantType) bool { - _, ok := s[t] - return ok -} - -// RequiresAuthorizationEndpoint returns true if any of the grant types in -// the set are known to require an authorization endpoint. -func (s OAuthGrantTypeSet) RequiresAuthorizationEndpoint() bool { - for t := range s { - if t.UsesAuthorizationEndpoint() { - return true - } - } - return false -} - -// RequiresTokenEndpoint returns true if any of the grant types in -// the set are known to require a token endpoint. -func (s OAuthGrantTypeSet) RequiresTokenEndpoint() bool { - for t := range s { - if t.UsesTokenEndpoint() { - return true - } - } - return false -} - -// GoString implements fmt.GoStringer. -func (s OAuthGrantTypeSet) GoString() string { - var buf strings.Builder - i := 0 - buf.WriteString("disco.NewOAuthGrantTypeSet(") - for t := range s { - if i > 0 { - buf.WriteString(", ") - } - fmt.Fprintf(&buf, "%q", string(t)) - i++ - } - buf.WriteString(")") - return buf.String() -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/go.mod b/vendor/github.com/hashicorp/terraform-svchost/go.mod deleted file mode 100644 index 8f29e4ac..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/hashicorp/terraform-svchost - -go 1.12 - -require ( - github.com/google/go-cmp v0.3.1 - github.com/hashicorp/go-cleanhttp v0.5.1 - github.com/hashicorp/go-version v1.2.0 - github.com/zclconf/go-cty v1.1.0 - golang.org/x/net v0.0.0-20191009170851-d66e71096ffb - golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 -) diff --git a/vendor/github.com/hashicorp/terraform-svchost/go.sum b/vendor/github.com/hashicorp/terraform-svchost/go.sum deleted file mode 100644 index 9ad1712f..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/go.sum +++ /dev/null @@ -1,36 +0,0 @@ -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/zclconf/go-cty v1.1.0 h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb h1:TR699M2v0qoKTOHxeLgp6zPqaQNs74f01a/ob9W0qko= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/terraform-svchost/label_iter.go b/vendor/github.com/hashicorp/terraform-svchost/label_iter.go deleted file mode 100644 index af8ccbab..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/label_iter.go +++ /dev/null @@ -1,69 +0,0 @@ -package svchost - -import ( - "strings" -) - -// A labelIter allows iterating over domain name labels. -// -// This type is copied from golang.org/x/net/idna, where it is used -// to segment hostnames into their separate labels for analysis. We use -// it for the same purpose here, in ForComparison. 
-type labelIter struct { - orig string - slice []string - curStart int - curEnd int - i int -} - -func (l *labelIter) reset() { - l.curStart = 0 - l.curEnd = 0 - l.i = 0 -} - -func (l *labelIter) done() bool { - return l.curStart >= len(l.orig) -} - -func (l *labelIter) result() string { - if l.slice != nil { - return strings.Join(l.slice, ".") - } - return l.orig -} - -func (l *labelIter) label() string { - if l.slice != nil { - return l.slice[l.i] - } - p := strings.IndexByte(l.orig[l.curStart:], '.') - l.curEnd = l.curStart + p - if p == -1 { - l.curEnd = len(l.orig) - } - return l.orig[l.curStart:l.curEnd] -} - -// next sets the value to the next label. It skips the last label if it is empty. -func (l *labelIter) next() { - l.i++ - if l.slice != nil { - if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { - l.curStart = len(l.orig) - } - } else { - l.curStart = l.curEnd + 1 - if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { - l.curStart = len(l.orig) - } - } -} - -func (l *labelIter) set(s string) { - if l.slice == nil { - l.slice = strings.Split(l.orig, ".") - } - l.slice[l.i] = s -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/svchost.go b/vendor/github.com/hashicorp/terraform-svchost/svchost.go deleted file mode 100644 index 4060b767..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/svchost.go +++ /dev/null @@ -1,207 +0,0 @@ -// Package svchost deals with the representations of the so-called "friendly -// hostnames" that we use to represent systems that provide Terraform-native -// remote services, such as module registry, remote operations, etc. -// -// Friendly hostnames are specified such that, as much as possible, they -// are consistent with how web browsers think of hostnames, so that users -// can bring their intuitions about how hostnames behave when they access -// a Terraform Enterprise instance's web UI (or indeed any other website) -// and have this behave in a similar way. -package svchost - -import ( - "errors" - "fmt" - "strconv" - "strings" - - "golang.org/x/net/idna" -) - -// Hostname is specialized name for string that indicates that the string -// has been converted to (or was already in) the storage and comparison form. -// -// Hostname values are not suitable for display in the user-interface. Use -// the ForDisplay method to obtain a form suitable for display in the UI. -// -// Unlike user-supplied hostnames, strings of type Hostname (assuming they -// were constructed by a function within this package) can be compared for -// equality using the standard Go == operator. -type Hostname string - -// acePrefix is the ASCII Compatible Encoding prefix, used to indicate that -// a domain name label is in "punycode" form. -const acePrefix = "xn--" - -// displayProfile is a very liberal idna profile that we use to do -// normalization for display without imposing validation rules. -var displayProfile = idna.New( - idna.MapForLookup(), - idna.Transitional(true), -) - -// ForDisplay takes a user-specified hostname and returns a normalized form of -// it suitable for display in the UI. -// -// If the input is so invalid that no normalization can be performed then -// this will return the input, assuming that the caller still wants to -// display _something_. This function is, however, more tolerant than the -// other functions in this package and will make a best effort to prepare -// _any_ given hostname for display. 
-// -// For validation, use either IsValid (for explicit validation) or -// ForComparison (which implicitly validates, returning an error if invalid). -func ForDisplay(given string) string { - var portPortion string - if colonPos := strings.Index(given, ":"); colonPos != -1 { - given, portPortion = given[:colonPos], given[colonPos:] - } - portPortion, _ = normalizePortPortion(portPortion) - - ascii, err := displayProfile.ToASCII(given) - if err != nil { - return given + portPortion - } - display, err := displayProfile.ToUnicode(ascii) - if err != nil { - return given + portPortion - } - return display + portPortion -} - -// IsValid returns true if the given user-specified hostname is a valid -// service hostname. -// -// Validity is determined by complying with the RFC 5891 requirements for -// names that are valid for domain lookup (section 5), with the additional -// requirement that user-supplied forms must not _already_ contain -// Punycode segments. -func IsValid(given string) bool { - _, err := ForComparison(given) - return err == nil -} - -// ForComparison takes a user-specified hostname and returns a normalized -// form of it suitable for storage and comparison. The result is not suitable -// for display to end-users because it uses Punycode to represent non-ASCII -// characters, and this form is unreadable for non-ASCII-speaking humans. -// -// The result is typed as Hostname -- a specialized name for string -- so that -// other APIs can make it clear within the type system whether they expect a -// user-specified or display-form hostname or a value already normalized for -// comparison. -// -// The returned Hostname is not valid if the returned error is non-nil. -func ForComparison(given string) (Hostname, error) { - var portPortion string - if colonPos := strings.Index(given, ":"); colonPos != -1 { - given, portPortion = given[:colonPos], given[colonPos:] - } - - var err error - portPortion, err = normalizePortPortion(portPortion) - if err != nil { - return Hostname(""), err - } - - if given == "" { - return Hostname(""), fmt.Errorf("empty string is not a valid hostname") - } - - // First we'll apply our additional constraint that Punycode must not - // be given directly by the user. This is not an IDN specification - // requirement, but we prohibit it to force users to use human-readable - // hostname forms within Terraform configuration. - labels := labelIter{orig: given} - for ; !labels.done(); labels.next() { - label := labels.label() - if label == "" { - return Hostname(""), fmt.Errorf( - "hostname contains empty label (two consecutive periods)", - ) - } - if strings.HasPrefix(label, acePrefix) { - return Hostname(""), fmt.Errorf( - "hostname label %q specified in punycode format; service hostnames must be given in unicode", - label, - ) - } - } - - result, err := idna.Lookup.ToASCII(given) - if err != nil { - return Hostname(""), err - } - return Hostname(result + portPortion), nil -} - -// ForDisplay returns a version of the receiver that is appropriate for display -// in the UI. This includes converting any punycode labels to their -// corresponding Unicode characters. -// -// A round-trip through ForComparison and this ForDisplay method does not -// guarantee the same result as calling this package's top-level ForDisplay -// function, since a round-trip through the Hostname type implies stricter -// handling than we do when doing basic display-only processing. 
-func (h Hostname) ForDisplay() string { - given := string(h) - var portPortion string - if colonPos := strings.Index(given, ":"); colonPos != -1 { - given, portPortion = given[:colonPos], given[colonPos:] - } - // We don't normalize the port portion here because we assume it's - // already been normalized on the way in. - - result, err := idna.Lookup.ToUnicode(given) - if err != nil { - // Should never happen, since type Hostname indicates that a string - // passed through our validation rules. - panic(fmt.Errorf("ForDisplay called on invalid Hostname: %s", err)) - } - return result + portPortion -} - -func (h Hostname) String() string { - return string(h) -} - -func (h Hostname) GoString() string { - return fmt.Sprintf("svchost.Hostname(%q)", string(h)) -} - -// normalizePortPortion attempts to normalize the "port portion" of a hostname, -// which begins with the first colon in the hostname and should be followed -// by a string of decimal digits. -// -// If the port portion is valid, a normalized version of it is returned along -// with a nil error. -// -// If the port portion is invalid, the input string is returned verbatim along -// with a non-nil error. -// -// An empty string is a valid port portion representing the absence of a port. -// If non-empty, the first character must be a colon. -func normalizePortPortion(s string) (string, error) { - if s == "" { - return s, nil - } - - if s[0] != ':' { - // should never happen, since caller tends to guarantee the presence - // of a colon due to how it's extracted from the string. - return s, errors.New("port portion is missing its initial colon") - } - - numStr := s[1:] - num, err := strconv.Atoi(numStr) - if err != nil { - return s, errors.New("port portion contains non-digit characters") - } - if num == 443 { - return "", nil // ":443" is the default - } - if num > 65535 { - return s, errors.New("port number is greater than 65535") - } - return fmt.Sprintf(":%d", num), nil -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go index 123b84bb..2069361a 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go @@ -49,7 +49,7 @@ type AcmeServerView struct { AcmeAccounts []*LinkView `json:"acmeAccounts,omitempty"` Name *string `json:"name"` Url *string `json:"url"` - Id json.Number `json:"id,omitempty"` + Id *string `json:"id,omitempty"` } //AcmeServersView - A collection of ACME servers. @@ -1158,18 +1158,18 @@ type TargetHostPortView struct { //ThirdPartyServiceView - A third-party service. 
type ThirdPartyServiceView struct { - AvailabilityProfileId *int `json:"availabilityProfileId,omitempty"` - ExpectedHostname *string `json:"expectedHostname,omitempty"` - HostValue *string `json:"hostValue,omitempty"` - Id json.Number `json:"id,omitempty"` - LoadBalancingStrategyId *int `json:"loadBalancingStrategyId,omitempty"` - MaxConnections *int `json:"maxConnections,omitempty"` - Name *string `json:"name"` - Secure *bool `json:"secure,omitempty"` - SkipHostnameVerification *bool `json:"skipHostnameVerification,omitempty"` - Targets *[]*string `json:"targets"` - TrustedCertificateGroupId *int `json:"trustedCertificateGroupId,omitempty"` - UseProxy *bool `json:"useProxy,omitempty"` + AvailabilityProfileId *int `json:"availabilityProfileId,omitempty"` + ExpectedHostname *string `json:"expectedHostname,omitempty"` + HostValue *string `json:"hostValue,omitempty"` + Id *string `json:"id,omitempty"` + LoadBalancingStrategyId *int `json:"loadBalancingStrategyId,omitempty"` + MaxConnections *int `json:"maxConnections,omitempty"` + Name *string `json:"name"` + Secure *bool `json:"secure,omitempty"` + SkipHostnameVerification *bool `json:"skipHostnameVerification,omitempty"` + Targets *[]*string `json:"targets"` + TrustedCertificateGroupId *int `json:"trustedCertificateGroupId,omitempty"` + UseProxy *bool `json:"useProxy,omitempty"` } //ThirdPartyServicesView - A collection of third-party service items. diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go index c01b0c8f..b6344965 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go @@ -10,6 +10,7 @@ import ( "net/http" "net/http/httputil" "net/url" + "runtime" ) const logReqMsg = `DEBUG: Request %s Details: @@ -159,6 +160,7 @@ func (c *Client) newRequest(method string, path *url.URL, body interface{}) (*ht } req.SetBasicAuth(c.Username, c.Password) req.Header.Add("X-Xsrf-Header", "PingAccess") + req.Header.Add("User-Agent", fmt.Sprintf("%s/%s (%s; %s; %s)", SDKName, SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)) return req, nil } diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccessSDK.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccessSDK.go new file mode 100644 index 00000000..e988a0ac --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccessSDK.go @@ -0,0 +1,5 @@ +package pingaccess + +const SDKName = "pingaccess-sdk-go" + +const SDKVersion = "6.0.1" diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore index 5091fb07..531fcc11 100644 --- a/vendor/github.com/jmespath/go-jmespath/.gitignore +++ b/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -1,4 +1,4 @@ -/jpgo +jpgo jmespath-fuzz.zip cpu.out go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go index 8e26ffee..9cfa988b 100644 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -2,7 +2,7 @@ package jmespath import "strconv" -// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is // safe for concurrent use by multiple goroutines. 
type JMESPath struct { ast ASTNode diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml deleted file mode 100644 index 98db8f06..00000000 --- a/vendor/github.com/mattn/go-colorable/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - tip - -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md deleted file mode 100644 index 56729a92..00000000 --- a/vendor/github.com/mattn/go-colorable/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# go-colorable - -[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) -[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) -[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master) -[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) - -Colorable writer for windows. - -For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) -This package is possible to handle escape sequence for ansi color on windows. - -## Too Bad! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) - - -## So Good! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) - -## Usage - -```go -logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) -logrus.SetOutput(colorable.NewColorableStdout()) - -logrus.Info("succeeded") -logrus.Warn("not correct") -logrus.Error("something error") -logrus.Fatal("panic") -``` - -You can compile above code on non-windows OSs. - -## Installation - -``` -$ go get github.com/mattn/go-colorable -``` - -# License - -MIT - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go deleted file mode 100644 index 1f28d773..00000000 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build appengine - -package colorable - -import ( - "io" - "os" - - _ "github.com/mattn/go-isatty" -) - -// NewColorable return new instance of Writer which handle escape sequence. -func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - return file -} - -// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. -func NewColorableStdout() io.Writer { - return os.Stdout -} - -// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. 
-func NewColorableStderr() io.Writer { - return os.Stderr -} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go deleted file mode 100644 index 887f203d..00000000 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !windows -// +build !appengine - -package colorable - -import ( - "io" - "os" - - _ "github.com/mattn/go-isatty" -) - -// NewColorable return new instance of Writer which handle escape sequence. -func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - return file -} - -// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. -func NewColorableStdout() io.Writer { - return os.Stdout -} - -// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. -func NewColorableStderr() io.Writer { - return os.Stderr -} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go deleted file mode 100644 index 404e10ca..00000000 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ /dev/null @@ -1,980 +0,0 @@ -// +build windows -// +build !appengine - -package colorable - -import ( - "bytes" - "io" - "math" - "os" - "strconv" - "strings" - "syscall" - "unsafe" - - "github.com/mattn/go-isatty" -) - -const ( - foregroundBlue = 0x1 - foregroundGreen = 0x2 - foregroundRed = 0x4 - foregroundIntensity = 0x8 - foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) - backgroundBlue = 0x10 - backgroundGreen = 0x20 - backgroundRed = 0x40 - backgroundIntensity = 0x80 - backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) -) - -const ( - genericRead = 0x80000000 - genericWrite = 0x40000000 -) - -const ( - consoleTextmodeBuffer = 0x1 -) - -type wchar uint16 -type short int16 -type dword uint32 -type word uint16 - -type coord struct { - x short - y short -} - -type smallRect struct { - left short - top short - right short - bottom short -} - -type consoleScreenBufferInfo struct { - size coord - cursorPosition coord - attributes word - window smallRect - maximumWindowSize coord -} - -type consoleCursorInfo struct { - size dword - visible int32 -} - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") - procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") - procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") - procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") - procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") - procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") - procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") - procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") -) - -// Writer provide colorable Writer to the console -type Writer struct { - out io.Writer - handle syscall.Handle - althandle syscall.Handle - oldattr word - oldpos coord - rest bytes.Buffer -} - -// NewColorable return new instance of Writer which handle escape sequence from File. 
-func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - if isatty.IsTerminal(file.Fd()) { - var csbi consoleScreenBufferInfo - handle := syscall.Handle(file.Fd()) - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} - } - return file -} - -// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. -func NewColorableStdout() io.Writer { - return NewColorable(os.Stdout) -} - -// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. -func NewColorableStderr() io.Writer { - return NewColorable(os.Stderr) -} - -var color256 = map[int]int{ - 0: 0x000000, - 1: 0x800000, - 2: 0x008000, - 3: 0x808000, - 4: 0x000080, - 5: 0x800080, - 6: 0x008080, - 7: 0xc0c0c0, - 8: 0x808080, - 9: 0xff0000, - 10: 0x00ff00, - 11: 0xffff00, - 12: 0x0000ff, - 13: 0xff00ff, - 14: 0x00ffff, - 15: 0xffffff, - 16: 0x000000, - 17: 0x00005f, - 18: 0x000087, - 19: 0x0000af, - 20: 0x0000d7, - 21: 0x0000ff, - 22: 0x005f00, - 23: 0x005f5f, - 24: 0x005f87, - 25: 0x005faf, - 26: 0x005fd7, - 27: 0x005fff, - 28: 0x008700, - 29: 0x00875f, - 30: 0x008787, - 31: 0x0087af, - 32: 0x0087d7, - 33: 0x0087ff, - 34: 0x00af00, - 35: 0x00af5f, - 36: 0x00af87, - 37: 0x00afaf, - 38: 0x00afd7, - 39: 0x00afff, - 40: 0x00d700, - 41: 0x00d75f, - 42: 0x00d787, - 43: 0x00d7af, - 44: 0x00d7d7, - 45: 0x00d7ff, - 46: 0x00ff00, - 47: 0x00ff5f, - 48: 0x00ff87, - 49: 0x00ffaf, - 50: 0x00ffd7, - 51: 0x00ffff, - 52: 0x5f0000, - 53: 0x5f005f, - 54: 0x5f0087, - 55: 0x5f00af, - 56: 0x5f00d7, - 57: 0x5f00ff, - 58: 0x5f5f00, - 59: 0x5f5f5f, - 60: 0x5f5f87, - 61: 0x5f5faf, - 62: 0x5f5fd7, - 63: 0x5f5fff, - 64: 0x5f8700, - 65: 0x5f875f, - 66: 0x5f8787, - 67: 0x5f87af, - 68: 0x5f87d7, - 69: 0x5f87ff, - 70: 0x5faf00, - 71: 0x5faf5f, - 72: 0x5faf87, - 73: 0x5fafaf, - 74: 0x5fafd7, - 75: 0x5fafff, - 76: 0x5fd700, - 77: 0x5fd75f, - 78: 0x5fd787, - 79: 0x5fd7af, - 80: 0x5fd7d7, - 81: 0x5fd7ff, - 82: 0x5fff00, - 83: 0x5fff5f, - 84: 0x5fff87, - 85: 0x5fffaf, - 86: 0x5fffd7, - 87: 0x5fffff, - 88: 0x870000, - 89: 0x87005f, - 90: 0x870087, - 91: 0x8700af, - 92: 0x8700d7, - 93: 0x8700ff, - 94: 0x875f00, - 95: 0x875f5f, - 96: 0x875f87, - 97: 0x875faf, - 98: 0x875fd7, - 99: 0x875fff, - 100: 0x878700, - 101: 0x87875f, - 102: 0x878787, - 103: 0x8787af, - 104: 0x8787d7, - 105: 0x8787ff, - 106: 0x87af00, - 107: 0x87af5f, - 108: 0x87af87, - 109: 0x87afaf, - 110: 0x87afd7, - 111: 0x87afff, - 112: 0x87d700, - 113: 0x87d75f, - 114: 0x87d787, - 115: 0x87d7af, - 116: 0x87d7d7, - 117: 0x87d7ff, - 118: 0x87ff00, - 119: 0x87ff5f, - 120: 0x87ff87, - 121: 0x87ffaf, - 122: 0x87ffd7, - 123: 0x87ffff, - 124: 0xaf0000, - 125: 0xaf005f, - 126: 0xaf0087, - 127: 0xaf00af, - 128: 0xaf00d7, - 129: 0xaf00ff, - 130: 0xaf5f00, - 131: 0xaf5f5f, - 132: 0xaf5f87, - 133: 0xaf5faf, - 134: 0xaf5fd7, - 135: 0xaf5fff, - 136: 0xaf8700, - 137: 0xaf875f, - 138: 0xaf8787, - 139: 0xaf87af, - 140: 0xaf87d7, - 141: 0xaf87ff, - 142: 0xafaf00, - 143: 0xafaf5f, - 144: 0xafaf87, - 145: 0xafafaf, - 146: 0xafafd7, - 147: 0xafafff, - 148: 0xafd700, - 149: 0xafd75f, - 150: 0xafd787, - 151: 0xafd7af, - 152: 0xafd7d7, - 153: 0xafd7ff, - 154: 0xafff00, - 155: 0xafff5f, - 156: 0xafff87, - 157: 0xafffaf, - 158: 0xafffd7, - 159: 0xafffff, - 160: 0xd70000, - 161: 0xd7005f, - 162: 0xd70087, - 163: 0xd700af, - 164: 0xd700d7, - 165: 0xd700ff, - 166: 0xd75f00, - 167: 
0xd75f5f, - 168: 0xd75f87, - 169: 0xd75faf, - 170: 0xd75fd7, - 171: 0xd75fff, - 172: 0xd78700, - 173: 0xd7875f, - 174: 0xd78787, - 175: 0xd787af, - 176: 0xd787d7, - 177: 0xd787ff, - 178: 0xd7af00, - 179: 0xd7af5f, - 180: 0xd7af87, - 181: 0xd7afaf, - 182: 0xd7afd7, - 183: 0xd7afff, - 184: 0xd7d700, - 185: 0xd7d75f, - 186: 0xd7d787, - 187: 0xd7d7af, - 188: 0xd7d7d7, - 189: 0xd7d7ff, - 190: 0xd7ff00, - 191: 0xd7ff5f, - 192: 0xd7ff87, - 193: 0xd7ffaf, - 194: 0xd7ffd7, - 195: 0xd7ffff, - 196: 0xff0000, - 197: 0xff005f, - 198: 0xff0087, - 199: 0xff00af, - 200: 0xff00d7, - 201: 0xff00ff, - 202: 0xff5f00, - 203: 0xff5f5f, - 204: 0xff5f87, - 205: 0xff5faf, - 206: 0xff5fd7, - 207: 0xff5fff, - 208: 0xff8700, - 209: 0xff875f, - 210: 0xff8787, - 211: 0xff87af, - 212: 0xff87d7, - 213: 0xff87ff, - 214: 0xffaf00, - 215: 0xffaf5f, - 216: 0xffaf87, - 217: 0xffafaf, - 218: 0xffafd7, - 219: 0xffafff, - 220: 0xffd700, - 221: 0xffd75f, - 222: 0xffd787, - 223: 0xffd7af, - 224: 0xffd7d7, - 225: 0xffd7ff, - 226: 0xffff00, - 227: 0xffff5f, - 228: 0xffff87, - 229: 0xffffaf, - 230: 0xffffd7, - 231: 0xffffff, - 232: 0x080808, - 233: 0x121212, - 234: 0x1c1c1c, - 235: 0x262626, - 236: 0x303030, - 237: 0x3a3a3a, - 238: 0x444444, - 239: 0x4e4e4e, - 240: 0x585858, - 241: 0x626262, - 242: 0x6c6c6c, - 243: 0x767676, - 244: 0x808080, - 245: 0x8a8a8a, - 246: 0x949494, - 247: 0x9e9e9e, - 248: 0xa8a8a8, - 249: 0xb2b2b2, - 250: 0xbcbcbc, - 251: 0xc6c6c6, - 252: 0xd0d0d0, - 253: 0xdadada, - 254: 0xe4e4e4, - 255: 0xeeeeee, -} - -// `\033]0;TITLESTR\007` -func doTitleSequence(er *bytes.Reader) error { - var c byte - var err error - - c, err = er.ReadByte() - if err != nil { - return err - } - if c != '0' && c != '2' { - return nil - } - c, err = er.ReadByte() - if err != nil { - return err - } - if c != ';' { - return nil - } - title := make([]byte, 0, 80) - for { - c, err = er.ReadByte() - if err != nil { - return err - } - if c == 0x07 || c == '\n' { - break - } - title = append(title, c) - } - if len(title) > 0 { - title8, err := syscall.UTF16PtrFromString(string(title)) - if err == nil { - procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) - } - } - return nil -} - -// Write write data on console -func (w *Writer) Write(data []byte) (n int, err error) { - var csbi consoleScreenBufferInfo - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - - handle := w.handle - - var er *bytes.Reader - if w.rest.Len() > 0 { - var rest bytes.Buffer - w.rest.WriteTo(&rest) - w.rest.Reset() - rest.Write(data) - er = bytes.NewReader(rest.Bytes()) - } else { - er = bytes.NewReader(data) - } - var bw [1]byte -loop: - for { - c1, err := er.ReadByte() - if err != nil { - break loop - } - if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) - continue - } - c2, err := er.ReadByte() - if err != nil { - break loop - } - - switch c2 { - case '>': - continue - case ']': - w.rest.WriteByte(c1) - w.rest.WriteByte(c2) - er.WriteTo(&w.rest) - if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { - break loop - } - er = bytes.NewReader(w.rest.Bytes()[2:]) - err := doTitleSequence(er) - if err != nil { - break loop - } - w.rest.Reset() - continue - // https://github.com/mattn/go-colorable/issues/27 - case '7': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - w.oldpos = csbi.cursorPosition - continue - case '8': - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) - continue - case 0x5b: - // execute part after switch - default: - continue - } 
- - w.rest.WriteByte(c1) - w.rest.WriteByte(c2) - er.WriteTo(&w.rest) - - var buf bytes.Buffer - var m byte - for i, c := range w.rest.Bytes()[2:] { - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - m = c - er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) - w.rest.Reset() - break - } - buf.Write([]byte(string(c))) - } - if m == 0 { - break loop - } - - switch m { - case 'A': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'B': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'C': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x += short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'D': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x -= short(n) - if csbi.cursorPosition.x < 0 { - csbi.cursorPosition.x = 0 - } - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'E': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = 0 - csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'F': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = 0 - csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'G': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = short(n - 1) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'H', 'f': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - if buf.Len() > 0 { - token := strings.Split(buf.String(), ";") - switch len(token) { - case 1: - n1, err := strconv.Atoi(token[0]) - if err != nil { - continue - } - csbi.cursorPosition.y = short(n1 - 1) - case 2: - n1, err := strconv.Atoi(token[0]) - if err != nil { - continue - } - n2, err := strconv.Atoi(token[1]) - if err != nil { - continue - } - csbi.cursorPosition.x = short(n2 - 1) - csbi.cursorPosition.y = short(n1 - 1) - } - } else { - csbi.cursorPosition.y = 0 - } - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'J': - n := 0 - if buf.Len() > 0 { - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - } - var count, written dword - var cursor coord - 
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - switch n { - case 0: - cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) - case 1: - cursor = coord{x: csbi.window.left, y: csbi.window.top} - count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) - case 2: - cursor = coord{x: csbi.window.left, y: csbi.window.top} - count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) - } - procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - case 'K': - n := 0 - if buf.Len() > 0 { - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - var cursor coord - var count, written dword - switch n { - case 0: - cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - count = dword(csbi.size.x - csbi.cursorPosition.x) - case 1: - cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} - count = dword(csbi.size.x - csbi.cursorPosition.x) - case 2: - cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} - count = dword(csbi.size.x) - } - procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - case 'm': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - attr := csbi.attributes - cs := buf.String() - if cs == "" { - procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) - continue - } - token := strings.Split(cs, ";") - for i := 0; i < len(token); i++ { - ns := token[i] - if n, err = strconv.Atoi(ns); err == nil { - switch { - case n == 0 || n == 100: - attr = w.oldattr - case 1 <= n && n <= 5: - attr |= foregroundIntensity - case n == 7: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case n == 22 || n == 25: - attr |= foregroundIntensity - case n == 27: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case 30 <= n && n <= 37: - attr &= backgroundMask - if (n-30)&1 != 0 { - attr |= foregroundRed - } - if (n-30)&2 != 0 { - attr |= foregroundGreen - } - if (n-30)&4 != 0 { - attr |= foregroundBlue - } - case n == 38: // set foreground color. 
- if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256foreAttr == nil { - n256setup() - } - attr &= backgroundMask - attr |= n256foreAttr[n256] - i += 2 - } - } else if len(token) == 5 && token[i+1] == "2" { - var r, g, b int - r, _ = strconv.Atoi(token[i+2]) - g, _ = strconv.Atoi(token[i+3]) - b, _ = strconv.Atoi(token[i+4]) - i += 4 - if r > 127 { - attr |= foregroundRed - } - if g > 127 { - attr |= foregroundGreen - } - if b > 127 { - attr |= foregroundBlue - } - } else { - attr = attr & (w.oldattr & backgroundMask) - } - case n == 39: // reset foreground color. - attr &= backgroundMask - attr |= w.oldattr & foregroundMask - case 40 <= n && n <= 47: - attr &= foregroundMask - if (n-40)&1 != 0 { - attr |= backgroundRed - } - if (n-40)&2 != 0 { - attr |= backgroundGreen - } - if (n-40)&4 != 0 { - attr |= backgroundBlue - } - case n == 48: // set background color. - if i < len(token)-2 && token[i+1] == "5" { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256backAttr == nil { - n256setup() - } - attr &= foregroundMask - attr |= n256backAttr[n256] - i += 2 - } - } else if len(token) == 5 && token[i+1] == "2" { - var r, g, b int - r, _ = strconv.Atoi(token[i+2]) - g, _ = strconv.Atoi(token[i+3]) - b, _ = strconv.Atoi(token[i+4]) - i += 4 - if r > 127 { - attr |= backgroundRed - } - if g > 127 { - attr |= backgroundGreen - } - if b > 127 { - attr |= backgroundBlue - } - } else { - attr = attr & (w.oldattr & foregroundMask) - } - case n == 49: // reset foreground color. - attr &= foregroundMask - attr |= w.oldattr & backgroundMask - case 90 <= n && n <= 97: - attr = (attr & backgroundMask) - attr |= foregroundIntensity - if (n-90)&1 != 0 { - attr |= foregroundRed - } - if (n-90)&2 != 0 { - attr |= foregroundGreen - } - if (n-90)&4 != 0 { - attr |= foregroundBlue - } - case 100 <= n && n <= 107: - attr = (attr & foregroundMask) - attr |= backgroundIntensity - if (n-100)&1 != 0 { - attr |= backgroundRed - } - if (n-100)&2 != 0 { - attr |= backgroundGreen - } - if (n-100)&4 != 0 { - attr |= backgroundBlue - } - } - procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) - } - } - case 'h': - var ci consoleCursorInfo - cs := buf.String() - if cs == "5>" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?25" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?1049" { - if w.althandle == 0 { - h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) - w.althandle = syscall.Handle(h) - if w.althandle != 0 { - handle = w.althandle - } - } - } - case 'l': - var ci consoleCursorInfo - cs := buf.String() - if cs == "5>" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?25" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?1049" { - if w.althandle != 0 { - syscall.CloseHandle(w.althandle) - w.althandle = 0 - handle = w.handle - } - } - case 's': - 
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - w.oldpos = csbi.cursorPosition - case 'u': - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) - } - } - - return len(data), nil -} - -type consoleColor struct { - rgb int - red bool - green bool - blue bool - intensity bool -} - -func (c consoleColor) foregroundAttr() (attr word) { - if c.red { - attr |= foregroundRed - } - if c.green { - attr |= foregroundGreen - } - if c.blue { - attr |= foregroundBlue - } - if c.intensity { - attr |= foregroundIntensity - } - return -} - -func (c consoleColor) backgroundAttr() (attr word) { - if c.red { - attr |= backgroundRed - } - if c.green { - attr |= backgroundGreen - } - if c.blue { - attr |= backgroundBlue - } - if c.intensity { - attr |= backgroundIntensity - } - return -} - -var color16 = []consoleColor{ - {0x000000, false, false, false, false}, - {0x000080, false, false, true, false}, - {0x008000, false, true, false, false}, - {0x008080, false, true, true, false}, - {0x800000, true, false, false, false}, - {0x800080, true, false, true, false}, - {0x808000, true, true, false, false}, - {0xc0c0c0, true, true, true, false}, - {0x808080, false, false, false, true}, - {0x0000ff, false, false, true, true}, - {0x00ff00, false, true, false, true}, - {0x00ffff, false, true, true, true}, - {0xff0000, true, false, false, true}, - {0xff00ff, true, false, true, true}, - {0xffff00, true, true, false, true}, - {0xffffff, true, true, true, true}, -} - -type hsv struct { - h, s, v float32 -} - -func (a hsv) dist(b hsv) float32 { - dh := a.h - b.h - switch { - case dh > 0.5: - dh = 1 - dh - case dh < -0.5: - dh = -1 - dh - } - ds := a.s - b.s - dv := a.v - b.v - return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) -} - -func toHSV(rgb int) hsv { - r, g, b := float32((rgb&0xFF0000)>>16)/256.0, - float32((rgb&0x00FF00)>>8)/256.0, - float32(rgb&0x0000FF)/256.0 - min, max := minmax3f(r, g, b) - h := max - min - if h > 0 { - if max == r { - h = (g - b) / h - if h < 0 { - h += 6 - } - } else if max == g { - h = 2 + (b-r)/h - } else { - h = 4 + (r-g)/h - } - } - h /= 6.0 - s := max - min - if max != 0 { - s /= max - } - v := max - return hsv{h: h, s: s, v: v} -} - -type hsvTable []hsv - -func toHSVTable(rgbTable []consoleColor) hsvTable { - t := make(hsvTable, len(rgbTable)) - for i, c := range rgbTable { - t[i] = toHSV(c.rgb) - } - return t -} - -func (t hsvTable) find(rgb int) consoleColor { - hsv := toHSV(rgb) - n := 7 - l := float32(5.0) - for i, p := range t { - d := hsv.dist(p) - if d < l { - l, n = d, i - } - } - return color16[n] -} - -func minmax3f(a, b, c float32) (min, max float32) { - if a < b { - if b < c { - return a, c - } else if a < c { - return a, b - } else { - return c, b - } - } else { - if a < c { - return b, c - } else if b < c { - return b, a - } else { - return c, a - } - } -} - -var n256foreAttr []word -var n256backAttr []word - -func n256setup() { - n256foreAttr = make([]word, 256) - n256backAttr = make([]word, 256) - t := toHSVTable(color16) - for i, rgb := range color256 { - c := t.find(rgb) - n256foreAttr[i] = c.foregroundAttr() - n256backAttr[i] = c.backgroundAttr() - } -} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod deleted file mode 100644 index 9d9f4248..00000000 --- a/vendor/github.com/mattn/go-colorable/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/mattn/go-colorable - -require github.com/mattn/go-isatty 
v0.0.5 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum deleted file mode 100644 index 2c12960e..00000000 --- a/vendor/github.com/mattn/go-colorable/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go deleted file mode 100644 index 9721e16f..00000000 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ /dev/null @@ -1,55 +0,0 @@ -package colorable - -import ( - "bytes" - "io" -) - -// NonColorable hold writer but remove escape sequence. -type NonColorable struct { - out io.Writer -} - -// NewNonColorable return new instance of Writer which remove escape sequence from Writer. -func NewNonColorable(w io.Writer) io.Writer { - return &NonColorable{out: w} -} - -// Write write data on console -func (w *NonColorable) Write(data []byte) (n int, err error) { - er := bytes.NewReader(data) - var bw [1]byte -loop: - for { - c1, err := er.ReadByte() - if err != nil { - break loop - } - if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) - continue - } - c2, err := er.ReadByte() - if err != nil { - break loop - } - if c2 != 0x5b { - continue - } - - var buf bytes.Buffer - for { - c, err := er.ReadByte() - if err != nil { - break loop - } - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - break - } - buf.Write([]byte(string(c))) - } - } - - return len(data), nil -} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml deleted file mode 100644 index 604314dd..00000000 --- a/vendor/github.com/mattn/go-isatty/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false -go: - - 1.13.x - - tip - -before_install: - - go get -t -v ./... - -script: - - ./go.test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE deleted file mode 100644 index 65dc692b..00000000 --- a/vendor/github.com/mattn/go-isatty/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) Yasuhiro MATSUMOTO - -MIT License (Expat) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md deleted file mode 100644 index 38418353..00000000 --- a/vendor/github.com/mattn/go-isatty/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# go-isatty - -[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) -[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) -[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) -[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) - -isatty for golang - -## Usage - -```go -package main - -import ( - "fmt" - "github.com/mattn/go-isatty" - "os" -) - -func main() { - if isatty.IsTerminal(os.Stdout.Fd()) { - fmt.Println("Is Terminal") - } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { - fmt.Println("Is Cygwin/MSYS2 Terminal") - } else { - fmt.Println("Is Not Terminal") - } -} -``` - -## Installation - -``` -$ go get github.com/mattn/go-isatty -``` - -## License - -MIT - -## Author - -Yasuhiro Matsumoto (a.k.a mattn) - -## Thanks - -* k-takata: base idea for IsCygwinTerminal - - https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go deleted file mode 100644 index 17d4f90e..00000000 --- a/vendor/github.com/mattn/go-isatty/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package isatty implements interface to isatty -package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod deleted file mode 100644 index 605c4c22..00000000 --- a/vendor/github.com/mattn/go-isatty/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/mattn/go-isatty - -go 1.12 - -require golang.org/x/sys v0.0.0-20200116001909-b77594299b42 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum deleted file mode 100644 index 912e29cb..00000000 --- a/vendor/github.com/mattn/go-isatty/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh deleted file mode 100644 index 012162b0..00000000 --- a/vendor/github.com/mattn/go-isatty/go.test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list ./... 
| grep -v vendor); do - go test -race -coverprofile=profile.out -covermode=atomic "$d" - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go deleted file mode 100644 index 711f2880..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly -// +build !appengine - -package isatty - -import "golang.org/x/sys/unix" - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) - return err == nil -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go deleted file mode 100644 index ff714a37..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build appengine js nacl - -package isatty - -// IsTerminal returns true if the file descriptor is terminal which -// is always false on js and appengine classic which is a sandboxed PaaS. -func IsTerminal(fd uintptr) bool { - return false -} - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go deleted file mode 100644 index c5b6e0c0..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_plan9.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build plan9 - -package isatty - -import ( - "syscall" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - path, err := syscall.Fd2path(int(fd)) - if err != nil { - return false - } - return path == "/dev/cons" || path == "/mnt/term/dev/cons" -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go deleted file mode 100644 index bdd5c79a..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build solaris -// +build !appengine - -package isatty - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c -func IsTerminal(fd uintptr) bool { - var termio unix.Termio - err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) - return err == nil -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. 
-func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go deleted file mode 100644 index 31a1ca97..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux aix -// +build !appengine - -package isatty - -import "golang.org/x/sys/unix" - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) - return err == nil -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go deleted file mode 100644 index 1fa86915..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go +++ /dev/null @@ -1,125 +0,0 @@ -// +build windows -// +build !appengine - -package isatty - -import ( - "errors" - "strings" - "syscall" - "unicode/utf16" - "unsafe" -) - -const ( - objectNameInfo uintptr = 1 - fileNameInfo = 2 - fileTypePipe = 3 -) - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - ntdll = syscall.NewLazyDLL("ntdll.dll") - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") - procGetFileType = kernel32.NewProc("GetFileType") - procNtQueryObject = ntdll.NewProc("NtQueryObject") -) - -func init() { - // Check if GetFileInformationByHandleEx is available. - if procGetFileInformationByHandleEx.Find() != nil { - procGetFileInformationByHandleEx = nil - } -} - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} - -// Check pipe name is used for cygwin/msys2 pty. 
-// Cygwin/MSYS2 PTY has a name like: -// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master -func isCygwinPipeName(name string) bool { - token := strings.Split(name, "-") - if len(token) < 5 { - return false - } - - if token[0] != `\msys` && - token[0] != `\cygwin` && - token[0] != `\Device\NamedPipe\msys` && - token[0] != `\Device\NamedPipe\cygwin` { - return false - } - - if token[1] == "" { - return false - } - - if !strings.HasPrefix(token[2], "pty") { - return false - } - - if token[3] != `from` && token[3] != `to` { - return false - } - - if token[4] != "master" { - return false - } - - return true -} - -// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler -// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion -// guys are using Windows XP, this is a workaround for those guys, it will also work on system from -// Windows vista to 10 -// see https://stackoverflow.com/a/18792477 for details -func getFileNameByHandle(fd uintptr) (string, error) { - if procNtQueryObject == nil { - return "", errors.New("ntdll.dll: NtQueryObject not supported") - } - - var buf [4 + syscall.MAX_PATH]uint16 - var result int - r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, - fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) - if r != 0 { - return "", e - } - return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil -} - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. -func IsCygwinTerminal(fd uintptr) bool { - if procGetFileInformationByHandleEx == nil { - name, err := getFileNameByHandle(fd) - if err != nil { - return false - } - return isCygwinPipeName(name) - } - - // Cygwin/msys's pty is a pipe. - ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) - if ft != fileTypePipe || e != 0 { - return false - } - - var buf [2 + syscall.MAX_PATH]uint16 - r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), - 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), - uintptr(len(buf)*2), 0, 0) - if r == 0 || e != 0 { - return false - } - - l := *(*uint32)(unsafe.Pointer(&buf)) - return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) -} diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json deleted file mode 100644 index 5ae9d96b..00000000 --- a/vendor/github.com/mattn/go-isatty/renovate.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "extends": [ - "config:base" - ], - "postUpdateOptions": [ - "gomodTidy" - ] -} diff --git a/vendor/github.com/mitchellh/cli/.travis.yml b/vendor/github.com/mitchellh/cli/.travis.yml deleted file mode 100644 index b8599b3a..00000000 --- a/vendor/github.com/mitchellh/cli/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -sudo: false - -language: go - -go: - - "1.8" - - "1.9" - - "1.10" - -branches: - only: - - master - -script: make updatedeps test testrace diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/mitchellh/cli/LICENSE deleted file mode 100644 index c33dcc7c..00000000 --- a/vendor/github.com/mitchellh/cli/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. 
“Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. 
Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile deleted file mode 100644 index 4874b008..00000000 --- a/vendor/github.com/mitchellh/cli/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -TEST?=./... - -default: test - -# test runs the test suite and vets the code -test: - go list $(TEST) | xargs -n1 go test -timeout=60s -parallel=10 $(TESTARGS) - -# testrace runs the race checker -testrace: - go list $(TEST) | xargs -n1 go test -race $(TESTARGS) - -# updatedeps installs all the dependencies to run and build -updatedeps: - go list ./... \ - | xargs go list -f '{{ join .Deps "\n" }}{{ printf "\n" }}{{ join .TestImports "\n" }}' \ - | grep -v github.com/mitchellh/cli \ - | xargs go get -f -u -v - -.PHONY: test testrace updatedeps diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md deleted file mode 100644 index 8f02cdd0..00000000 --- a/vendor/github.com/mitchellh/cli/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://godoc.org/github.com/mitchellh/cli) - -cli is a library for implementing powerful command-line interfaces in Go. -cli is the library that powers the CLI for -[Packer](https://github.com/mitchellh/packer), -[Serf](https://github.com/hashicorp/serf), -[Consul](https://github.com/hashicorp/consul), -[Vault](https://github.com/hashicorp/vault), -[Terraform](https://github.com/hashicorp/terraform), and -[Nomad](https://github.com/hashicorp/nomad). - -## Features - -* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc. - -* Support for nested subcommands such as `cli foo bar`. - -* Optional support for default subcommands so `cli` does something - other than error. - -* Support for shell autocompletion of subcommands, flags, and arguments - with callbacks in Go. You don't need to write any shell code. - -* Automatic help generation for listing subcommands - -* Automatic help flag recognition of `-h`, `--help`, etc. - -* Automatic version flag recognition of `-v`, `--version`. 
- -* Helpers for interacting with the terminal, such as outputting information, - asking for input, etc. These are optional, you can always interact with the - terminal however you choose. - -* Use of Go interfaces/types makes augmenting various parts of the library a - piece of cake. - -## Example - -Below is a simple example of creating and running a CLI - -```go -package main - -import ( - "log" - "os" - - "github.com/mitchellh/cli" -) - -func main() { - c := cli.NewCLI("app", "1.0.0") - c.Args = os.Args[1:] - c.Commands = map[string]cli.CommandFactory{ - "foo": fooCommandFactory, - "bar": barCommandFactory, - } - - exitStatus, err := c.Run() - if err != nil { - log.Println(err) - } - - os.Exit(exitStatus) -} -``` - diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go deleted file mode 100644 index 3bec6258..00000000 --- a/vendor/github.com/mitchellh/cli/autocomplete.go +++ /dev/null @@ -1,43 +0,0 @@ -package cli - -import ( - "github.com/posener/complete/cmd/install" -) - -// autocompleteInstaller is an interface to be implemented to perform the -// autocomplete installation and uninstallation with a CLI. -// -// This interface is not exported because it only exists for unit tests -// to be able to test that the installation is called properly. -type autocompleteInstaller interface { - Install(string) error - Uninstall(string) error -} - -// realAutocompleteInstaller uses the real install package to do the -// install/uninstall. -type realAutocompleteInstaller struct{} - -func (i *realAutocompleteInstaller) Install(cmd string) error { - return install.Install(cmd) -} - -func (i *realAutocompleteInstaller) Uninstall(cmd string) error { - return install.Uninstall(cmd) -} - -// mockAutocompleteInstaller is used for tests to record the install/uninstall. -type mockAutocompleteInstaller struct { - InstallCalled bool - UninstallCalled bool -} - -func (i *mockAutocompleteInstaller) Install(cmd string) error { - i.InstallCalled = true - return nil -} - -func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { - i.UninstallCalled = true - return nil -} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go deleted file mode 100644 index c2dbe55a..00000000 --- a/vendor/github.com/mitchellh/cli/cli.go +++ /dev/null @@ -1,720 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "os" - "regexp" - "sort" - "strings" - "sync" - "text/template" - - "github.com/armon/go-radix" - "github.com/posener/complete" -) - -// CLI contains the state necessary to run subcommands and parse the -// command line arguments. -// -// CLI also supports nested subcommands, such as "cli foo bar". To use -// nested subcommands, the key in the Commands mapping below contains the -// full subcommand. In this example, it would be "foo bar". -// -// If you use a CLI with nested subcommands, some semantics change due to -// ambiguities: -// -// * We use longest prefix matching to find a matching subcommand. This -// means if you register "foo bar" and the user executes "cli foo qux", -// the "foo" command will be executed with the arg "qux". It is up to -// you to handle these args. One option is to just return the special -// help return code `RunResultHelp` to display help and exit. -// -// * The help flag "-h" or "-help" will look at all args to determine -// the help function. 
For example: "otto apps list -h" will show the -// help for "apps list" but "otto apps -h" will show it for "apps". -// In the normal CLI, only the first subcommand is used. -// -// * The help flag will list any subcommands that a command takes -// as well as the command's help itself. If there are no subcommands, -// it will note this. If the CLI itself has no subcommands, this entire -// section is omitted. -// -// * Any parent commands that don't exist are automatically created as -// no-op commands that just show help for other subcommands. For example, -// if you only register "foo bar", then "foo" is automatically created. -// -type CLI struct { - // Args is the list of command-line arguments received excluding - // the name of the app. For example, if the command "./cli foo bar" - // was invoked, then Args should be []string{"foo", "bar"}. - Args []string - - // Commands is a mapping of subcommand names to a factory function - // for creating that Command implementation. If there is a command - // with a blank string "", then it will be used as the default command - // if no subcommand is specified. - // - // If the key has a space in it, this will create a nested subcommand. - // For example, if the key is "foo bar", then to access it our CLI - // must be accessed with "./cli foo bar". See the docs for CLI for - // notes on how this changes some other behavior of the CLI as well. - // - // The factory should be as cheap as possible, ideally only allocating - // a struct. The factory may be called multiple times in the course - // of a command execution and certain events such as help require the - // instantiation of all commands. Expensive initialization should be - // deferred to function calls within the interface implementation. - Commands map[string]CommandFactory - - // HiddenCommands is a list of commands that are "hidden". Hidden - // commands are not given to the help function callback and do not - // show up in autocomplete. The values in the slice should be equivalent - // to the keys in the command map. - HiddenCommands []string - - // Name defines the name of the CLI. - Name string - - // Version of the CLI. - Version string - - // Autocomplete enables or disables subcommand auto-completion support. - // This is enabled by default when NewCLI is called. Otherwise, this - // must enabled explicitly. - // - // Autocomplete requires the "Name" option to be set on CLI. This name - // should be set exactly to the binary name that is autocompleted. - // - // Autocompletion is supported via the github.com/posener/complete - // library. This library supports bash, zsh and fish. To add support - // for other shells, please see that library. - // - // AutocompleteInstall and AutocompleteUninstall are the global flag - // names for installing and uninstalling the autocompletion handlers - // for the user's shell. The flag should omit the hyphen(s) in front of - // the value. Both single and double hyphens will automatically be supported - // for the flag name. These default to `autocomplete-install` and - // `autocomplete-uninstall` respectively. - // - // AutocompleteNoDefaultFlags is a boolean which controls if the default auto- - // complete flags like -help and -version are added to the output. - // - // AutocompleteGlobalFlags are a mapping of global flags for - // autocompletion. The help and version flags are automatically added. 
- Autocomplete bool - AutocompleteInstall string - AutocompleteUninstall string - AutocompleteNoDefaultFlags bool - AutocompleteGlobalFlags complete.Flags - autocompleteInstaller autocompleteInstaller // For tests - - // HelpFunc and HelpWriter are used to output help information, if - // requested. - // - // HelpFunc is the function called to generate the generic help - // text that is shown if help must be shown for the CLI that doesn't - // pertain to a specific command. - // - // HelpWriter is the Writer where the help text is outputted to. If - // not specified, it will default to Stderr. - HelpFunc HelpFunc - HelpWriter io.Writer - - //--------------------------------------------------------------- - // Internal fields set automatically - - once sync.Once - autocomplete *complete.Complete - commandTree *radix.Tree - commandNested bool - commandHidden map[string]struct{} - subcommand string - subcommandArgs []string - topFlags []string - - // These are true when special global flags are set. We can/should - // probably use a bitset for this one day. - isHelp bool - isVersion bool - isAutocompleteInstall bool - isAutocompleteUninstall bool -} - -// NewClI returns a new CLI instance with sensible defaults. -func NewCLI(app, version string) *CLI { - return &CLI{ - Name: app, - Version: version, - HelpFunc: BasicHelpFunc(app), - Autocomplete: true, - } - -} - -// IsHelp returns whether or not the help flag is present within the -// arguments. -func (c *CLI) IsHelp() bool { - c.once.Do(c.init) - return c.isHelp -} - -// IsVersion returns whether or not the version flag is present within the -// arguments. -func (c *CLI) IsVersion() bool { - c.once.Do(c.init) - return c.isVersion -} - -// Run runs the actual CLI based on the arguments given. -func (c *CLI) Run() (int, error) { - c.once.Do(c.init) - - // If this is a autocompletion request, satisfy it. This must be called - // first before anything else since its possible to be autocompleting - // -help or -version or other flags and we want to show completions - // and not actually write the help or version. - if c.Autocomplete && c.autocomplete.Complete() { - return 0, nil - } - - // Just show the version and exit if instructed. - if c.IsVersion() && c.Version != "" { - c.HelpWriter.Write([]byte(c.Version + "\n")) - return 0, nil - } - - // Just print the help when only '-h' or '--help' is passed. - if c.IsHelp() && c.Subcommand() == "" { - c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.Subcommand())) + "\n")) - return 0, nil - } - - // If we're attempting to install or uninstall autocomplete then handle - if c.Autocomplete { - // Autocomplete requires the "Name" to be set so that we know what - // command to setup the autocomplete on. 
- if c.Name == "" { - return 1, fmt.Errorf( - "internal error: CLI.Name must be specified for autocomplete to work") - } - - // If both install and uninstall flags are specified, then error - if c.isAutocompleteInstall && c.isAutocompleteUninstall { - return 1, fmt.Errorf( - "Either the autocomplete install or uninstall flag may " + - "be specified, but not both.") - } - - // If the install flag is specified, perform the install or uninstall - if c.isAutocompleteInstall { - if err := c.autocompleteInstaller.Install(c.Name); err != nil { - return 1, err - } - - return 0, nil - } - - if c.isAutocompleteUninstall { - if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil { - return 1, err - } - - return 0, nil - } - } - - // Attempt to get the factory function for creating the command - // implementation. If the command is invalid or blank, it is an error. - raw, ok := c.commandTree.Get(c.Subcommand()) - if !ok { - c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n")) - return 127, nil - } - - command, err := raw.(CommandFactory)() - if err != nil { - return 1, err - } - - // If we've been instructed to just print the help, then print it - if c.IsHelp() { - c.commandHelp(command) - return 0, nil - } - - // If there is an invalid flag, then error - if len(c.topFlags) > 0 { - c.HelpWriter.Write([]byte( - "Invalid flags before the subcommand. If these flags are for\n" + - "the subcommand, please put them after the subcommand.\n\n")) - c.commandHelp(command) - return 1, nil - } - - code := command.Run(c.SubcommandArgs()) - if code == RunResultHelp { - // Requesting help - c.commandHelp(command) - return 1, nil - } - - return code, nil -} - -// Subcommand returns the subcommand that the CLI would execute. For -// example, a CLI from "--version version --help" would return a Subcommand -// of "version" -func (c *CLI) Subcommand() string { - c.once.Do(c.init) - return c.subcommand -} - -// SubcommandArgs returns the arguments that will be passed to the -// subcommand. -func (c *CLI) SubcommandArgs() []string { - c.once.Do(c.init) - return c.subcommandArgs -} - -// subcommandParent returns the parent of this subcommand, if there is one. -// If there isn't on, "" is returned. 
-func (c *CLI) subcommandParent() string { - // Get the subcommand, if it is "" alread just return - sub := c.Subcommand() - if sub == "" { - return sub - } - - // Clear any trailing spaces and find the last space - sub = strings.TrimRight(sub, " ") - idx := strings.LastIndex(sub, " ") - - if idx == -1 { - // No space means our parent is root - return "" - } - - return sub[:idx] -} - -func (c *CLI) init() { - if c.HelpFunc == nil { - c.HelpFunc = BasicHelpFunc("app") - - if c.Name != "" { - c.HelpFunc = BasicHelpFunc(c.Name) - } - } - - if c.HelpWriter == nil { - c.HelpWriter = os.Stderr - } - - // Build our hidden commands - if len(c.HiddenCommands) > 0 { - c.commandHidden = make(map[string]struct{}) - for _, h := range c.HiddenCommands { - c.commandHidden[h] = struct{}{} - } - } - - // Build our command tree - c.commandTree = radix.New() - c.commandNested = false - for k, v := range c.Commands { - k = strings.TrimSpace(k) - c.commandTree.Insert(k, v) - if strings.ContainsRune(k, ' ') { - c.commandNested = true - } - } - - // Go through the key and fill in any missing parent commands - if c.commandNested { - var walkFn radix.WalkFn - toInsert := make(map[string]struct{}) - walkFn = func(k string, raw interface{}) bool { - idx := strings.LastIndex(k, " ") - if idx == -1 { - // If there is no space, just ignore top level commands - return false - } - - // Trim up to that space so we can get the expected parent - k = k[:idx] - if _, ok := c.commandTree.Get(k); ok { - // Yay we have the parent! - return false - } - - // We're missing the parent, so let's insert this - toInsert[k] = struct{}{} - - // Call the walk function recursively so we check this one too - return walkFn(k, nil) - } - - // Walk! - c.commandTree.Walk(walkFn) - - // Insert any that we're missing - for k := range toInsert { - var f CommandFactory = func() (Command, error) { - return &MockCommand{ - HelpText: "This command is accessed by using one of the subcommands below.", - RunResult: RunResultHelp, - }, nil - } - - c.commandTree.Insert(k, f) - } - } - - // Setup autocomplete if we have it enabled. We have to do this after - // the command tree is setup so we can use the radix tree to easily find - // all subcommands. - if c.Autocomplete { - c.initAutocomplete() - } - - // Process the args - c.processArgs() -} - -func (c *CLI) initAutocomplete() { - if c.AutocompleteInstall == "" { - c.AutocompleteInstall = defaultAutocompleteInstall - } - - if c.AutocompleteUninstall == "" { - c.AutocompleteUninstall = defaultAutocompleteUninstall - } - - if c.autocompleteInstaller == nil { - c.autocompleteInstaller = &realAutocompleteInstaller{} - } - - // Build the root command - cmd := c.initAutocompleteSub("") - - // For the root, we add the global flags to the "Flags". This way - // they don't show up on every command. - if !c.AutocompleteNoDefaultFlags { - cmd.Flags = map[string]complete.Predictor{ - "-" + c.AutocompleteInstall: complete.PredictNothing, - "-" + c.AutocompleteUninstall: complete.PredictNothing, - "-help": complete.PredictNothing, - "-version": complete.PredictNothing, - } - } - cmd.GlobalFlags = c.AutocompleteGlobalFlags - - c.autocomplete = complete.New(c.Name, cmd) -} - -// initAutocompleteSub creates the complete.Command for a subcommand with -// the given prefix. This will continue recursively for all subcommands. -// The prefix "" (empty string) can be used for the root command. 
-func (c *CLI) initAutocompleteSub(prefix string) complete.Command { - var cmd complete.Command - walkFn := func(k string, raw interface{}) bool { - // Ignore the empty key which can be present for default commands. - if k == "" { - return false - } - - // Keep track of the full key so that we can nest further if necessary - fullKey := k - - if len(prefix) > 0 { - // If we have a prefix, trim the prefix + 1 (for the space) - // Example: turns "sub one" to "one" with prefix "sub" - k = k[len(prefix)+1:] - } - - if idx := strings.Index(k, " "); idx >= 0 { - // If there is a space, we trim up to the space. This turns - // "sub sub2 sub3" into "sub". The prefix trim above will - // trim our current depth properly. - k = k[:idx] - } - - if _, ok := cmd.Sub[k]; ok { - // If we already tracked this subcommand then ignore - return false - } - - // If the command is hidden, don't record it at all - if _, ok := c.commandHidden[fullKey]; ok { - return false - } - - if cmd.Sub == nil { - cmd.Sub = complete.Commands(make(map[string]complete.Command)) - } - subCmd := c.initAutocompleteSub(fullKey) - - // Instantiate the command so that we can check if the command is - // a CommandAutocomplete implementation. If there is an error - // creating the command, we just ignore it since that will be caught - // later. - impl, err := raw.(CommandFactory)() - if err != nil { - impl = nil - } - - // Check if it implements ComandAutocomplete. If so, setup the autocomplete - if c, ok := impl.(CommandAutocomplete); ok { - subCmd.Args = c.AutocompleteArgs() - subCmd.Flags = c.AutocompleteFlags() - } - - cmd.Sub[k] = subCmd - return false - } - - walkPrefix := prefix - if walkPrefix != "" { - walkPrefix += " " - } - - c.commandTree.WalkPrefix(walkPrefix, walkFn) - return cmd -} - -func (c *CLI) commandHelp(command Command) { - // Get the template to use - tpl := strings.TrimSpace(defaultHelpTemplate) - if t, ok := command.(CommandHelpTemplate); ok { - tpl = t.HelpTemplate() - } - if !strings.HasSuffix(tpl, "\n") { - tpl += "\n" - } - - // Parse it - t, err := template.New("root").Parse(tpl) - if err != nil { - t = template.Must(template.New("root").Parse(fmt.Sprintf( - "Internal error! 
Failed to parse command help template: %s\n", err))) - } - - // Template data - data := map[string]interface{}{ - "Name": c.Name, - "Help": command.Help(), - } - - // Build subcommand list if we have it - var subcommandsTpl []map[string]interface{} - if c.commandNested { - // Get the matching keys - subcommands := c.helpCommands(c.Subcommand()) - keys := make([]string, 0, len(subcommands)) - for k := range subcommands { - keys = append(keys, k) - } - - // Sort the keys - sort.Strings(keys) - - // Figure out the padding length - var longest int - for _, k := range keys { - if v := len(k); v > longest { - longest = v - } - } - - // Go through and create their structures - subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands)) - for _, k := range keys { - // Get the command - raw, ok := subcommands[k] - if !ok { - c.HelpWriter.Write([]byte(fmt.Sprintf( - "Error getting subcommand %q", k))) - } - sub, err := raw() - if err != nil { - c.HelpWriter.Write([]byte(fmt.Sprintf( - "Error instantiating %q: %s", k, err))) - } - - // Find the last space and make sure we only include that last part - name := k - if idx := strings.LastIndex(k, " "); idx > -1 { - name = name[idx+1:] - } - - subcommandsTpl = append(subcommandsTpl, map[string]interface{}{ - "Name": name, - "NameAligned": name + strings.Repeat(" ", longest-len(k)), - "Help": sub.Help(), - "Synopsis": sub.Synopsis(), - }) - } - } - data["Subcommands"] = subcommandsTpl - - // Write - err = t.Execute(c.HelpWriter, data) - if err == nil { - return - } - - // An error, just output... - c.HelpWriter.Write([]byte(fmt.Sprintf( - "Internal error rendering help: %s", err))) -} - -// helpCommands returns the subcommands for the HelpFunc argument. -// This will only contain immediate subcommands. -func (c *CLI) helpCommands(prefix string) map[string]CommandFactory { - // If our prefix isn't empty, make sure it ends in ' ' - if prefix != "" && prefix[len(prefix)-1] != ' ' { - prefix += " " - } - - // Get all the subkeys of this command - var keys []string - c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { - // Ignore any sub-sub keys, i.e. "foo bar baz" when we want "foo bar" - if !strings.Contains(k[len(prefix):], " ") { - keys = append(keys, k) - } - - return false - }) - - // For each of the keys return that in the map - result := make(map[string]CommandFactory, len(keys)) - for _, k := range keys { - raw, ok := c.commandTree.Get(k) - if !ok { - // We just got it via WalkPrefix above, so we just panic - panic("not found: " + k) - } - - // If this is a hidden command, don't show it - if _, ok := c.commandHidden[k]; ok { - continue - } - - result[k] = raw.(CommandFactory) - } - - return result -} - -func (c *CLI) processArgs() { - for i, arg := range c.Args { - if arg == "--" { - break - } - - // Check for help flags. - if arg == "-h" || arg == "-help" || arg == "--help" { - c.isHelp = true - continue - } - - // Check for autocomplete flags - if c.Autocomplete { - if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall { - c.isAutocompleteInstall = true - continue - } - - if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall { - c.isAutocompleteUninstall = true - continue - } - } - - if c.subcommand == "" { - // Check for version flags if not in a subcommand. - if arg == "-v" || arg == "-version" || arg == "--version" { - c.isVersion = true - continue - } - - if arg != "" && arg[0] == '-' { - // Record the arg... 
- c.topFlags = append(c.topFlags, arg) - } - } - - // If we didn't find a subcommand yet and this is the first non-flag - // argument, then this is our subcommand. - if c.subcommand == "" && arg != "" && arg[0] != '-' { - c.subcommand = arg - if c.commandNested { - // If the command has a space in it, then it is invalid. - // Set a blank command so that it fails. - if strings.ContainsRune(arg, ' ') { - c.subcommand = "" - return - } - - // Determine the argument we look to to end subcommands. - // We look at all arguments until one has a space. This - // disallows commands like: ./cli foo "bar baz". An argument - // with a space is always an argument. - j := 0 - for k, v := range c.Args[i:] { - if strings.ContainsRune(v, ' ') { - break - } - - j = i + k + 1 - } - - // Nested CLI, the subcommand is actually the entire - // arg list up to a flag that is still a valid subcommand. - searchKey := strings.Join(c.Args[i:j], " ") - k, _, ok := c.commandTree.LongestPrefix(searchKey) - if ok { - // k could be a prefix that doesn't contain the full - // command such as "foo" instead of "foobar", so we - // need to verify that we have an entire key. To do that, - // we look for an ending in a space or an end of string. - reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`) - if reVerify.MatchString(searchKey) { - c.subcommand = k - i += strings.Count(k, " ") - } - } - } - - // The remaining args the subcommand arguments - c.subcommandArgs = c.Args[i+1:] - } - } - - // If we never found a subcommand and support a default command, then - // switch to using that. - if c.subcommand == "" { - if _, ok := c.Commands[""]; ok { - args := c.topFlags - args = append(args, c.subcommandArgs...) - c.topFlags = nil - c.subcommandArgs = args - } - } -} - -// defaultAutocompleteInstall and defaultAutocompleteUninstall are the -// default values for the autocomplete install and uninstall flags. -const defaultAutocompleteInstall = "autocomplete-install" -const defaultAutocompleteUninstall = "autocomplete-uninstall" - -const defaultHelpTemplate = ` -{{.Help}}{{if gt (len .Subcommands) 0}} - -Subcommands: -{{- range $value := .Subcommands }} - {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }} -{{- end }} -` diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go deleted file mode 100644 index bed11faf..00000000 --- a/vendor/github.com/mitchellh/cli/command.go +++ /dev/null @@ -1,67 +0,0 @@ -package cli - -import ( - "github.com/posener/complete" -) - -const ( - // RunResultHelp is a value that can be returned from Run to signal - // to the CLI to render the help output. - RunResultHelp = -18511 -) - -// A command is a runnable sub-command of a CLI. -type Command interface { - // Help should return long-form help text that includes the command-line - // usage, a brief few sentences explaining the function of the command, - // and the complete list of flags the command accepts. - Help() string - - // Run should run the actual command with the given CLI instance and - // command-line arguments. It should return the exit status when it is - // finished. - // - // There are a handful of special exit codes this can return documented - // above that change behavior. - Run(args []string) int - - // Synopsis should return a one-line, short synopsis of the command. - // This should be less than 50 characters ideally. - Synopsis() string -} - -// CommandAutocomplete is an extension of Command that enables fine-grained -// autocompletion. 
Subcommand autocompletion will work even if this interface -// is not implemented. By implementing this interface, more advanced -// autocompletion is enabled. -type CommandAutocomplete interface { - // AutocompleteArgs returns the argument predictor for this command. - // If argument completion is not supported, this should return - // complete.PredictNothing. - AutocompleteArgs() complete.Predictor - - // AutocompleteFlags returns a mapping of supported flags and autocomplete - // options for this command. The map key for the Flags map should be the - // complete flag such as "-foo" or "--foo". - AutocompleteFlags() complete.Flags -} - -// CommandHelpTemplate is an extension of Command that also has a function -// for returning a template for the help rather than the help itself. In -// this scenario, both Help and HelpTemplate should be implemented. -// -// If CommandHelpTemplate isn't implemented, the Help is output as-is. -type CommandHelpTemplate interface { - // HelpTemplate is the template in text/template format to use for - // displaying the Help. The keys available are: - // - // * ".Help" - The help text itself - // * ".Subcommands" - // - HelpTemplate() string -} - -// CommandFactory is a type of function that is a factory for commands. -// We need a factory because we may need to setup some state on the -// struct that implements the command itself. -type CommandFactory func() (Command, error) diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go deleted file mode 100644 index 7a584b7e..00000000 --- a/vendor/github.com/mitchellh/cli/command_mock.go +++ /dev/null @@ -1,63 +0,0 @@ -package cli - -import ( - "github.com/posener/complete" -) - -// MockCommand is an implementation of Command that can be used for tests. -// It is publicly exported from this package in case you want to use it -// externally. -type MockCommand struct { - // Settable - HelpText string - RunResult int - SynopsisText string - - // Set by the command - RunCalled bool - RunArgs []string -} - -func (c *MockCommand) Help() string { - return c.HelpText -} - -func (c *MockCommand) Run(args []string) int { - c.RunCalled = true - c.RunArgs = args - - return c.RunResult -} - -func (c *MockCommand) Synopsis() string { - return c.SynopsisText -} - -// MockCommandAutocomplete is an implementation of CommandAutocomplete. -type MockCommandAutocomplete struct { - MockCommand - - // Settable - AutocompleteArgsValue complete.Predictor - AutocompleteFlagsValue complete.Flags -} - -func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor { - return c.AutocompleteArgsValue -} - -func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags { - return c.AutocompleteFlagsValue -} - -// MockCommandHelpTemplate is an implementation of CommandHelpTemplate. 
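Putting the interfaces above together, a hypothetical `Command` implementation and matching factory look like the following; the names and help strings are illustrative, not part of this patch:

```go
// Hypothetical implementation of the Command interface removed above.
package app

import "github.com/mitchellh/cli"

type fooCommand struct{}

func (c *fooCommand) Help() string {
	return "Usage: app foo [options]\n\nRuns the foo action."
}

func (c *fooCommand) Synopsis() string { return "Runs the foo action" }

func (c *fooCommand) Run(args []string) int {
	// Returning cli.RunResultHelp here instead would signal the CLI
	// to render this command's help output.
	return 0
}

// fooCommandFactory satisfies cli.CommandFactory (func() (cli.Command, error)).
func fooCommandFactory() (cli.Command, error) {
	return &fooCommand{}, nil
}
```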
-type MockCommandHelpTemplate struct { - MockCommand - - // Settable - HelpTemplateText string -} - -func (c *MockCommandHelpTemplate) HelpTemplate() string { - return c.HelpTemplateText -} diff --git a/vendor/github.com/mitchellh/cli/go.mod b/vendor/github.com/mitchellh/cli/go.mod deleted file mode 100644 index 675325ff..00000000 --- a/vendor/github.com/mitchellh/cli/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/mitchellh/cli - -require ( - github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 - github.com/bgentry/speakeasy v0.1.0 - github.com/fatih/color v1.7.0 - github.com/hashicorp/go-multierror v1.0.0 // indirect - github.com/mattn/go-colorable v0.0.9 // indirect - github.com/mattn/go-isatty v0.0.3 - github.com/posener/complete v1.1.1 - golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc // indirect -) diff --git a/vendor/github.com/mitchellh/cli/go.sum b/vendor/github.com/mitchellh/cli/go.sum deleted file mode 100644 index 03708752..00000000 --- a/vendor/github.com/mitchellh/cli/go.sum +++ /dev/null @@ -1,22 +0,0 @@ -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357 h1:Rem2+U35z1QtPQc6r+WolF7yXiefXqDKyk+lN2pE164= -github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0 h1:j30noezaCfvNLcdMYSvHLv81DxYRSt1grlpseG67vhU= -github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc h1:MeuS1UDyZyFH++6vVy44PuufTeFF0d0nfI6XB87YGSk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go deleted file mode 100644 index f5ca58f5..00000000 --- 
a/vendor/github.com/mitchellh/cli/help.go +++ /dev/null @@ -1,79 +0,0 @@ -package cli - -import ( - "bytes" - "fmt" - "log" - "sort" - "strings" -) - -// HelpFunc is the type of the function that is responsible for generating -// the help output when the CLI must show the general help text. -type HelpFunc func(map[string]CommandFactory) string - -// BasicHelpFunc generates some basic help output that is usually good enough -// for most CLI applications. -func BasicHelpFunc(app string) HelpFunc { - return func(commands map[string]CommandFactory) string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf( - "Usage: %s [--version] [--help] []\n\n", - app)) - buf.WriteString("Available commands are:\n") - - // Get the list of keys so we can sort them, and also get the maximum - // key length so they can be aligned properly. - keys := make([]string, 0, len(commands)) - maxKeyLen := 0 - for key := range commands { - if len(key) > maxKeyLen { - maxKeyLen = len(key) - } - - keys = append(keys, key) - } - sort.Strings(keys) - - for _, key := range keys { - commandFunc, ok := commands[key] - if !ok { - // This should never happen since we JUST built the list of - // keys. - panic("command not found: " + key) - } - - command, err := commandFunc() - if err != nil { - log.Printf("[ERR] cli: Command '%s' failed to load: %s", - key, err) - continue - } - - key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key))) - buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis())) - } - - return buf.String() - } -} - -// FilteredHelpFunc will filter the commands to only include the keys -// in the include parameter. -func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc { - return func(commands map[string]CommandFactory) string { - set := make(map[string]struct{}) - for _, k := range include { - set[k] = struct{}{} - } - - filtered := make(map[string]CommandFactory) - for k, f := range commands { - if _, ok := set[k]; ok { - filtered[k] = f - } - } - - return f(filtered) - } -} diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go deleted file mode 100644 index a2d6f94f..00000000 --- a/vendor/github.com/mitchellh/cli/ui.go +++ /dev/null @@ -1,187 +0,0 @@ -package cli - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "os/signal" - "strings" - - "github.com/bgentry/speakeasy" - "github.com/mattn/go-isatty" -) - -// Ui is an interface for interacting with the terminal, or "interface" -// of a CLI. This abstraction doesn't have to be used, but helps provide -// a simple, layerable way to manage user interactions. -type Ui interface { - // Ask asks the user for input using the given query. The response is - // returned as the given string, or an error. - Ask(string) (string, error) - - // AskSecret asks the user for input using the given query, but does not echo - // the keystrokes to the terminal. - AskSecret(string) (string, error) - - // Output is called for normal standard output. - Output(string) - - // Info is called for information related to the previous output. - // In general this may be the exact same as Output, but this gives - // Ui implementors some flexibility with output formats. - Info(string) - - // Error is used for any error messages that might appear on standard - // error. - Error(string) - - // Warn is used for any warning messages that might appear on standard - // error. - Warn(string) -} - -// BasicUi is an implementation of Ui that just outputs to the given -// writer. 
This UI is not threadsafe by default, but you can wrap it -// in a ConcurrentUi to make it safe. -type BasicUi struct { - Reader io.Reader - Writer io.Writer - ErrorWriter io.Writer -} - -func (u *BasicUi) Ask(query string) (string, error) { - return u.ask(query, false) -} - -func (u *BasicUi) AskSecret(query string) (string, error) { - return u.ask(query, true) -} - -func (u *BasicUi) ask(query string, secret bool) (string, error) { - if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { - return "", err - } - - // Register for interrupts so that we can catch it and immediately - // return... - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) - defer signal.Stop(sigCh) - - // Ask for input in a go-routine so that we can ignore it. - errCh := make(chan error, 1) - lineCh := make(chan string, 1) - go func() { - var line string - var err error - if secret && isatty.IsTerminal(os.Stdin.Fd()) { - line, err = speakeasy.Ask("") - } else { - r := bufio.NewReader(u.Reader) - line, err = r.ReadString('\n') - } - if err != nil { - errCh <- err - return - } - - lineCh <- strings.TrimRight(line, "\r\n") - }() - - select { - case err := <-errCh: - return "", err - case line := <-lineCh: - return line, nil - case <-sigCh: - // Print a newline so that any further output starts properly - // on a new line. - fmt.Fprintln(u.Writer) - - return "", errors.New("interrupted") - } -} - -func (u *BasicUi) Error(message string) { - w := u.Writer - if u.ErrorWriter != nil { - w = u.ErrorWriter - } - - fmt.Fprint(w, message) - fmt.Fprint(w, "\n") -} - -func (u *BasicUi) Info(message string) { - u.Output(message) -} - -func (u *BasicUi) Output(message string) { - fmt.Fprint(u.Writer, message) - fmt.Fprint(u.Writer, "\n") -} - -func (u *BasicUi) Warn(message string) { - u.Error(message) -} - -// PrefixedUi is an implementation of Ui that prefixes messages. -type PrefixedUi struct { - AskPrefix string - AskSecretPrefix string - OutputPrefix string - InfoPrefix string - ErrorPrefix string - WarnPrefix string - Ui Ui -} - -func (u *PrefixedUi) Ask(query string) (string, error) { - if query != "" { - query = fmt.Sprintf("%s%s", u.AskPrefix, query) - } - - return u.Ui.Ask(query) -} - -func (u *PrefixedUi) AskSecret(query string) (string, error) { - if query != "" { - query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) - } - - return u.Ui.AskSecret(query) -} - -func (u *PrefixedUi) Error(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) - } - - u.Ui.Error(message) -} - -func (u *PrefixedUi) Info(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.InfoPrefix, message) - } - - u.Ui.Info(message) -} - -func (u *PrefixedUi) Output(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.OutputPrefix, message) - } - - u.Ui.Output(message) -} - -func (u *PrefixedUi) Warn(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.WarnPrefix, message) - } - - u.Ui.Warn(message) -} diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go deleted file mode 100644 index b0ec4484..00000000 --- a/vendor/github.com/mitchellh/cli/ui_colored.go +++ /dev/null @@ -1,73 +0,0 @@ -package cli - -import ( - "github.com/fatih/color" -) - -const ( - noColor = -1 -) - -// UiColor is a posix shell color code to use. -type UiColor struct { - Code int - Bold bool -} - -// A list of colors that are useful. These are all non-bolded by default. 
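The `Ui` implementations in this package are designed to layer: `BasicUi` does the raw I/O, `ConcurrentUi` adds locking, and `ColoredUi` adds color on top. A layering sketch, illustrative only and not part of this patch:

```go
// Illustrative layering of the Ui implementations removed above.
package main

import (
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	// BasicUi is not safe for concurrent use on its own, so wrap it.
	var ui cli.Ui = &cli.BasicUi{
		Reader:      os.Stdin,
		Writer:      os.Stdout,
		ErrorWriter: os.Stderr,
	}
	ui = &cli.ConcurrentUi{Ui: ui}
	ui = &cli.ColoredUi{
		OutputColor: cli.UiColorNone,
		InfoColor:   cli.UiColorNone,
		ErrorColor:  cli.UiColorRed,
		WarnColor:   cli.UiColorYellow,
		Ui:          ui,
	}

	ui.Info("starting")
	ui.Warn("this goes to stderr, in yellow")
}
```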
-var ( - UiColorNone UiColor = UiColor{noColor, false} - UiColorRed = UiColor{int(color.FgHiRed), false} - UiColorGreen = UiColor{int(color.FgHiGreen), false} - UiColorYellow = UiColor{int(color.FgHiYellow), false} - UiColorBlue = UiColor{int(color.FgHiBlue), false} - UiColorMagenta = UiColor{int(color.FgHiMagenta), false} - UiColorCyan = UiColor{int(color.FgHiCyan), false} -) - -// ColoredUi is a Ui implementation that colors its output according -// to the given color schemes for the given type of output. -type ColoredUi struct { - OutputColor UiColor - InfoColor UiColor - ErrorColor UiColor - WarnColor UiColor - Ui Ui -} - -func (u *ColoredUi) Ask(query string) (string, error) { - return u.Ui.Ask(u.colorize(query, u.OutputColor)) -} - -func (u *ColoredUi) AskSecret(query string) (string, error) { - return u.Ui.AskSecret(u.colorize(query, u.OutputColor)) -} - -func (u *ColoredUi) Output(message string) { - u.Ui.Output(u.colorize(message, u.OutputColor)) -} - -func (u *ColoredUi) Info(message string) { - u.Ui.Info(u.colorize(message, u.InfoColor)) -} - -func (u *ColoredUi) Error(message string) { - u.Ui.Error(u.colorize(message, u.ErrorColor)) -} - -func (u *ColoredUi) Warn(message string) { - u.Ui.Warn(u.colorize(message, u.WarnColor)) -} - -func (u *ColoredUi) colorize(message string, uc UiColor) string { - if uc.Code == noColor { - return message - } - - attr := []color.Attribute{color.Attribute(uc.Code)} - if uc.Bold { - attr = append(attr, color.Bold) - } - - return color.New(attr...).SprintFunc()(message) -} diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go deleted file mode 100644 index b4f4dbfa..00000000 --- a/vendor/github.com/mitchellh/cli/ui_concurrent.go +++ /dev/null @@ -1,54 +0,0 @@ -package cli - -import ( - "sync" -) - -// ConcurrentUi is a wrapper around a Ui interface (and implements that -// interface) making the underlying Ui concurrency safe. -type ConcurrentUi struct { - Ui Ui - l sync.Mutex -} - -func (u *ConcurrentUi) Ask(query string) (string, error) { - u.l.Lock() - defer u.l.Unlock() - - return u.Ui.Ask(query) -} - -func (u *ConcurrentUi) AskSecret(query string) (string, error) { - u.l.Lock() - defer u.l.Unlock() - - return u.Ui.AskSecret(query) -} - -func (u *ConcurrentUi) Error(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Error(message) -} - -func (u *ConcurrentUi) Info(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Info(message) -} - -func (u *ConcurrentUi) Output(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Output(message) -} - -func (u *ConcurrentUi) Warn(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Warn(message) -} diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go deleted file mode 100644 index 0bfe0a19..00000000 --- a/vendor/github.com/mitchellh/cli/ui_mock.go +++ /dev/null @@ -1,111 +0,0 @@ -package cli - -import ( - "bytes" - "fmt" - "io" - "sync" -) - -// NewMockUi returns a fully initialized MockUi instance -// which is safe for concurrent use. -func NewMockUi() *MockUi { - m := new(MockUi) - m.once.Do(m.init) - return m -} - -// MockUi is a mock UI that is used for tests and is exported publicly -// for use in external tests if needed as well. Do not instantite this -// directly since the buffers will be initialized on the first write. If -// there is no write then you will get a nil panic. 
Please use the -// NewMockUi() constructor function instead. You can fix your code with -// -// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go -type MockUi struct { - InputReader io.Reader - ErrorWriter *syncBuffer - OutputWriter *syncBuffer - - once sync.Once -} - -func (u *MockUi) Ask(query string) (string, error) { - u.once.Do(u.init) - - var result string - fmt.Fprint(u.OutputWriter, query) - if _, err := fmt.Fscanln(u.InputReader, &result); err != nil { - return "", err - } - - return result, nil -} - -func (u *MockUi) AskSecret(query string) (string, error) { - return u.Ask(query) -} - -func (u *MockUi) Error(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.ErrorWriter, message) - fmt.Fprint(u.ErrorWriter, "\n") -} - -func (u *MockUi) Info(message string) { - u.Output(message) -} - -func (u *MockUi) Output(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.OutputWriter, message) - fmt.Fprint(u.OutputWriter, "\n") -} - -func (u *MockUi) Warn(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.ErrorWriter, message) - fmt.Fprint(u.ErrorWriter, "\n") -} - -func (u *MockUi) init() { - u.ErrorWriter = new(syncBuffer) - u.OutputWriter = new(syncBuffer) -} - -type syncBuffer struct { - sync.RWMutex - b bytes.Buffer -} - -func (b *syncBuffer) Write(data []byte) (int, error) { - b.Lock() - defer b.Unlock() - return b.b.Write(data) -} - -func (b *syncBuffer) Read(data []byte) (int, error) { - b.RLock() - defer b.RUnlock() - return b.b.Read(data) -} - -func (b *syncBuffer) Reset() { - b.Lock() - b.b.Reset() - b.Unlock() -} - -func (b *syncBuffer) String() string { - return string(b.Bytes()) -} - -func (b *syncBuffer) Bytes() []byte { - b.RLock() - data := b.b.Bytes() - b.RUnlock() - return data -} diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go deleted file mode 100644 index 1e1db3cf..00000000 --- a/vendor/github.com/mitchellh/cli/ui_writer.go +++ /dev/null @@ -1,18 +0,0 @@ -package cli - -// UiWriter is an io.Writer implementation that can be used with -// loggers that writes every line of log output data to a Ui at the -// Info level. -type UiWriter struct { - Ui Ui -} - -func (w *UiWriter) Write(p []byte) (n int, err error) { - n = len(p) - if n > 0 && p[n-1] == '\n' { - p = p[:n-1] - } - - w.Ui.Info(string(p)) - return n, nil -} diff --git a/vendor/github.com/mitchellh/colorstring/.travis.yml b/vendor/github.com/mitchellh/colorstring/.travis.yml deleted file mode 100644 index 74e286ae..00000000 --- a/vendor/github.com/mitchellh/colorstring/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - tip - -script: - - go test - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/mitchellh/colorstring/README.md b/vendor/github.com/mitchellh/colorstring/README.md deleted file mode 100644 index 0654d454..00000000 --- a/vendor/github.com/mitchellh/colorstring/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# colorstring [![Build Status](https://travis-ci.org/mitchellh/colorstring.svg)](https://travis-ci.org/mitchellh/colorstring) - -colorstring is a [Go](http://www.golang.org) library for outputting colored -strings to a console using a simple inline syntax in your string to specify -the color to print as. - -For example, the string `[blue]hello [red]world` would output the text -"hello world" in two colors. The API of colorstring allows for easily disabling -colors, adding aliases, etc. 
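-
-A minimal sketch of the inline syntax (illustrative only; `Color` and its
-pass-through behaviour for unknown tags are shown in colorstring.go below):
-
-```go
-fmt.Println(colorstring.Color("[blue]hello [red]world")) // colored "hello world"
-fmt.Println(colorstring.Color("[hi]world"))              // "[hi]world" (unknown tag kept)
-```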
- -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/colorstring -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/colorstring). - -Usage is easy enough: - -```go -colorstring.Println("[blue]Hello [red]World!") -``` - -Additionally, the `Colorize` struct can be used to set options such as -custom colors, color disabling, etc. diff --git a/vendor/github.com/mitchellh/colorstring/colorstring.go b/vendor/github.com/mitchellh/colorstring/colorstring.go deleted file mode 100644 index 3de5b241..00000000 --- a/vendor/github.com/mitchellh/colorstring/colorstring.go +++ /dev/null @@ -1,244 +0,0 @@ -// colorstring provides functions for colorizing strings for terminal -// output. -package colorstring - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" -) - -// Color colorizes your strings using the default settings. -// -// Strings given to Color should use the syntax `[color]` to specify the -// color for text following. For example: `[blue]Hello` will return "Hello" -// in blue. See DefaultColors for all the supported colors and attributes. -// -// If an unrecognized color is given, it is ignored and assumed to be part -// of the string. For example: `[hi]world` will result in "[hi]world". -// -// A color reset is appended to the end of every string. This will reset -// the color of following strings when you output this text to the same -// terminal session. -// -// If you want to customize any of this behavior, use the Colorize struct. -func Color(v string) string { - return def.Color(v) -} - -// ColorPrefix returns the color sequence that prefixes the given text. -// -// This is useful when wrapping text if you want to inherit the color -// of the wrapped text. For example, "[green]foo" will return "[green]". -// If there is no color sequence, then this will return "". -func ColorPrefix(v string) string { - return def.ColorPrefix(v) -} - -// Colorize colorizes your strings, giving you the ability to customize -// some of the colorization process. -// -// The options in Colorize can be set to customize colorization. If you're -// only interested in the defaults, just use the top Color function directly, -// which creates a default Colorize. -type Colorize struct { - // Colors maps a color string to the code for that color. The code - // is a string so that you can use more complex colors to set foreground, - // background, attributes, etc. For example, "boldblue" might be - // "1;34" - Colors map[string]string - - // If true, color attributes will be ignored. This is useful if you're - // outputting to a location that doesn't support colors and you just - // want the strings returned. - Disable bool - - // Reset, if true, will reset the color after each colorization by - // adding a reset code at the end. - Reset bool -} - -// Color colorizes a string according to the settings setup in the struct. -// -// For more details on the syntax, see the top-level Color function. 
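-//
-// Illustrative sketch only (not part of the upstream file): with Disable
-// set, recognized tags are stripped and the text comes back uncolored:
-//
-//	c := Colorize{Colors: DefaultColors, Disable: true}
-//	c.Color("[blue]hello") // => "hello"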
-func (c *Colorize) Color(v string) string { - matches := parseRe.FindAllStringIndex(v, -1) - if len(matches) == 0 { - return v - } - - result := new(bytes.Buffer) - colored := false - m := []int{0, 0} - for _, nm := range matches { - // Write the text in between this match and the last - result.WriteString(v[m[1]:nm[0]]) - m = nm - - var replace string - if code, ok := c.Colors[v[m[0]+1:m[1]-1]]; ok { - colored = true - - if !c.Disable { - replace = fmt.Sprintf("\033[%sm", code) - } - } else { - replace = v[m[0]:m[1]] - } - - result.WriteString(replace) - } - result.WriteString(v[m[1]:]) - - if colored && c.Reset && !c.Disable { - // Write the clear byte at the end - result.WriteString("\033[0m") - } - - return result.String() -} - -// ColorPrefix returns the first color sequence that exists in this string. -// -// For example: "[green]foo" would return "[green]". If no color sequence -// exists, then "" is returned. This is especially useful when wrapping -// colored texts to inherit the color of the wrapped text. -func (c *Colorize) ColorPrefix(v string) string { - return prefixRe.FindString(strings.TrimSpace(v)) -} - -// DefaultColors are the default colors used when colorizing. -// -// If the color is surrounded in underscores, such as "_blue_", then that -// color will be used for the background color. -var DefaultColors map[string]string - -func init() { - DefaultColors = map[string]string{ - // Default foreground/background colors - "default": "39", - "_default_": "49", - - // Foreground colors - "black": "30", - "red": "31", - "green": "32", - "yellow": "33", - "blue": "34", - "magenta": "35", - "cyan": "36", - "light_gray": "37", - "dark_gray": "90", - "light_red": "91", - "light_green": "92", - "light_yellow": "93", - "light_blue": "94", - "light_magenta": "95", - "light_cyan": "96", - "white": "97", - - // Background colors - "_black_": "40", - "_red_": "41", - "_green_": "42", - "_yellow_": "43", - "_blue_": "44", - "_magenta_": "45", - "_cyan_": "46", - "_light_gray_": "47", - "_dark_gray_": "100", - "_light_red_": "101", - "_light_green_": "102", - "_light_yellow_": "103", - "_light_blue_": "104", - "_light_magenta_": "105", - "_light_cyan_": "106", - "_white_": "107", - - // Attributes - "bold": "1", - "dim": "2", - "underline": "4", - "blink_slow": "5", - "blink_fast": "6", - "invert": "7", - "hidden": "8", - - // Reset to reset everything to their defaults - "reset": "0", - "reset_bold": "21", - } - - def = Colorize{ - Colors: DefaultColors, - Reset: true, - } -} - -var def Colorize -var parseReRaw = `\[[a-z0-9_-]+\]` -var parseRe = regexp.MustCompile(`(?i)` + parseReRaw) -var prefixRe = regexp.MustCompile(`^(?i)(` + parseReRaw + `)+`) - -// Print is a convenience wrapper for fmt.Print with support for color codes. -// -// Print formats using the default formats for its operands and writes to -// standard output with support for color codes. Spaces are added between -// operands when neither is a string. It returns the number of bytes written -// and any write error encountered. -func Print(a string) (n int, err error) { - return fmt.Print(Color(a)) -} - -// Println is a convenience wrapper for fmt.Println with support for color -// codes. -// -// Println formats using the default formats for its operands and writes to -// standard output with support for color codes. Spaces are always added -// between operands and a newline is appended. It returns the number of bytes -// written and any write error encountered. 
-func Println(a string) (n int, err error) { - return fmt.Println(Color(a)) -} - -// Printf is a convenience wrapper for fmt.Printf with support for color codes. -// -// Printf formats according to a format specifier and writes to standard output -// with support for color codes. It returns the number of bytes written and any -// write error encountered. -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(Color(format), a...) -} - -// Fprint is a convenience wrapper for fmt.Fprint with support for color codes. -// -// Fprint formats using the default formats for its operands and writes to w -// with support for color codes. Spaces are added between operands when neither -// is a string. It returns the number of bytes written and any write error -// encountered. -func Fprint(w io.Writer, a string) (n int, err error) { - return fmt.Fprint(w, Color(a)) -} - -// Fprintln is a convenience wrapper for fmt.Fprintln with support for color -// codes. -// -// Fprintln formats using the default formats for its operands and writes to w -// with support for color codes. Spaces are always added between operands and a -// newline is appended. It returns the number of bytes written and any write -// error encountered. -func Fprintln(w io.Writer, a string) (n int, err error) { - return fmt.Fprintln(w, Color(a)) -} - -// Fprintf is a convenience wrapper for fmt.Fprintf with support for color -// codes. -// -// Fprintf formats according to a format specifier and writes to w with support -// for color codes. It returns the number of bytes written and any write error -// encountered. -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, Color(format), a...) -} diff --git a/vendor/github.com/mitchellh/colorstring/go.mod b/vendor/github.com/mitchellh/colorstring/go.mod deleted file mode 100644 index 446ff8d3..00000000 --- a/vendor/github.com/mitchellh/colorstring/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/mitchellh/colorstring diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go index 204afb42..b05a49a6 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/testing.go +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -22,6 +22,7 @@ type T interface { Log(args ...interface{}) Logf(format string, args ...interface{}) Name() string + Parallel() Skip(args ...interface{}) SkipNow() Skipf(format string, args ...interface{}) @@ -31,10 +32,12 @@ type T interface { // RuntimeT implements T and can be instantiated and run at runtime to // mimic *testing.T behavior. Unlike *testing.T, this will simply panic // for calls to Fatal. For calls to Error, you'll have to check the errors -// list to determine whether to exit yourself. Name and Skip methods are -// unimplemented noops. +// list to determine whether to exit yourself. +// +// Parallel does not do anything. 
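+//
+// A small usage sketch (illustrative only, not part of the upstream file):
+//
+//	var rt RuntimeT
+//	rt.Skip("skipping at runtime")
+//	_ = rt.Skipped() // true after the Skip above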
type RuntimeT struct { - failed bool + failed bool + skipped bool } func (t *RuntimeT) Error(args ...interface{}) { @@ -77,8 +80,26 @@ func (t *RuntimeT) Logf(format string, args ...interface{}) { log.Println(fmt.Sprintf(format, args...)) } -func (t *RuntimeT) Name() string { return "" } -func (t *RuntimeT) Skip(args ...interface{}) {} -func (t *RuntimeT) SkipNow() {} -func (t *RuntimeT) Skipf(format string, args ...interface{}) {} -func (t *RuntimeT) Skipped() bool { return false } +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Parallel() {} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go index 31b42cad..f09c066a 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -27,6 +27,7 @@ type T interface { Log(args ...interface{}) Logf(format string, args ...interface{}) Name() string + Parallel() Skip(args ...interface{}) SkipNow() Skipf(format string, args ...interface{}) @@ -38,9 +39,11 @@ type T interface { // mimic *testing.T behavior. Unlike *testing.T, this will simply panic // for calls to Fatal. For calls to Error, you'll have to check the errors // list to determine whether to exit yourself. +// +// Parallel does not do anything. type RuntimeT struct { - skipped bool failed bool + skipped bool } func (t *RuntimeT) Error(args ...interface{}) { @@ -87,6 +90,8 @@ func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Parallel() {} + func (t *RuntimeT) Skip(args ...interface{}) { log.Print(args...) t.SkipNow() diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go deleted file mode 100644 index a4ae8901..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ /dev/null @@ -1,155 +0,0 @@ -// +build linux - -package system - -import ( - "os" - "os/exec" - "syscall" // only for exec - "unsafe" - - "github.com/opencontainers/runc/libcontainer/user" - "golang.org/x/sys/unix" -) - -// If arg2 is nonzero, set the "child subreaper" attribute of the -// calling process; if arg2 is zero, unset the attribute. When a -// process is marked as a child subreaper, all of the children -// that it creates, and their descendants, will be marked as -// having a subreaper. In effect, a subreaper fulfills the role -// of init(1) for its descendant processes. Upon termination of -// a process that is orphaned (i.e., its immediate parent has -// already terminated) and marked as having a subreaper, the -// nearest still living ancestor subreaper will receive a SIGCHLD -// signal and be able to wait(2) on the process to discover its -// termination status. 
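-//
-// Illustrative sketch only (not part of the upstream file): SetSubreaper,
-// defined further down in this file, wraps this prctl flag:
-//
-//	_ = system.SetSubreaper(1) // mark the calling process as a child subreaper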
-const PR_SET_CHILD_SUBREAPER = 36 - -type ParentDeathSignal int - -func (p ParentDeathSignal) Restore() error { - if p == 0 { - return nil - } - current, err := GetParentDeathSignal() - if err != nil { - return err - } - if p == current { - return nil - } - return p.Set() -} - -func (p ParentDeathSignal) Set() error { - return SetParentDeathSignal(uintptr(p)) -} - -func Execv(cmd string, args []string, env []string) error { - name, err := exec.LookPath(cmd) - if err != nil { - return err - } - - return syscall.Exec(name, args, env) -} - -func Prlimit(pid, resource int, limit unix.Rlimit) error { - _, _, err := unix.RawSyscall6(unix.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0) - if err != 0 { - return err - } - return nil -} - -func SetParentDeathSignal(sig uintptr) error { - if err := unix.Prctl(unix.PR_SET_PDEATHSIG, sig, 0, 0, 0); err != nil { - return err - } - return nil -} - -func GetParentDeathSignal() (ParentDeathSignal, error) { - var sig int - if err := unix.Prctl(unix.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0, 0, 0); err != nil { - return -1, err - } - return ParentDeathSignal(sig), nil -} - -func SetKeepCaps() error { - if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 1, 0, 0, 0); err != nil { - return err - } - - return nil -} - -func ClearKeepCaps() error { - if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 0, 0, 0, 0); err != nil { - return err - } - - return nil -} - -func Setctty() error { - if err := unix.IoctlSetInt(0, unix.TIOCSCTTY, 0); err != nil { - return err - } - return nil -} - -// RunningInUserNS detects whether we are currently running in a user namespace. -// Originally copied from github.com/lxc/lxd/shared/util.go -func RunningInUserNS() bool { - uidmap, err := user.CurrentProcessUIDMap() - if err != nil { - // This kernel-provided file only exists if user namespaces are supported - return false - } - return UIDMapInUserNS(uidmap) -} - -func UIDMapInUserNS(uidmap []user.IDMap) bool { - /* - * We assume we are in the initial user namespace if we have a full - * range - 4294967295 uids starting at uid 0. 
- */ - if len(uidmap) == 1 && uidmap[0].ID == 0 && uidmap[0].ParentID == 0 && uidmap[0].Count == 4294967295 { - return false - } - return true -} - -// GetParentNSeuid returns the euid within the parent user namespace -func GetParentNSeuid() int64 { - euid := int64(os.Geteuid()) - uidmap, err := user.CurrentProcessUIDMap() - if err != nil { - // This kernel-provided file only exists if user namespaces are supported - return euid - } - for _, um := range uidmap { - if um.ID <= euid && euid <= um.ID+um.Count-1 { - return um.ParentID + euid - um.ID - } - } - return euid -} - -// SetSubreaper sets the value i as the subreaper setting for the calling process -func SetSubreaper(i int) error { - return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) -} - -// GetSubreaper returns the subreaper setting for the calling process -func GetSubreaper() (int, error) { - var i uintptr - - if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil { - return -1, err - } - - return int(i), nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go deleted file mode 100644 index 79232a43..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go +++ /dev/null @@ -1,113 +0,0 @@ -package system - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "strconv" - "strings" -) - -// State is the status of a process. -type State rune - -const ( // Only values for Linux 3.14 and later are listed here - Dead State = 'X' - DiskSleep State = 'D' - Running State = 'R' - Sleeping State = 'S' - Stopped State = 'T' - TracingStop State = 't' - Zombie State = 'Z' -) - -// String forms of the state from proc(5)'s documentation for -// /proc/[pid]/status' "State" field. -func (s State) String() string { - switch s { - case Dead: - return "dead" - case DiskSleep: - return "disk sleep" - case Running: - return "running" - case Sleeping: - return "sleeping" - case Stopped: - return "stopped" - case TracingStop: - return "tracing stop" - case Zombie: - return "zombie" - default: - return fmt.Sprintf("unknown (%c)", s) - } -} - -// Stat_t represents the information from /proc/[pid]/stat, as -// described in proc(5) with names based on the /proc/[pid]/status -// fields. -type Stat_t struct { - // PID is the process ID. - PID uint - - // Name is the command run by the process. - Name string - - // State is the state of the process. - State State - - // StartTime is the number of clock ticks after system boot (since - // Linux 2.6). - StartTime uint64 -} - -// Stat returns a Stat_t instance for the specified process. -func Stat(pid int) (stat Stat_t, err error) { - bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) - if err != nil { - return stat, err - } - return parseStat(string(bytes)) -} - -// GetProcessStartTime is deprecated. Use Stat(pid) and -// Stat_t.StartTime instead. -func GetProcessStartTime(pid int) (string, error) { - stat, err := Stat(pid) - if err != nil { - return "", err - } - return fmt.Sprintf("%d", stat.StartTime), nil -} - -func parseStat(data string) (stat Stat_t, err error) { - // From proc(5), field 2 could contain space and is inside `(` and `)`. 
- // The following is an example: - // 89653 (gunicorn: maste) S 89630 89653 89653 0 -1 4194560 29689 28896 0 3 146 32 76 19 20 0 1 0 2971844 52965376 3920 18446744073709551615 1 1 0 0 0 0 0 16781312 137447943 0 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 - i := strings.LastIndex(data, ")") - if i <= 2 || i >= len(data)-1 { - return stat, fmt.Errorf("invalid stat data: %q", data) - } - - parts := strings.SplitN(data[:i], "(", 2) - if len(parts) != 2 { - return stat, fmt.Errorf("invalid stat data: %q", data) - } - - stat.Name = parts[1] - _, err = fmt.Sscanf(parts[0], "%d", &stat.PID) - if err != nil { - return stat, err - } - - // parts indexes should be offset by 3 from the field number given - // proc(5), because parts is zero-indexed and we've removed fields - // one (PID) and two (Name) in the paren-split. - parts = strings.Split(data[i+2:], " ") - var state int - fmt.Sscanf(parts[3-3], "%c", &state) - stat.State = State(state) - fmt.Sscanf(parts[22-3], "%d", &stat.StartTime) - return stat, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go deleted file mode 100644 index c5ca5d86..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux -// +build 386 arm - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. -func Setgid(gid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go deleted file mode 100644 index 11c3faaf..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux -// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le s390x - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. 
-func Setgid(gid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go deleted file mode 100644 index b8434f10..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build cgo,linux - -package system - -/* -#include -*/ -import "C" - -func GetClockTicks() int { - return int(C.sysconf(C._SC_CLK_TCK)) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go deleted file mode 100644 index d93b5d5f..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !cgo windows - -package system - -func GetClockTicks() int { - // TODO figure out a better alternative for platforms where we're missing cgo - // - // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency(). - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx - // - // An example of its usage can be found here. - // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx - - return 100 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go deleted file mode 100644 index b94be74a..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !linux - -package system - -import ( - "os" - - "github.com/opencontainers/runc/libcontainer/user" -) - -// RunningInUserNS is a stub for non-Linux systems -// Always returns false -func RunningInUserNS() bool { - return false -} - -// UIDMapInUserNS is a stub for non-Linux systems -// Always returns false -func UIDMapInUserNS(uidmap []user.IDMap) bool { - return false -} - -// GetParentNSeuid returns the euid within the parent user namespace -// Always returns os.Geteuid on non-linux -func GetParentNSeuid() int { - return os.Geteuid() -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go deleted file mode 100644 index a6823fc9..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go +++ /dev/null @@ -1,35 +0,0 @@ -package system - -import "golang.org/x/sys/unix" - -// Returns a []byte slice if the xattr is set and nil otherwise -// Requires path and its attribute as arguments -func Lgetxattr(path string, attr string) ([]byte, error) { - var sz int - // Start with a 128 length byte array - dest := make([]byte, 128) - sz, errno := unix.Lgetxattr(path, attr, dest) - - switch { - case errno == unix.ENODATA: - return nil, errno - case errno == unix.ENOTSUP: - return nil, errno - case errno == unix.ERANGE: - // 128 byte array might just not be good enough, - // A dummy buffer is used to get the real size - // of the xattrs on disk - sz, errno = unix.Lgetxattr(path, attr, []byte{}) - if errno != nil { - return nil, errno - } - dest = make([]byte, sz) - sz, errno = unix.Lgetxattr(path, attr, dest) - if errno != nil { - return nil, errno - } - case errno != nil: - return 
nil, errno - } - return dest[:sz], nil -} diff --git a/vendor/github.com/ory/dockertest/v3/.gitignore b/vendor/github.com/ory/dockertest/v3/.gitignore new file mode 100644 index 00000000..5f02d39b --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/.gitignore @@ -0,0 +1,7 @@ +.idea/ +node_modules/ +*.iml +*.exe +.cover/ +vendor/ +cover.out diff --git a/vendor/github.com/ory/dockertest/v3/.travis.yml b/vendor/github.com/ory/dockertest/v3/.travis.yml new file mode 100644 index 00000000..cc763cfb --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/.travis.yml @@ -0,0 +1,18 @@ +go_import_path: github.com/ory/dockertest/v3 + +language: go + +services: + - docker + +go: + - 1.13 + +before_install: + - go get github.com/mattn/goveralls github.com/sonatype-nexus-community/nancy + +script: + - go list -m all | nancy -quiet + - go vet -x . + - go test -covermode=atomic -coverprofile="cover.out" . + - goveralls -coverprofile="cover.out" diff --git a/vendor/github.com/ory/dockertest/v3/CONTRIBUTING.md b/vendor/github.com/ory/dockertest/v3/CONTRIBUTING.md new file mode 100644 index 00000000..c84d7c8c --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/CONTRIBUTING.md @@ -0,0 +1,119 @@ +# Contributing to ORY Dockertest + + + + + +- [Introduction](#introduction) +- [Contributing Code](#contributing-code) +- [Disclosing vulnerabilities](#disclosing-vulnerabilities) +- [Code Style](#code-style) +- [Pull request procedure](#pull-request-procedure) +- [Communication](#communication) +- [Conduct](#conduct) + + + +## Introduction + +Please note: We take ORY Dockertest's security and our users' trust very +seriously. If you believe you have found a security issue in ORY Dockertest, +please responsibly disclose by contacting us at hi@ory.sh. + +First: if you're unsure or afraid of anything, just ask or submit the issue or +pull request anyways. You won't be yelled at for giving it your best effort. The +worst that can happen is that you'll be politely asked to change something. We +appreciate any sort of contributions, and don't want a wall of rules to get in +the way of that. + +That said, if you want to ensure that a pull request is likely to be merged, +talk to us! You can find out our thoughts and ensure that your contribution +won't clash or be obviated by ORY Dockertest's normal direction. A great way to +do this is via the [ORY Community](https://community.ory.sh/) or join the +[ORY Chat](https://www.ory.sh/chat). + +## Contributing Code + +Unless you are fixing a known bug, we **strongly** recommend discussing it with +the core team via a GitHub issue or [in our chat](https://www.ory.sh/chat) +before getting started to ensure your work is consistent with ORY Dockertest's +roadmap and architecture. + +All contributions are made via pull request. Note that **all patches from all +contributors get reviewed**. After a pull request is made other contributors +will offer feedback, and if the patch passes review a maintainer will accept it +with a comment. When pull requests fail testing, authors are expected to update +their pull requests to address the failures until the tests pass and the pull +request merges successfully. + +At least one review from a maintainer is required for all patches (even patches +from maintainers). + +Reviewers should leave a "LGTM" comment once they are satisfied with the patch. +If the patch was submitted by a maintainer with write access, the pull request +should be merged by the submitter after review. 
+ +## Disclosing vulnerabilities + +Please disclose vulnerabilities exclusively to [hi@ory.am](mailto:hi@ory.am). Do +not use GitHub issues. + +## Code Style + +Please follow these guidelines when formatting source code: + +- Go code should match the output of `gofmt -s` + +## Pull request procedure + +To make a pull request, you will need a GitHub account; if you are unclear on +this process, see GitHub's documentation on +[forking](https://help.github.com/articles/fork-a-repo) and +[pull requests](https://help.github.com/articles/using-pull-requests). Pull +requests should be targeted at the `master` branch. Before creating a pull +request, go through this checklist: + +1. Create a feature branch off of `master` so that changes do not get mixed up. +1. [Rebase](http://git-scm.com/book/en/Git-Branching-Rebasing) your local + changes against the `master` branch. +1. Run the full project test suite with the `go test ./...` (or equivalent) + command and confirm that it passes. +1. Run `gofmt -s` (if the project is written in Go). +1. Ensure that each commit has a subsystem prefix (ex: `controller:`). + +Pull requests will be treated as "review requests," and maintainers will give +feedback on the style and substance of the patch. + +Normally, all pull requests must include tests that test your change. +Occasionally, a change will be very difficult to test for. In those cases, +please include a note in your commit message explaining why. + +## Communication + +We use [discord](https://www.ory.sh/chat). You are welcome to drop in and ask +questions, discuss bugs, etc. + +## Conduct + +Whether you are a regular contributor or a newcomer, we care about making this +community a safe place for you and we've got your back. + +- We are committed to providing a friendly, safe and welcoming environment for + all, regardless of gender, sexual orientation, disability, ethnicity, + religion, or similar personal characteristic. +- Please avoid using nicknames that might detract from a friendly, safe and + welcoming environment for all. +- Be kind and courteous. There is no need to be mean or rude. +- We will exclude you from interaction if you insult, demean or harass anyone. + In particular, we do not tolerate behavior that excludes people in socially + marginalized groups. +- Private harassment is also unacceptable. No matter who you are, if you feel + you have been or are being harassed or made uncomfortable by a community + member, please contact one of the channel ops or a member of the ORY + Dockertest core team immediately. +- Likewise any spamming, trolling, flaming, baiting or other attention-stealing + behaviour is not welcome. + +We welcome discussion about creating a welcoming, safe, and productive +environment for the community. If you have any questions, feedback, or concerns +[please let us know](https://www.ory.sh/chat). diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE b/vendor/github.com/ory/dockertest/v3/LICENSE similarity index 99% rename from vendor/github.com/zclconf/go-cty-yaml/LICENSE rename to vendor/github.com/ory/dockertest/v3/LICENSE index 8dada3ed..d6456956 100644 --- a/vendor/github.com/zclconf/go-cty-yaml/LICENSE +++ b/vendor/github.com/ory/dockertest/v3/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +179,7 @@ APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -186,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/ory/dockertest/v3/README.md b/vendor/github.com/ory/dockertest/v3/README.md new file mode 100644 index 00000000..4b3251ed --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/README.md @@ -0,0 +1,121 @@ +

# ORY Dockertest

+ +[![Build Status](https://travis-ci.org/ory/dockertest.svg)](https://travis-ci.org/ory/dockertest?branch=master) +[![Coverage Status](https://coveralls.io/repos/github/ory/dockertest/badge.svg?branch=v3)](https://coveralls.io/github/ory/dockertest?branch=v3) + +Use Docker to run your Go language integration tests against third party services on **Microsoft Windows, Mac OSX and Linux**! + + + +**Table of Contents** + +- [Why should I use Dockertest?](#why-should-i-use-dockertest) +- [Installing and using Dockertest](#installing-and-using-dockertest) + - [Using Dockertest](#using-dockertest) + - [Examples](#examples) +- [Troubleshoot & FAQ](#troubleshoot-&-faq) + - [Out of disk space](#out-of-disk-space) + - [Removing old containers](#removing-old-containers) + + + +## Why should I use Dockertest? + +When developing applications, it is often necessary to use services that talk to a database system. +Unit Testing these services can be cumbersome because mocking database/DBAL is strenuous. Making slight changes to the +schema implies rewriting at least some, if not all of the mocks. The same goes for API changes in the DBAL. +To avoid this, it is smarter to test these specific services against a real database that is destroyed after testing. +Docker is the perfect system for running unit tests as you can spin up containers in a few seconds and kill them when +the test completes. The Dockertest library provides easy to use commands for spinning up Docker containers and using +them for your tests. + +## Installing and using Dockertest + +Using Dockertest is straightforward and simple. Check the [releases tab](https://github.com/ory/dockertest/releases) +for available releases. + +To install dockertest, run +``` +go get -u github.com/ory/dockertest/v3 +``` +or +``` +dep ensure -add github.com/ory/dockertest@v3.x.y +``` + +### Using Dockertest + +```go +package dockertest_test + +import ( + "database/sql" + "fmt" + "log" + "os" + "testing" + + _ "github.com/go-sql-driver/mysql" + "github.com/ory/dockertest/v3" +) + +var db *sql.DB + +func TestMain(m *testing.M) { + // uses a sensible default on windows (tcp/http) and linux/osx (socket) + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + // pulls an image, creates a container based on it and runs it + resource, err := pool.Run("mysql", "5.7", []string{"MYSQL_ROOT_PASSWORD=secret"}) + if err != nil { + log.Fatalf("Could not start resource: %s", err) + } + + // exponential backoff-retry, because the application in the container might not be ready to accept connections yet + if err := pool.Retry(func() error { + var err error + db, err = sql.Open("mysql", fmt.Sprintf("root:secret@(localhost:%s)/mysql", resource.GetPort("3306/tcp"))) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + code := m.Run() + + // You can't defer this because os.Exit doesn't care for defer + if err := pool.Purge(resource); err != nil { + log.Fatalf("Could not purge resource: %s", err) + } + + os.Exit(code) +} + +func TestSomething(t *testing.T) { + // db.Query() +} +``` + +### Examples + +We provide code examples for well known services in the [examples](examples/) directory, check them out! + +## Troubleshoot & FAQ + +### Out of disk space + +Try cleaning up the images with [docker-cleanup-volumes](https://github.com/chadoe/docker-cleanup-volumes). 
+ +### Removing old containers + +Sometimes container clean up fails. Check out +[this stackoverflow question](http://stackoverflow.com/questions/21398087/how-to-delete-dockers-images) on how to fix this. You may also set an absolute lifetime on containers: + +```go +resource.Expire(60) // Tell docker to hard kill the container in 60 seconds +``` diff --git a/vendor/github.com/ory/dockertest/v3/SECURITY.md b/vendor/github.com/ory/dockertest/v3/SECURITY.md new file mode 100644 index 00000000..d65c31cd --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/SECURITY.md @@ -0,0 +1,28 @@ + + +**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* + +- [Security Policy](#security-policy) + - [Supported Versions](#supported-versions) + - [Reporting a Vulnerability](#reporting-a-vulnerability) + + + +# Security Policy + +## Supported Versions + +We release patches for security vulnerabilities. Which versions are eligible +receiving such patches depend on the CVSS v3.0 Rating: + +| CVSS v3.0 | Supported Versions | +| --------- | ----------------------------------------- | +| 9.0-10.0 | Releases within the previous three months | +| 4.0-8.9 | Most recent release | + +## Reporting a Vulnerability + +Please report (suspected) security vulnerabilities to +**[security@ory.sh](mailto:security@ory.sh)**. You will receive a response from +us within 48 hours. If the issue is confirmed, we will release a patch as soon +as possible depending on complexity but historically within a few days. diff --git a/vendor/github.com/ory/dockertest/v3/docker/AUTHORS b/vendor/github.com/ory/dockertest/v3/docker/AUTHORS new file mode 100644 index 00000000..464d9498 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/AUTHORS @@ -0,0 +1,192 @@ +# This is the official list of go-dockerclient authors for copyright purposes. + +Abhishek Chanda +Adam Bell-Hanssen +Adnan Khan +Adrien Kohlbecker +Aldrin Leal +Alex Dadgar +Alfonso Acosta +André Carvalho +Andreas Jaekle +Andrew Snodgrass +Andrews Medina +Andrey Sibiryov +Andy Goldstein +Anirudh Aithal +Antonio Murdaca +Artem Sidorenko +Arthur Rodrigues +Ben Marini +Ben McCann +Ben Parees +Benno van den Berg +Bradley Cicenas +Brendan Fosberry +Brian Lalor +Brian P. Hamachek +Brian Palmer +Bryan Boreham +Burke Libbey +Carlos Diaz-Padron +Carson A +Cássio Botaro +Cesar Wong +Cezar Sa Espinola +Changping Chen +Cheah Chu Yeow +cheneydeng +Chris Bednarski +Chris Stavropoulos +Christian Stewart +Christophe Mourette +Clayton Coleman +Clint Armstrong +CMGS +Colin Hebert +Craig Jellick +Damien Lespiau +Damon Wang +Dan Williams +Daniel, Dao Quang Minh +Daniel Garcia +Daniel Hiltgen +Daniel Nephin +Daniel Tsui +Darren Shepherd +Dave Choi +David Huie +Dawn Chen +Denis Makogon +Derek Petersen +Dinesh Subhraveti +Drew Wells +Ed +Elias G. Schneevoigt +Erez Horev +Eric Anderson +Eric J. 
Holmes +Eric Mountain +Erwin van Eyk +Ethan Mosbaugh +Ewout Prangsma +Fabio Rehm +Fatih Arslan +Felipe Oliveira +Flavia Missi +Florent Aide +Francisco Souza +Frank Groeneveld +George Moura +Grégoire Delattre +Guilherme Rezende +Guillermo Álvarez Fernández +Harry Zhang +He Simei +Isaac Schnitzer +Ivan Mikushin +James Bardin +James Nugent +Jamie Snell +Januar Wayong +Jari Kolehmainen +Jason Wilder +Jawher Moussa +Jean-Baptiste Dalido +Jeff Mitchell +Jeffrey Hulten +Jen Andre +Jérôme Laurens +Jim Minter +Johan Euphrosine +Johannes Scheuermann +John Hughes +Jorge Marey +Julian Einwag +Kamil Domanski +Karan Misra +Ken Herner +Kevin Lin +Kevin Xu +Kim, Hirokuni +Kostas Lekkas +Kyle Allan +Yunhee Lee +Liron Levin +Lior Yankovich +Liu Peng +Lorenz Leutgeb +Lucas Clemente +Lucas Weiblen +Lyon Hill +Mantas Matelis +Manuel Vogel +Marguerite des Trois Maisons +Mariusz Borsa +Martin Sweeney +Máximo Cuadros Ortiz +Michael Schmatz +Michal Fojtik +Mike Dillon +Mrunal Patel +Nate Jones +Nguyen Sy Thanh Son +Nicholas Van Wiggeren +Nick Ethier +niko83 +Omeid Matten +Orivej Desh +Paul Bellamy +Paul Morie +Paul Weil +Peter Edge +Peter Jihoon Kim +Peter Teich +Phil Lu +Philippe Lafoucrière +Radek Simko +Rafe Colton +Raphaël Pinson +Reed Allman +RJ Catalano +Rob Miller +Robbert Klarenbeek +Robert Williamson +Roman Khlystik +Russell Haering +Salvador Gironès +Sam Rijs +Sami Wagiaalla +Samuel Archambault +Samuel Karp +Sebastian Borza +Seth Jennings +Shane Xie +Silas Sewell +Simon Eskildsen +Simon Menke +Skolos +Soulou +Sridhar Ratnakumar +Steven Jack +Summer Mousa +Sunjin Lee +Sunny +Swaroop Ramachandra +Tarsis Azevedo +Tim Schindler +Timothy St. Clair +Tobi Knaup +Tom Wilkie +Tonic +ttyh061 +upccup +Victor Marmol +Vincenzo Prignano +Vlad Alexandru Ionescu +Weitao Zhou +Wiliam Souza +Ye Yin +Yosuke Otosu +Yu, Zou +Yuriy Bogdanov diff --git a/vendor/github.com/ory/dockertest/v3/docker/DOCKER-LICENSE b/vendor/github.com/ory/dockertest/v3/docker/DOCKER-LICENSE new file mode 100644 index 00000000..70663447 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/DOCKER-LICENSE @@ -0,0 +1,6 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +You can find the Docker license at the following link: +https://raw.githubusercontent.com/docker/docker/master/LICENSE diff --git a/vendor/github.com/ory/dockertest/v3/docker/LICENSE b/vendor/github.com/ory/dockertest/v3/docker/LICENSE new file mode 100644 index 00000000..f3ce3a9a --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013-2018, go-dockerclient authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ory/dockertest/v3/docker/README.markdown b/vendor/github.com/ory/dockertest/v3/docker/README.markdown new file mode 100644 index 00000000..86824d6c --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/README.markdown @@ -0,0 +1,133 @@ +# go-dockerclient + +[![Travis Build Status](https://travis-ci.org/fsouza/go-dockerclient.svg?branch=master)](https://travis-ci.org/fsouza/go-dockerclient) +[![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/4m374pti06ubg2l7?svg=true)](https://ci.appveyor.com/project/fsouza/go-dockerclient) +[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient) + +This package presents a client for the Docker remote API. It also provides +support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/). + +This package also provides support for docker's network API, which is a simple +passthrough to the libnetwork remote API. Note that docker's network API is +only available in docker 1.8 and above, and only enabled in docker if +DOCKER_EXPERIMENTAL is defined during the docker build process. + +For more details, check the [remote API +documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/). + +## Example + +```go +package main + +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) + +func main() { + endpoint := "unix:///var/run/docker.sock" + client, err := docker.NewClient(endpoint) + if err != nil { + panic(err) + } + imgs, err := client.ListImages(docker.ListImagesOptions{All: false}) + if err != nil { + panic(err) + } + for _, img := range imgs { + fmt.Println("ID: ", img.ID) + fmt.Println("RepoTags: ", img.RepoTags) + fmt.Println("Created: ", img.Created) + fmt.Println("Size: ", img.Size) + fmt.Println("VirtualSize: ", img.VirtualSize) + fmt.Println("ParentId: ", img.ParentID) + } +} +``` + +## Using with TLS + +In order to instantiate the client for a TLS-enabled daemon, you should use +NewTLSClient, passing the endpoint and path for key and certificates as +parameters. + +```go +package main + +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) + +func main() { + endpoint := "tcp://[ip]:[port]" + path := os.Getenv("DOCKER_CERT_PATH") + ca := fmt.Sprintf("%s/ca.pem", path) + cert := fmt.Sprintf("%s/cert.pem", path) + key := fmt.Sprintf("%s/key.pem", path) + client, _ := docker.NewTLSClient(endpoint, cert, key, ca) + // use client +} +``` + +If using [docker-machine](https://docs.docker.com/machine/), or another +application that exports environment variables `DOCKER_HOST`, +`DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH`, you can use NewClientFromEnv. + + +```go +package main + +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) + +func main() { + client, _ := docker.NewClientFromEnv() + // use client +} +``` + +See the documentation for more details. 
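+
+A quick connectivity check once a client exists (a sketch; `Ping` is part of
+the same client API and hits the daemon's `/_ping` endpoint):
+
+```go
+if err := client.Ping(); err != nil {
+	log.Fatalf("docker daemon unreachable: %s", err)
+}
+```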
+ +## Developing + +All development commands can be seen in the [Makefile](Makefile). + +Commited code must pass: + +* [golint](https://github.com/golang/lint) (with some exceptions, see the Makefile). +* [go vet](https://golang.org/cmd/vet/) +* [gofmt](https://golang.org/cmd/gofmt) +* [go test](https://golang.org/cmd/go/#hdr-Test_packages) + +Running `make test` will check all of these. If your editor does not +automatically call ``gofmt -s``, `make fmt` will format all go files in this +repository. + +## Vendoring + +go-dockerclient uses [dep](https://github.com/golang/dep/) for vendoring. If +you're using dep, you should be able to pick go-dockerclient releases and get +the proper dependencies. + +With other vendoring tools, users might need to specify go-dockerclient's +dependencies manually. + +## Using with Docker 1.9 and Go 1.4 + +There's a tag for using go-dockerclient with Docker 1.9 (which requires +compiling go-dockerclient with Go 1.4), the tag name is ``docker-1.9/go-1.4``. + +The instructions below can be used to get a version of go-dockerclient that compiles with Go 1.4: + +``` +% git clone -b docker-1.9/go-1.4 https://github.com/fsouza/go-dockerclient.git $GOPATH/src/github.com/fsouza/go-dockerclient +% git clone -b v1.9.1 https://github.com/docker/docker.git $GOPATH/src/github.com/docker/docker +% go get github.com/fsouza/go-dockerclient +``` diff --git a/vendor/github.com/ory/dockertest/v3/docker/auth.go b/vendor/github.com/ory/dockertest/v3/docker/auth.go new file mode 100644 index 00000000..c58de867 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/auth.go @@ -0,0 +1,185 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "strings" +) + +// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed. +var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg") + +// AuthConfiguration represents authentication options to use in the PushImage +// method. It represents the authentication in the Docker index server. +type AuthConfiguration struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Email string `json:"email,omitempty"` + ServerAddress string `json:"serveraddress,omitempty"` +} + +// AuthConfigurations represents authentication options to use for the +// PushImage method accommodating the new X-Registry-Config header +type AuthConfigurations struct { + Configs map[string]AuthConfiguration `json:"configs"` +} + +// AuthConfigurations119 is used to serialize a set of AuthConfigurations +// for Docker API >= 1.19. +type AuthConfigurations119 map[string]AuthConfiguration + +// dockerConfig represents a registry authentation configuration from the +// .dockercfg file. +type dockerConfig struct { + Auth string `json:"auth"` + Email string `json:"email"` +} + +// NewAuthConfigurationsFromFile returns AuthConfigurations from a path containing JSON +// in the same format as the .dockercfg file. 
+func NewAuthConfigurationsFromFile(path string) (*AuthConfigurations, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + return NewAuthConfigurations(r) +} + +func cfgPaths(dockerConfigEnv string, homeEnv string) []string { + var paths []string + if dockerConfigEnv != "" { + paths = append(paths, path.Join(dockerConfigEnv, "config.json")) + } + if homeEnv != "" { + paths = append(paths, path.Join(homeEnv, ".docker", "config.json")) + paths = append(paths, path.Join(homeEnv, ".dockercfg")) + } + return paths +} + +// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from +// system config files. The following files are checked in the order listed: +// - $DOCKER_CONFIG/config.json if DOCKER_CONFIG set in the environment, +// - $HOME/.docker/config.json +// - $HOME/.dockercfg +func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) { + err := fmt.Errorf("No docker configuration found") + var auths *AuthConfigurations + + pathsToTry := cfgPaths(os.Getenv("DOCKER_CONFIG"), os.Getenv("HOME")) + for _, path := range pathsToTry { + auths, err = NewAuthConfigurationsFromFile(path) + if err == nil { + return auths, nil + } + } + return auths, err +} + +// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the +// same format as the .dockercfg file. +func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) { + var auth *AuthConfigurations + confs, err := parseDockerConfig(r) + if err != nil { + return nil, err + } + auth, err = authConfigs(confs) + if err != nil { + return nil, err + } + return auth, nil +} + +func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) { + buf := new(bytes.Buffer) + buf.ReadFrom(r) + byteData := buf.Bytes() + + confsWrapper := struct { + Auths map[string]dockerConfig `json:"auths"` + }{} + if err := json.Unmarshal(byteData, &confsWrapper); err == nil { + if len(confsWrapper.Auths) > 0 { + return confsWrapper.Auths, nil + } + } + + var confs map[string]dockerConfig + if err := json.Unmarshal(byteData, &confs); err != nil { + return nil, err + } + return confs, nil +} + +// authConfigs converts a dockerConfigs map to a AuthConfigurations object. +func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { + c := &AuthConfigurations{ + Configs: make(map[string]AuthConfiguration), + } + for reg, conf := range confs { + if conf.Auth == "" { + continue + } + data, err := base64.StdEncoding.DecodeString(conf.Auth) + if err != nil { + return nil, err + } + userpass := strings.SplitN(string(data), ":", 2) + if len(userpass) != 2 { + return nil, ErrCannotParseDockercfg + } + c.Configs[reg] = AuthConfiguration{ + Email: conf.Email, + Username: userpass[0], + Password: userpass[1], + ServerAddress: reg, + } + } + return c, nil +} + +// AuthStatus returns the authentication status for Docker API versions >= 1.23. +type AuthStatus struct { + Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"` + IdentityToken string `json:"IdentityToken,omitempty" yaml:"IdentityToken,omitempty" toml:"IdentityToken,omitempty"` +} + +// AuthCheck validates the given credentials. It returns nil if successful. +// +// For Docker API versions >= 1.23, the AuthStatus struct will be populated, otherwise it will be empty.` +// +// See https://goo.gl/6nsZkH for more details. 
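+//
+// A hypothetical usage sketch (credentials here are placeholders):
+//
+//	status, err := client.AuthCheck(&AuthConfiguration{
+//		Username: "user",
+//		Password: "secret",
+//	})
+//	if err == nil {
+//		fmt.Println(status.Status)
+//	}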
+func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) { + var authStatus AuthStatus + if conf == nil { + return authStatus, errors.New("conf is nil") + } + resp, err := c.do("POST", "/auth", doOptions{data: conf}) + if err != nil { + return authStatus, err + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return authStatus, err + } + if len(data) == 0 { + return authStatus, nil + } + if err := json.Unmarshal(data, &authStatus); err != nil { + return authStatus, err + } + return authStatus, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/change.go b/vendor/github.com/ory/dockertest/v3/docker/change.go new file mode 100644 index 00000000..3f936b22 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/change.go @@ -0,0 +1,43 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import "fmt" + +// ChangeType is a type for constants indicating the type of change +// in a container +type ChangeType int + +const ( + // ChangeModify is the ChangeType for container modifications + ChangeModify ChangeType = iota + + // ChangeAdd is the ChangeType for additions to a container + ChangeAdd + + // ChangeDelete is the ChangeType for deletions from a container + ChangeDelete +) + +// Change represents a change in a container. +// +// See https://goo.gl/Wo0JJp for more details. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + var kind string + switch change.Kind { + case ChangeModify: + kind = "C" + case ChangeAdd: + kind = "A" + case ChangeDelete: + kind = "D" + } + return fmt.Sprintf("%s %s", kind, change.Path) +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/client.go b/vendor/github.com/ory/dockertest/v3/docker/client.go new file mode 100644 index 00000000..4a7e7021 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/client.go @@ -0,0 +1,1092 @@ +// Copyright 2013 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package docker provides a client for the Docker remote API. +// +// See https://goo.gl/o2v3rk for more details on the remote API. +package docker + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/ory/dockertest/v3/docker/opts" + "github.com/ory/dockertest/v3/docker/pkg/homedir" + "github.com/ory/dockertest/v3/docker/pkg/jsonmessage" + "github.com/ory/dockertest/v3/docker/pkg/stdcopy" +) + +const ( + userAgent = "go-dockerclient" + + unixProtocol = "unix" + namedPipeProtocol = "npipe" +) + +var ( + // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL. + ErrInvalidEndpoint = errors.New("invalid endpoint") + + // ErrConnectionRefused is returned when the client cannot connect to the given endpoint. + ErrConnectionRefused = errors.New("cannot connect to Docker endpoint") + + // ErrInactivityTimeout is returned when a streamable call has been inactive for some time. 
+	ErrInactivityTimeout = errors.New("inactivity time exceeded timeout")
+
+	apiVersion112, _ = NewAPIVersion("1.12")
+	apiVersion119, _ = NewAPIVersion("1.19")
+	apiVersion124, _ = NewAPIVersion("1.24")
+	apiVersion125, _ = NewAPIVersion("1.25")
+)
+
+// APIVersion is an internal representation of a version of the Remote API.
+type APIVersion []int
+
+// NewAPIVersion returns an instance of APIVersion for the given string.
+//
+// The given string must be in the form <major>.<minor>.<patch>, where <major>,
+// <minor> and <patch> are integer numbers.
+func NewAPIVersion(input string) (APIVersion, error) {
+	if !strings.Contains(input, ".") {
+		return nil, fmt.Errorf("Unable to parse version %q", input)
+	}
+	raw := strings.Split(input, "-")
+	arr := strings.Split(raw[0], ".")
+	ret := make(APIVersion, len(arr))
+	var err error
+	for i, val := range arr {
+		ret[i], err = strconv.Atoi(val)
+		if err != nil {
+			return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
+		}
+	}
+	return ret, nil
+}
+
+func (version APIVersion) String() string {
+	var str string
+	for i, val := range version {
+		str += strconv.Itoa(val)
+		if i < len(version)-1 {
+			str += "."
+		}
+	}
+	return str
+}
+
+// LessThan is a function for comparing APIVersion structs
+func (version APIVersion) LessThan(other APIVersion) bool {
+	return version.compare(other) < 0
+}
+
+// LessThanOrEqualTo is a function for comparing APIVersion structs
+func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
+	return version.compare(other) <= 0
+}
+
+// GreaterThan is a function for comparing APIVersion structs
+func (version APIVersion) GreaterThan(other APIVersion) bool {
+	return version.compare(other) > 0
+}
+
+// GreaterThanOrEqualTo is a function for comparing APIVersion structs
+func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
+	return version.compare(other) >= 0
+}
+
+func (version APIVersion) compare(other APIVersion) int {
+	for i, v := range version {
+		if i <= len(other)-1 {
+			otherVersion := other[i]
+
+			if v < otherVersion {
+				return -1
+			} else if v > otherVersion {
+				return 1
+			}
+		}
+	}
+	if len(version) > len(other) {
+		return 1
+	}
+	if len(version) < len(other) {
+		return -1
+	}
+	return 0
+}
+
+// Client is the basic type of this package. It provides methods for
+// interaction with the API.
+type Client struct {
+	SkipServerVersionCheck bool
+	HTTPClient             *http.Client
+	TLSConfig              *tls.Config
+	Dialer                 Dialer
+
+	endpoint            string
+	endpointURL         *url.URL
+	eventMonitor        *eventMonitoringState
+	requestedAPIVersion APIVersion
+	serverAPIVersion    APIVersion
+	expectedAPIVersion  APIVersion
+}
+
+// Dialer is an interface that allows network connections to be dialed;
+// net.Dialer fulfills it, and a shim using winio.DialPipe does the same
+// for named pipes on Windows.
+type Dialer interface {
+	Dial(network, address string) (net.Conn, error)
+}
+
+// NewClient returns a Client instance ready for communication with the given
+// server endpoint. It will use the latest remote API version available in the
+// server.
+func NewClient(endpoint string) (*Client, error) {
+	client, err := NewVersionedClient(endpoint, "")
+	if err != nil {
+		return nil, err
+	}
+	client.SkipServerVersionCheck = true
+	return client, nil
+}
+
+// NewTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates. It will use the latest remote API version
+// available in the server.
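+//
+// A minimal usage sketch (the endpoint and certificate paths are
+// illustrative):
+//
+//	client, err := NewTLSClient("tcp://192.168.99.100:2376",
+//		"/certs/cert.pem", "/certs/key.pem", "/certs/ca.pem")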
+func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
+	client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
+	if err != nil {
+		return nil, err
+	}
+	client.SkipServerVersionCheck = true
+	return client, nil
+}
+
+// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file). It will use the latest remote API version available in the server.
+func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
+	client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
+	if err != nil {
+		return nil, err
+	}
+	client.SkipServerVersionCheck = true
+	return client, nil
+}
+
+// NewVersionedClient returns a Client instance ready for communication with
+// the given server endpoint, using a specific remote API version.
+func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
+	u, err := parseEndpoint(endpoint, false)
+	if err != nil {
+		return nil, err
+	}
+	var requestedAPIVersion APIVersion
+	if strings.Contains(apiVersionString, ".") {
+		requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+		if err != nil {
+			return nil, err
+		}
+	}
+	c := &Client{
+		HTTPClient:          defaultClient(),
+		Dialer:              &net.Dialer{},
+		endpoint:            endpoint,
+		endpointURL:         u,
+		eventMonitor:        new(eventMonitoringState),
+		requestedAPIVersion: requestedAPIVersion,
+	}
+	c.initializeNativeClient(defaultTransport)
+	return c, nil
+}
+
+// WithTransport replaces the underlying HTTP client of the Docker Client by
+// accepting a function that returns a pointer to a transport object.
+func (c *Client) WithTransport(trFunc func() *http.Transport) {
+	c.initializeNativeClient(trFunc)
+}
+
+// NewVersionnedTLSClient is like NewVersionedClient, but with ann extra n.
+//
+// Deprecated: Use NewVersionedTLSClient instead.
+func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+	return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
+}
+
+// NewVersionedTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates, using a specific remote API version.
+func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+	var certPEMBlock []byte
+	var keyPEMBlock []byte
+	var caPEMCert []byte
+	if _, err := os.Stat(cert); !os.IsNotExist(err) {
+		certPEMBlock, err = ioutil.ReadFile(cert)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if _, err := os.Stat(key); !os.IsNotExist(err) {
+		keyPEMBlock, err = ioutil.ReadFile(key)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if _, err := os.Stat(ca); !os.IsNotExist(err) {
+		caPEMCert, err = ioutil.ReadFile(ca)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
+}
+
+// NewClientFromEnv returns a Client instance ready for communication created from
+// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
+//
+// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
+// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
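+//
+// A minimal sketch (the environment values shown are illustrative):
+//
+//	// DOCKER_HOST=tcp://192.168.99.100:2376
+//	// DOCKER_TLS_VERIFY=1
+//	// DOCKER_CERT_PATH=/home/user/.docker
+//	client, err := NewClientFromEnv()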
+func NewClientFromEnv() (*Client, error) {
+	client, err := NewVersionedClientFromEnv("")
+	if err != nil {
+		return nil, err
+	}
+	client.SkipServerVersionCheck = true
+	return client, nil
+}
+
+// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
+// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
+// and using a specific remote API version.
+//
+// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
+// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
+func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
+	dockerEnv, err := getDockerEnv()
+	if err != nil {
+		return nil, err
+	}
+	dockerHost := dockerEnv.dockerHost
+	if dockerEnv.dockerTLSVerify {
+		parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
+		}
+		cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
+		key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
+		ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
+		return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
+	}
+	return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
+}
+
+// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file), using a specific remote API version.
+func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
+	u, err := parseEndpoint(endpoint, true)
+	if err != nil {
+		return nil, err
+	}
+	var requestedAPIVersion APIVersion
+	if strings.Contains(apiVersionString, ".") {
+		requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+		if err != nil {
+			return nil, err
+		}
+	}
+	tlsConfig := &tls.Config{}
+	if certPEMBlock != nil && keyPEMBlock != nil {
+		tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.Certificates = []tls.Certificate{tlsCert}
+	}
+	if caPEMCert == nil {
+		tlsConfig.InsecureSkipVerify = true
+	} else {
+		caPool := x509.NewCertPool()
+		if !caPool.AppendCertsFromPEM(caPEMCert) {
+			return nil, errors.New("Could not add RootCA pem")
+		}
+		tlsConfig.RootCAs = caPool
+	}
+	tr := defaultTransport()
+	tr.TLSClientConfig = tlsConfig
+	c := &Client{
+		HTTPClient:          &http.Client{Transport: tr},
+		TLSConfig:           tlsConfig,
+		Dialer:              &net.Dialer{},
+		endpoint:            endpoint,
+		endpointURL:         u,
+		eventMonitor:        new(eventMonitoringState),
+		requestedAPIVersion: requestedAPIVersion,
+	}
+	c.initializeNativeClient(defaultTransport)
+	return c, nil
+}
+
+// SetTimeout takes a timeout and applies it to the HTTPClient. It should not
+// be called concurrently with any other Client methods.
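+//
+// For example (the duration is illustrative):
+//
+//	client.SetTimeout(30 * time.Second)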
+func (c *Client) SetTimeout(t time.Duration) {
+	if c.HTTPClient != nil {
+		c.HTTPClient.Timeout = t
+	}
+}
+
+func (c *Client) checkAPIVersion() error {
+	serverAPIVersionString, err := c.getServerAPIVersionString()
+	if err != nil {
+		return err
+	}
+	c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
+	if err != nil {
+		return err
+	}
+	if c.requestedAPIVersion == nil {
+		c.expectedAPIVersion = c.serverAPIVersion
+	} else {
+		c.expectedAPIVersion = c.requestedAPIVersion
+	}
+	return nil
+}
+
+// Endpoint returns the current endpoint. It's useful for getting the endpoint
+// when using functions that get this data from the environment (like
+// NewClientFromEnv).
+func (c *Client) Endpoint() string {
+	return c.endpoint
+}
+
+// Ping pings the docker server.
+//
+// See https://goo.gl/wYfgY1 for more details.
+func (c *Client) Ping() error {
+	return c.PingWithContext(nil)
+}
+
+// PingWithContext pings the docker server.
+// The context object can be used to cancel the ping request.
+//
+// See https://goo.gl/wYfgY1 for more details.
+func (c *Client) PingWithContext(ctx context.Context) error {
+	path := "/_ping"
+	resp, err := c.do("GET", path, doOptions{context: ctx})
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return newError(resp)
+	}
+	resp.Body.Close()
+	return nil
+}
+
+func (c *Client) getServerAPIVersionString() (version string, err error) {
+	resp, err := c.do("GET", "/version", doOptions{})
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
+	}
+	var versionResponse map[string]interface{}
+	if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
+		return "", err
+	}
+	if version, ok := (versionResponse["ApiVersion"]).(string); ok {
+		return version, nil
+	}
+	return "", nil
+}
+
+type doOptions struct {
+	data      interface{}
+	forceJSON bool
+	headers   map[string]string
+	context   context.Context
+}
+
+func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
+	var params io.Reader
+	if doOptions.data != nil || doOptions.forceJSON {
+		buf, err := json.Marshal(doOptions.data)
+		if err != nil {
+			return nil, err
+		}
+		params = bytes.NewBuffer(buf)
+	}
+	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+		err := c.checkAPIVersion()
+		if err != nil {
+			return nil, err
+		}
+	}
+	protocol := c.endpointURL.Scheme
+	var u string
+	switch protocol {
+	case unixProtocol, namedPipeProtocol:
+		u = c.getFakeNativeURL(path)
+	default:
+		u = c.getURL(path)
+	}
+
+	req, err := http.NewRequest(method, u, params)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("User-Agent", userAgent)
+	if doOptions.data != nil {
+		req.Header.Set("Content-Type", "application/json")
+	} else if method == "POST" {
+		req.Header.Set("Content-Type", "plain/text")
+	}
+
+	for k, v := range doOptions.headers {
+		req.Header.Set(k, v)
+	}
+
+	ctx := doOptions.context
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	resp, err := c.HTTPClient.Do(req.WithContext(ctx))
+	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return nil, ErrConnectionRefused
+		}
+
+		return nil, chooseError(ctx, err)
+	}
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		return nil, newError(resp)
+	}
+	return resp, nil
+}
+
+type streamOptions struct {
+	setRawTerminal bool
+	rawJSONStream  bool
+	useJSONDecoder bool
+	headers        map[string]string
+	in             io.Reader
+	stdout         io.Writer
+	stderr         io.Writer
+	reqSent        chan struct{}
+	// timeout is the initial connection timeout
+	timeout time.Duration
+	// inactivityTimeout is the timeout applied while no data is received;
+	// it's reset every time new data arrives
+	inactivityTimeout time.Duration
+	context           context.Context
+}
+
+// if there is an error in the context, return that instead of the generic http error
+func chooseError(ctx context.Context, err error) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+		return err
+	}
+}
+
+func (c *Client) stream(method, path string, streamOptions streamOptions) error {
+	if (method == "POST" || method == "PUT") && streamOptions.in == nil {
+		streamOptions.in = bytes.NewReader(nil)
+	}
+	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+		err := c.checkAPIVersion()
+		if err != nil {
+			return err
+		}
+	}
+	req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("User-Agent", userAgent)
+	if method == "POST" {
+		req.Header.Set("Content-Type", "plain/text")
+	}
+	for key, val := range streamOptions.headers {
+		req.Header.Set(key, val)
+	}
+	var resp *http.Response
+	protocol := c.endpointURL.Scheme
+	address := c.endpointURL.Path
+	if streamOptions.stdout == nil {
+		streamOptions.stdout = ioutil.Discard
+	}
+	if streamOptions.stderr == nil {
+		streamOptions.stderr = ioutil.Discard
+	}
+
+	// make a sub-context so that our active cancellation does not affect parent
+	ctx := streamOptions.context
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	subCtx, cancelRequest := context.WithCancel(ctx)
+	defer cancelRequest()
+
+	if protocol == unixProtocol || protocol == namedPipeProtocol {
+		var dial net.Conn
+		dial, err = c.Dialer.Dial(protocol, address)
+		if err != nil {
+			return err
+		}
+		go func() {
+			<-subCtx.Done()
+			dial.Close()
+		}()
+		breader := bufio.NewReader(dial)
+		err = req.Write(dial)
+		if err != nil {
+			return chooseError(subCtx, err)
+		}
+
+		// ReadResponse may hang if the server does not reply
+		if streamOptions.timeout > 0 {
+			dial.SetDeadline(time.Now().Add(streamOptions.timeout))
+		}
+
+		if streamOptions.reqSent != nil {
+			close(streamOptions.reqSent)
+		}
+		if resp, err = http.ReadResponse(breader, req); err != nil {
+			// Cancel timeout for future I/O operations
+			if streamOptions.timeout > 0 {
+				dial.SetDeadline(time.Time{})
+			}
+			if strings.Contains(err.Error(), "connection refused") {
+				return ErrConnectionRefused
+			}
+
+			return chooseError(subCtx, err)
+		}
+	} else {
+		if resp, err = c.HTTPClient.Do(req.WithContext(subCtx)); err != nil {
+			if strings.Contains(err.Error(), "connection refused") {
+				return ErrConnectionRefused
+			}
+			return chooseError(subCtx, err)
+		}
+		if streamOptions.reqSent != nil {
+			close(streamOptions.reqSent)
+		}
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		return newError(resp)
+	}
+	var canceled uint32
+	if streamOptions.inactivityTimeout > 0 {
+		var ch chan<- struct{}
+		resp.Body, ch = handleInactivityTimeout(resp.Body, streamOptions.inactivityTimeout, cancelRequest, &canceled)
+		defer close(ch)
+	}
+	err = handleStreamResponse(resp, &streamOptions)
+	if err != nil {
+		if atomic.LoadUint32(&canceled) != 0 {
+			return ErrInactivityTimeout
+		}
+		return chooseError(subCtx, err)
+	}
+	return nil
+}
+
+func handleStreamResponse(resp *http.Response, streamOptions *streamOptions) error {
+	var err error
+	if !streamOptions.useJSONDecoder &&
resp.Header.Get("Content-Type") != "application/json" { + if streamOptions.setRawTerminal { + _, err = io.Copy(streamOptions.stdout, resp.Body) + } else { + _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body) + } + return err + } + // if we want to get raw json stream, just copy it back to output + // without decoding it + if streamOptions.rawJSONStream { + _, err = io.Copy(streamOptions.stdout, resp.Body) + return err + } + if st, ok := streamOptions.stdout.(interface { + io.Writer + FD() uintptr + IsTerminal() bool + }); ok { + err = jsonmessage.DisplayJSONMessagesToStream(resp.Body, st, nil) + } else { + err = jsonmessage.DisplayJSONMessagesStream(resp.Body, streamOptions.stdout, 0, false, nil) + } + return err +} + +type proxyReader struct { + io.ReadCloser + calls uint64 +} + +func (p *proxyReader) callCount() uint64 { + return atomic.LoadUint64(&p.calls) +} + +func (p *proxyReader) Read(data []byte) (int, error) { + atomic.AddUint64(&p.calls, 1) + return p.ReadCloser.Read(data) +} + +func handleInactivityTimeout(reader io.ReadCloser, timeout time.Duration, cancelRequest func(), canceled *uint32) (io.ReadCloser, chan<- struct{}) { + done := make(chan struct{}) + proxyReader := &proxyReader{ReadCloser: reader} + go func() { + var lastCallCount uint64 + for { + select { + case <-time.After(timeout): + case <-done: + return + } + curCallCount := proxyReader.callCount() + if curCallCount == lastCallCount { + atomic.AddUint32(canceled, 1) + cancelRequest() + return + } + lastCallCount = curCallCount + } + }() + return proxyReader, done +} + +type hijackOptions struct { + success chan struct{} + setRawTerminal bool + in io.Reader + stdout io.Writer + stderr io.Writer + data interface{} +} + +// CloseWaiter is an interface with methods for closing the underlying resource +// and then waiting for it to finish processing. 
+type CloseWaiter interface { + io.Closer + Wait() error +} + +type waiterFunc func() error + +func (w waiterFunc) Wait() error { return w() } + +type closerFunc func() error + +func (c closerFunc) Close() error { return c() } + +func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) { + if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { + err := c.checkAPIVersion() + if err != nil { + return nil, err + } + } + var params io.Reader + if hijackOptions.data != nil { + buf, err := json.Marshal(hijackOptions.data) + if err != nil { + return nil, err + } + params = bytes.NewBuffer(buf) + } + req, err := http.NewRequest(method, c.getURL(path), params) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "tcp") + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol != unixProtocol && protocol != namedPipeProtocol { + protocol = "tcp" + address = c.endpointURL.Host + } + var dial net.Conn + if c.TLSConfig != nil && protocol != unixProtocol && protocol != namedPipeProtocol { + netDialer, ok := c.Dialer.(*net.Dialer) + if !ok { + return nil, ErrTLSNotSupported + } + dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig) + if err != nil { + return nil, err + } + } else { + dial, err = c.Dialer.Dial(protocol, address) + if err != nil { + return nil, err + } + } + + errs := make(chan error, 1) + quit := make(chan struct{}) + go func() { + clientconn := httputil.NewClientConn(dial, nil) + defer clientconn.Close() + clientconn.Do(req) + if hijackOptions.success != nil { + hijackOptions.success <- struct{}{} + <-hijackOptions.success + } + rwc, br := clientconn.Hijack() + defer rwc.Close() + + errChanOut := make(chan error, 1) + errChanIn := make(chan error, 2) + if hijackOptions.stdout == nil && hijackOptions.stderr == nil { + close(errChanOut) + } else { + // Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set. 
+ // Otherwise, if the only stream you care about is stdin, your attach session + // will "hang" until the container terminates, even though you're not reading + // stdout/stderr + if hijackOptions.stdout == nil { + hijackOptions.stdout = ioutil.Discard + } + if hijackOptions.stderr == nil { + hijackOptions.stderr = ioutil.Discard + } + + go func() { + defer func() { + if hijackOptions.in != nil { + if closer, ok := hijackOptions.in.(io.Closer); ok { + closer.Close() + } + errChanIn <- nil + } + }() + + var err error + if hijackOptions.setRawTerminal { + _, err = io.Copy(hijackOptions.stdout, br) + } else { + _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br) + } + errChanOut <- err + }() + } + + go func() { + var err error + if hijackOptions.in != nil { + _, err = io.Copy(rwc, hijackOptions.in) + } + errChanIn <- err + rwc.(interface { + CloseWrite() error + }).CloseWrite() + }() + + var errIn error + select { + case errIn = <-errChanIn: + case <-quit: + } + + var errOut error + select { + case errOut = <-errChanOut: + case <-quit: + } + + if errIn != nil { + errs <- errIn + } else { + errs <- errOut + } + }() + + return struct { + closerFunc + waiterFunc + }{ + closerFunc(func() error { close(quit); return nil }), + waiterFunc(func() error { return <-errs }), + }, nil +} + +func (c *Client) getURL(path string) string { + urlStr := strings.TrimRight(c.endpointURL.String(), "/") + if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol { + urlStr = "" + } + if c.requestedAPIVersion != nil { + return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) + } + return fmt.Sprintf("%s%s", urlStr, path) +} + +// getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX +// domain socket to the given path. +func (c *Client) getFakeNativeURL(path string) string { + u := *c.endpointURL // Copy. + + // Override URL so that net/http will not complain. + u.Scheme = "http" + u.Host = "unix.sock" // Doesn't matter what this is - it's not used. 
+	u.Path = ""
+	urlStr := strings.TrimRight(u.String(), "/")
+	if c.requestedAPIVersion != nil {
+		return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
+	}
+	return fmt.Sprintf("%s%s", urlStr, path)
+}
+
+type jsonMessage struct {
+	Status   string `json:"status,omitempty"`
+	Progress string `json:"progress,omitempty"`
+	Error    string `json:"error,omitempty"`
+	Stream   string `json:"stream,omitempty"`
+}
+
+func queryString(opts interface{}) string {
+	if opts == nil {
+		return ""
+	}
+	value := reflect.ValueOf(opts)
+	if value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	if value.Kind() != reflect.Struct {
+		return ""
+	}
+	items := url.Values(map[string][]string{})
+	for i := 0; i < value.NumField(); i++ {
+		field := value.Type().Field(i)
+		if field.PkgPath != "" {
+			continue
+		}
+		key := field.Tag.Get("qs")
+		if key == "" {
+			key = strings.ToLower(field.Name)
+		} else if key == "-" {
+			continue
+		}
+		addQueryStringValue(items, key, value.Field(i))
+	}
+	return items.Encode()
+}
+
+func addQueryStringValue(items url.Values, key string, v reflect.Value) {
+	switch v.Kind() {
+	case reflect.Bool:
+		if v.Bool() {
+			items.Add(key, "1")
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if v.Int() > 0 {
+			items.Add(key, strconv.FormatInt(v.Int(), 10))
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		if v.Uint() > 0 {
+			items.Add(key, strconv.FormatUint(v.Uint(), 10))
+		}
+	case reflect.Float32, reflect.Float64:
+		if v.Float() > 0 {
+			items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
+		}
+	case reflect.String:
+		if v.String() != "" {
+			items.Add(key, v.String())
+		}
+	case reflect.Ptr:
+		if !v.IsNil() {
+			if b, err := json.Marshal(v.Interface()); err == nil {
+				items.Add(key, string(b))
+			}
+		}
+	case reflect.Map:
+		if len(v.MapKeys()) > 0 {
+			if b, err := json.Marshal(v.Interface()); err == nil {
+				items.Add(key, string(b))
+			}
+		}
+	case reflect.Array, reflect.Slice:
+		vLen := v.Len()
+		if vLen > 0 {
+			for i := 0; i < vLen; i++ {
+				addQueryStringValue(items, key, v.Index(i))
+			}
+		}
+	}
+}
+
+// Error represents a failure returned by the API.
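+//
+// Callers can type-assert the returned error to recover the HTTP status, as
+// the container helpers in this package do; a minimal sketch:
+//
+//	if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+//		// the requested resource does not exist
+//	}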
+type Error struct { + Status int + Message string +} + +func newError(resp *http.Response) *Error { + type ErrMsg struct { + Message string `json:"message"` + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)} + } + var emsg ErrMsg + err = json.Unmarshal(data, &emsg) + if err != nil { + return &Error{Status: resp.StatusCode, Message: string(data)} + } + return &Error{Status: resp.StatusCode, Message: emsg.Message} +} + +func (e *Error) Error() string { + return fmt.Sprintf("API error (%d): %s", e.Status, e.Message) +} + +func parseEndpoint(endpoint string, tls bool) (*url.URL, error) { + if endpoint != "" && !strings.Contains(endpoint, "://") { + endpoint = "tcp://" + endpoint + } + u, err := url.Parse(endpoint) + if err != nil { + return nil, ErrInvalidEndpoint + } + if tls && u.Scheme != "unix" { + u.Scheme = "https" + } + switch u.Scheme { + case unixProtocol, namedPipeProtocol: + return u, nil + case "http", "https", "tcp": + _, port, err := net.SplitHostPort(u.Host) + if err != nil { + if e, ok := err.(*net.AddrError); ok { + if e.Err == "missing port in address" { + return u, nil + } + } + return nil, ErrInvalidEndpoint + } + number, err := strconv.ParseInt(port, 10, 64) + if err == nil && number > 0 && number < 65536 { + if u.Scheme == "tcp" { + if tls { + u.Scheme = "https" + } else { + u.Scheme = "http" + } + } + return u, nil + } + return nil, ErrInvalidEndpoint + default: + return nil, ErrInvalidEndpoint + } +} + +type dockerEnv struct { + dockerHost string + dockerTLSVerify bool + dockerCertPath string +} + +func getDockerEnv() (*dockerEnv, error) { + dockerHost := os.Getenv("DOCKER_HOST") + var err error + if dockerHost == "" { + dockerHost = opts.DefaultHost + } + dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != "" + var dockerCertPath string + if dockerTLSVerify { + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + if dockerCertPath == "" { + home := homedir.Get() + if home == "" { + return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set") + } + dockerCertPath = filepath.Join(home, ".docker") + dockerCertPath, err = filepath.Abs(dockerCertPath) + if err != nil { + return nil, err + } + } + } + return &dockerEnv{ + dockerHost: dockerHost, + dockerTLSVerify: dockerTLSVerify, + dockerCertPath: dockerCertPath, + }, nil +} + +// defaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func defaultTransport() *http.Transport { + transport := defaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// defaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). 
+func defaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// defaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func defaultClient() *http.Client { + return &http.Client{ + Transport: defaultTransport(), + } +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/client_unix.go b/vendor/github.com/ory/dockertest/v3/docker/client_unix.go new file mode 100644 index 00000000..57d7904e --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/client_unix.go @@ -0,0 +1,32 @@ +// Copyright 2016 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !windows + +package docker + +import ( + "context" + "net" + "net/http" +) + +// initializeNativeClient initializes the native Unix domain socket client on +// Unix-style operating systems +func (c *Client) initializeNativeClient(trFunc func() *http.Transport) { + if c.endpointURL.Scheme != unixProtocol { + return + } + sockPath := c.endpointURL.Path + + tr := trFunc() + + tr.Dial = func(network, addr string) (net.Conn, error) { + return c.Dialer.Dial(unixProtocol, sockPath) + } + tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return c.Dialer.Dial(unixProtocol, sockPath) + } + c.HTTPClient.Transport = tr +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/client_windows.go b/vendor/github.com/ory/dockertest/v3/docker/client_windows.go new file mode 100644 index 00000000..8e7b457d --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/client_windows.go @@ -0,0 +1,45 @@ +// Copyright 2016 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package docker + +import ( + "context" + "net" + "net/http" + "time" + + "github.com/Microsoft/go-winio" +) + +const namedPipeConnectTimeout = 2 * time.Second + +type pipeDialer struct { + dialFunc func(network, addr string) (net.Conn, error) +} + +func (p pipeDialer) Dial(network, address string) (net.Conn, error) { + return p.dialFunc(network, address) +} + +// initializeNativeClient initializes the native Named Pipe client for Windows +func (c *Client) initializeNativeClient(trFunc func() *http.Transport) { + if c.endpointURL.Scheme != namedPipeProtocol { + return + } + namedPipePath := c.endpointURL.Path + dialFunc := func(network, addr string) (net.Conn, error) { + timeout := namedPipeConnectTimeout + return winio.DialPipe(namedPipePath, &timeout) + } + tr := trFunc() + tr.Dial = dialFunc + tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return dialFunc(network, addr) + } + c.Dialer = &pipeDialer{dialFunc} + c.HTTPClient.Transport = tr +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/container.go b/vendor/github.com/ory/dockertest/v3/docker/container.go new file mode 100644 index 00000000..e24c9fb2 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/container.go @@ -0,0 +1,1623 @@ +// Copyright 2013 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/go-units" +) + +// ErrContainerAlreadyExists is the error returned by CreateContainer when the +// container already exists. +var ErrContainerAlreadyExists = errors.New("container already exists") + +// ListContainersOptions specify parameters to the ListContainers function. +// +// See https://goo.gl/kaOHGw for more details. +type ListContainersOptions struct { + All bool + Size bool + Limit int + Since string + Before string + Filters map[string][]string + Context context.Context +} + +// APIPort is a type that represents a port mapping returned by the Docker API +type APIPort struct { + PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty" toml:"PrivatePort,omitempty"` + PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty" toml:"PublicPort,omitempty"` + Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` + IP string `json:"IP,omitempty" yaml:"IP,omitempty" toml:"IP,omitempty"` +} + +// APIMount represents a mount point for a container. +type APIMount struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Source string `json:"Source,omitempty" yaml:"Source,omitempty" toml:"Source,omitempty"` + Destination string `json:"Destination,omitempty" yaml:"Destination,omitempty" toml:"Destination,omitempty"` + Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"` + Mode string `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"` + RW bool `json:"RW,omitempty" yaml:"RW,omitempty" toml:"RW,omitempty"` + Propogation string `json:"Propogation,omitempty" yaml:"Propogation,omitempty" toml:"Propogation,omitempty"` +} + +// APIContainers represents each container in the list returned by +// ListContainers. 
+type APIContainers struct {
+	ID         string            `json:"Id" yaml:"Id" toml:"Id"`
+	Image      string            `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"`
+	Command    string            `json:"Command,omitempty" yaml:"Command,omitempty" toml:"Command,omitempty"`
+	Created    int64             `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"`
+	State      string            `json:"State,omitempty" yaml:"State,omitempty" toml:"State,omitempty"`
+	Status     string            `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"`
+	Ports      []APIPort         `json:"Ports,omitempty" yaml:"Ports,omitempty" toml:"Ports,omitempty"`
+	SizeRw     int64             `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty" toml:"SizeRw,omitempty"`
+	SizeRootFs int64             `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty" toml:"SizeRootFs,omitempty"`
+	Names      []string          `json:"Names,omitempty" yaml:"Names,omitempty" toml:"Names,omitempty"`
+	Labels     map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+	Networks   NetworkList       `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty" toml:"NetworkSettings,omitempty"`
+	Mounts     []APIMount        `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
+}
+
+// NetworkList encapsulates a map of networks, as returned by the Docker API in
+// ListContainers.
+type NetworkList struct {
+	Networks map[string]ContainerNetwork `json:"Networks" yaml:"Networks,omitempty" toml:"Networks,omitempty"`
+}
+
+// ListContainers returns a slice of containers matching the given criteria.
+//
+// See https://goo.gl/kaOHGw for more details.
+func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
+	path := "/containers/json?" + queryString(opts)
+	resp, err := c.do("GET", path, doOptions{context: opts.Context})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var containers []APIContainers
+	if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil {
+		return nil, err
+	}
+	return containers, nil
+}
+
+// Port represents the port number and the protocol, in the form
+// <number>/<protocol>. For example: 80/tcp.
+type Port string
+
+// Port returns the number of the port.
+func (p Port) Port() string {
+	return strings.Split(string(p), "/")[0]
+}
+
+// Proto returns the name of the protocol.
+func (p Port) Proto() string {
+	parts := strings.Split(string(p), "/")
+	if len(parts) == 1 {
+		return "tcp"
+	}
+	return parts[1]
+}
+
+// HealthCheck represents one check of health.
+type HealthCheck struct {
+	Start    time.Time `json:"Start,omitempty" yaml:"Start,omitempty" toml:"Start,omitempty"`
+	End      time.Time `json:"End,omitempty" yaml:"End,omitempty" toml:"End,omitempty"`
+	ExitCode int       `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"`
+	Output   string    `json:"Output,omitempty" yaml:"Output,omitempty" toml:"Output,omitempty"`
+}
+
+// Health represents the health of a container.
+type Health struct {
+	Status        string        `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"`
+	FailingStreak int           `json:"FailingStreak,omitempty" yaml:"FailingStreak,omitempty" toml:"FailingStreak,omitempty"`
+	Log           []HealthCheck `json:"Log,omitempty" yaml:"Log,omitempty" toml:"Log,omitempty"`
+}
+
+// State represents the state of a container.
+type State struct { + Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"` + Running bool `json:"Running,omitempty" yaml:"Running,omitempty" toml:"Running,omitempty"` + Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty" toml:"Paused,omitempty"` + Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty" toml:"Restarting,omitempty"` + OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty" toml:"OOMKilled,omitempty"` + RemovalInProgress bool `json:"RemovalInProgress,omitempty" yaml:"RemovalInProgress,omitempty" toml:"RemovalInProgress,omitempty"` + Dead bool `json:"Dead,omitempty" yaml:"Dead,omitempty" toml:"Dead,omitempty"` + Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty" toml:"Pid,omitempty"` + ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"` + Error string `json:"Error,omitempty" yaml:"Error,omitempty" toml:"Error,omitempty"` + StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty" toml:"StartedAt,omitempty"` + FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty" toml:"FinishedAt,omitempty"` + Health Health `json:"Health,omitempty" yaml:"Health,omitempty" toml:"Health,omitempty"` +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.RemovalInProgress { + return "Removal In Progress" + } + + if s.Dead { + return "Dead" + } + + if s.StartedAt.IsZero() { + return "Created" + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// StateString returns a single string to describe state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + + if s.Dead { + return "dead" + } + + if s.StartedAt.IsZero() { + return "created" + } + + return "exited" +} + +// PortBinding represents the host/container port mapping as returned in the +// `docker inspect` json +type PortBinding struct { + HostIP string `json:"HostIp,omitempty" yaml:"HostIp,omitempty" toml:"HostIp,omitempty"` + HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty" toml:"HostPort,omitempty"` +} + +// PortMapping represents a deprecated field in the `docker inspect` output, +// and its value as found in NetworkSettings should always be nil +type PortMapping map[string]string + +// ContainerNetwork represents the networking settings of a container per network. 
+type ContainerNetwork struct { + Aliases []string `json:"Aliases,omitempty" yaml:"Aliases,omitempty" toml:"Aliases,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` + GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` + GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` + IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"` + IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"` + IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"` + Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"` + EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"` + NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"` +} + +// NetworkSettings contains network-related information about a container +type NetworkSettings struct { + Networks map[string]ContainerNetwork `json:"Networks,omitempty" yaml:"Networks,omitempty" toml:"Networks,omitempty"` + IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"` + IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` + Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"` + Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty" toml:"Bridge,omitempty"` + PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty" toml:"PortMapping,omitempty"` + Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty" toml:"Ports,omitempty"` + NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"` + EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"` + SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty" toml:"SandboxKey,omitempty"` + GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` + GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` + IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"` + LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty" toml:"LinkLocalIPv6Address,omitempty"` + LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty" toml:"LinkLocalIPv6PrefixLen,omitempty"` + SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty" toml:"SecondaryIPAddresses,omitempty"` + SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty" toml:"SecondaryIPv6Addresses,omitempty"` +} + +// PortMappingAPI translates the port mappings as contained in NetworkSettings +// into the format in which they would appear when returned by the API +func 
(settings *NetworkSettings) PortMappingAPI() []APIPort { + var mapping []APIPort + for port, bindings := range settings.Ports { + p, _ := parsePort(port.Port()) + if len(bindings) == 0 { + mapping = append(mapping, APIPort{ + PrivatePort: int64(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + p, _ := parsePort(port.Port()) + h, _ := parsePort(binding.HostPort) + mapping = append(mapping, APIPort{ + PrivatePort: int64(p), + PublicPort: int64(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + return mapping +} + +func parsePort(rawPort string) (int, error) { + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// Config is the list of configuration options used when creating a container. +// Config does not contain the options that are specific to starting a container on a +// given host. Those are contained in HostConfig +type Config struct { + Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty" toml:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty" toml:"Domainname,omitempty"` + User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty" toml:"MemorySwap,omitempty"` + MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty" toml:"MemoryReservation,omitempty"` + KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty" toml:"KernelMemory,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty" toml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty" toml:"Cpuset,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty" toml:"PortSpecs,omitempty"` + ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty" toml:"ExposedPorts,omitempty"` + PublishService string `json:"PublishService,omitempty" yaml:"PublishService,omitempty" toml:"PublishService,omitempty"` + StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty" toml:"StopSignal,omitempty"` + StopTimeout int `json:"StopTimeout,omitempty" yaml:"StopTimeout,omitempty" toml:"StopTimeout,omitempty"` + Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"` + Cmd []string `json:"Cmd" yaml:"Cmd" toml:"Cmd"` + Shell []string `json:"Shell,omitempty" yaml:"Shell,omitempty" toml:"Shell,omitempty"` + Healthcheck *HealthConfig `json:"Healthcheck,omitempty" yaml:"Healthcheck,omitempty" toml:"Healthcheck,omitempty"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty" toml:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty" toml:"Volumes,omitempty"` + VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty" toml:"VolumeDriver,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty" toml:"WorkingDir,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` + Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint" toml:"Entrypoint"` + SecurityOpts []string `json:"SecurityOpts,omitempty" 
yaml:"SecurityOpts,omitempty" toml:"SecurityOpts,omitempty"`
+	OnBuild         []string          `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty" toml:"OnBuild,omitempty"`
+	Mounts          []Mount           `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
+	Labels          map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+	AttachStdin     bool              `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"`
+	AttachStdout    bool              `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"`
+	AttachStderr    bool              `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"`
+	ArgsEscaped     bool              `json:"ArgsEscaped,omitempty" yaml:"ArgsEscaped,omitempty" toml:"ArgsEscaped,omitempty"`
+	Tty             bool              `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"`
+	OpenStdin       bool              `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty" toml:"OpenStdin,omitempty"`
+	StdinOnce       bool              `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty" toml:"StdinOnce,omitempty"`
+	NetworkDisabled bool              `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty" toml:"NetworkDisabled,omitempty"`
+
+	// This is no longer used and has been kept here for backward
+	// compatibility, please use HostConfig.VolumesFrom.
+	VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty" toml:"VolumesFrom,omitempty"`
+}
+
+// HostMount represents a mount point in the container in HostConfig.
+//
+// It was added in version 1.25 of the Docker API.
+type HostMount struct {
+	Target        string         `json:"Target,omitempty" yaml:"Target,omitempty" toml:"Target,omitempty"`
+	Source        string         `json:"Source,omitempty" yaml:"Source,omitempty" toml:"Source,omitempty"`
+	Type          string         `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"`
+	ReadOnly      bool           `json:"ReadOnly,omitempty" yaml:"ReadOnly,omitempty" toml:"ReadOnly,omitempty"`
+	BindOptions   *BindOptions   `json:"BindOptions,omitempty" yaml:"BindOptions,omitempty" toml:"BindOptions,omitempty"`
+	VolumeOptions *VolumeOptions `json:"VolumeOptions,omitempty" yaml:"VolumeOptions,omitempty" toml:"VolumeOptions,omitempty"`
+	TempfsOptions *TempfsOptions `json:"TempfsOptions,omitempty" yaml:"TempfsOptions,omitempty" toml:"TempfsOptions,omitempty"`
+}
+
+// BindOptions contains optional configuration for the bind type
+type BindOptions struct {
+	Propagation string `json:"Propagation,omitempty" yaml:"Propagation,omitempty" toml:"Propagation,omitempty"`
+}
+
+// VolumeOptions contains optional configuration for the volume type
+type VolumeOptions struct {
+	NoCopy       bool               `json:"NoCopy,omitempty" yaml:"NoCopy,omitempty" toml:"NoCopy,omitempty"`
+	Labels       map[string]string  `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+	DriverConfig VolumeDriverConfig `json:"DriverConfig,omitempty" yaml:"DriverConfig,omitempty" toml:"DriverConfig,omitempty"`
+}
+
+// TempfsOptions contains optional configuration for the tempfs type
+type TempfsOptions struct {
+	SizeBytes int64 `json:"SizeBytes,omitempty" yaml:"SizeBytes,omitempty" toml:"SizeBytes,omitempty"`
+	Mode      int   `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"`
+}
+
+// VolumeDriverConfig holds a map of volume driver specific options
+type VolumeDriverConfig struct {
+	Name    string            `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+	Options map[string]string `json:"Options,omitempty" yaml:"Options,omitempty" toml:"Options,omitempty"`
+}
+
+// Mount represents a mount point in the container.
+//
+// It was added in version 1.20 of the Docker API, available since
+// Docker 1.8.
+type Mount struct {
+	Name        string
+	Source      string
+	Destination string
+	Driver      string
+	Mode        string
+	RW          bool
+}
+
+// LogConfig defines the log driver type and the configuration for it.
+type LogConfig struct {
+	Type   string            `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"`
+	Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"`
+}
+
+// ULimit defines system-wide resource limitations. This can help a lot in
+// system administration, e.g. when a user starts too many processes and
+// therefore makes the system unresponsive for other users.
+type ULimit struct {
+	Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+	Soft int64  `json:"Soft,omitempty" yaml:"Soft,omitempty" toml:"Soft,omitempty"`
+	Hard int64  `json:"Hard,omitempty" yaml:"Hard,omitempty" toml:"Hard,omitempty"`
+}
+
+// SwarmNode contains information about which Swarm node the container is on.
+type SwarmNode struct {
+	ID     string            `json:"ID,omitempty" yaml:"ID,omitempty" toml:"ID,omitempty"`
+	IP     string            `json:"IP,omitempty" yaml:"IP,omitempty" toml:"IP,omitempty"`
+	Addr   string            `json:"Addr,omitempty" yaml:"Addr,omitempty" toml:"Addr,omitempty"`
+	Name   string            `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+	CPUs   int64             `json:"CPUs,omitempty" yaml:"CPUs,omitempty" toml:"CPUs,omitempty"`
+	Memory int64             `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"`
+	Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+}
+
+// GraphDriver contains information about the GraphDriver used by the
+// container.
+type GraphDriver struct {
+	Name string            `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+	Data map[string]string `json:"Data,omitempty" yaml:"Data,omitempty" toml:"Data,omitempty"`
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+//
+// It was added in version 1.24 of the Docker API, available since
+// Docker 1.12.
+type HealthConfig struct {
+	// Test is the test to perform to check that the container is healthy.
+	// An empty slice means to inherit the default.
+	// The options are:
+	// {} : inherit healthcheck
+	// {"NONE"} : disable healthcheck
+	// {"CMD", args...} : exec arguments directly
+	// {"CMD-SHELL", command} : run command with system's default shell
+	Test []string `json:"Test,omitempty" yaml:"Test,omitempty" toml:"Test,omitempty"`
+
+	// Zero means to inherit. Durations are expressed as integer nanoseconds.
+	Interval    time.Duration `json:"Interval,omitempty" yaml:"Interval,omitempty" toml:"Interval,omitempty"`          // Interval is the time to wait between checks.
+	Timeout     time.Duration `json:"Timeout,omitempty" yaml:"Timeout,omitempty" toml:"Timeout,omitempty"`             // Timeout is the time to wait before considering the check to have hung.
+	StartPeriod time.Duration `json:"StartPeriod,omitempty" yaml:"StartPeriod,omitempty" toml:"StartPeriod,omitempty"` // The start period for the container to initialize before the retries start to count down.
+
+	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
+	// Zero means inherit.
+	Retries int `json:"Retries,omitempty" yaml:"Retries,omitempty" toml:"Retries,omitempty"`
+}
+
+// Container is the type encompassing everything about a container - its config,
+// hostconfig, etc.
+type Container struct {
+	ID string `json:"Id" yaml:"Id" toml:"Id"`
+
+	Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"`
+
+	Path string `json:"Path,omitempty" yaml:"Path,omitempty" toml:"Path,omitempty"`
+	Args []string `json:"Args,omitempty" yaml:"Args,omitempty" toml:"Args,omitempty"`
+
+	Config *Config `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"`
+	State State `json:"State,omitempty" yaml:"State,omitempty" toml:"State,omitempty"`
+	Image string `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"`
+
+	Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty" toml:"Node,omitempty"`
+
+	NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty" toml:"NetworkSettings,omitempty"`
+
+	SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty" toml:"SysInitPath,omitempty"`
+	ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty" toml:"ResolvConfPath,omitempty"`
+	HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty" toml:"HostnamePath,omitempty"`
+	HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty" toml:"HostsPath,omitempty"`
+	LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty" toml:"LogPath,omitempty"`
+	Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+	Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"`
+	Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
+
+	Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty" toml:"Volumes,omitempty"`
+	VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty" toml:"VolumesRW,omitempty"`
+	HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" toml:"HostConfig,omitempty"`
+	ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty" toml:"ExecIDs,omitempty"`
+	GraphDriver *GraphDriver `json:"GraphDriver,omitempty" yaml:"GraphDriver,omitempty" toml:"GraphDriver,omitempty"`
+
+	RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty" toml:"RestartCount,omitempty"`
+
+	AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty" toml:"AppArmorProfile,omitempty"`
+}
+
+// UpdateContainerOptions specify parameters to the UpdateContainer function.
+//
+// See https://goo.gl/Y6fXUy for more details.
+type UpdateContainerOptions struct {
+	BlkioWeight int `json:"BlkioWeight"`
+	CPUShares int `json:"CpuShares"`
+	CPUPeriod int `json:"CpuPeriod"`
+	CPURealtimePeriod int64 `json:"CpuRealtimePeriod"`
+	CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"`
+	CPUQuota int `json:"CpuQuota"`
+	CpusetCpus string `json:"CpusetCpus"`
+	CpusetMems string `json:"CpusetMems"`
+	Memory int `json:"Memory"`
+	MemorySwap int `json:"MemorySwap"`
+	MemoryReservation int `json:"MemoryReservation"`
+	KernelMemory int `json:"KernelMemory"`
+	RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty"`
+	Context context.Context
+}
+
+// UpdateContainer updates the container at ID with the options
+//
+// See https://goo.gl/Y6fXUy for more details.
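+//
+// A hedged usage sketch (editor's addition, not upstream): adjusting
+// resource limits on a running container, as called from an importing
+// program. "client" is assumed to be a connected *Client and "id" an
+// existing container ID; only the fields you set are sent.
+//
+//	err := client.UpdateContainer(id, docker.UpdateContainerOptions{
+//		Memory:    512 * 1024 * 1024, // bytes
+//		CPUShares: 512,
+//	})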
+func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error {
+	resp, err := c.do("POST", "/containers/"+id+"/update", doOptions{
+		data: opts,
+		forceJSON: true,
+		context: opts.Context,
+	})
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return nil
+}
+
+// RenameContainerOptions specify parameters to the RenameContainer function.
+//
+// See https://goo.gl/46inai for more details.
+type RenameContainerOptions struct {
+	// ID of container to rename
+	ID string `qs:"-"`
+
+	// New name
+	Name string `json:"name,omitempty" yaml:"name,omitempty"`
+	Context context.Context
+}
+
+// RenameContainer updates an existing container's name.
+//
+// See https://goo.gl/46inai for more details.
+func (c *Client) RenameContainer(opts RenameContainerOptions) error {
+	resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{
+		context: opts.Context,
+	})
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// InspectContainer returns information about a container by its ID.
+//
+// See https://goo.gl/FaI5JT for more details.
+func (c *Client) InspectContainer(id string) (*Container, error) {
+	return c.inspectContainer(id, doOptions{})
+}
+
+// InspectContainerWithContext returns information about a container by its ID.
+// The context object can be used to cancel the inspect request.
+//
+// See https://goo.gl/FaI5JT for more details.
+func (c *Client) InspectContainerWithContext(id string, ctx context.Context) (*Container, error) {
+	return c.inspectContainer(id, doOptions{context: ctx})
+}
+
+func (c *Client) inspectContainer(id string, opts doOptions) (*Container, error) {
+	path := "/containers/" + id + "/json"
+	resp, err := c.do("GET", path, opts)
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return nil, &NoSuchContainer{ID: id}
+		}
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var container Container
+	if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
+		return nil, err
+	}
+	return &container, nil
+}
+
+// ContainerChanges returns changes in the filesystem of the given container.
+//
+// See https://goo.gl/15KKzh for more details.
+func (c *Client) ContainerChanges(id string) ([]Change, error) {
+	path := "/containers/" + id + "/changes"
+	resp, err := c.do("GET", path, doOptions{})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return nil, &NoSuchContainer{ID: id}
+		}
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var changes []Change
+	if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil {
+		return nil, err
+	}
+	return changes, nil
+}
+
+// CreateContainerOptions specify parameters to the CreateContainer function.
+//
+// See https://goo.gl/tyzwVM for more details.
+type CreateContainerOptions struct {
+	Name string
+	Config *Config `qs:"-"`
+	HostConfig *HostConfig `qs:"-"`
+	NetworkingConfig *NetworkingConfig `qs:"-"`
+	Context context.Context
+}
+
+// CreateContainer creates a new container, returning the container instance,
+// or an error in case of failure.
+//
+// The returned container instance contains only the container ID. To get more
+// details about the container after creating it, use InspectContainer.
+//
+// See https://goo.gl/tyzwVM for more details.
+func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
+	path := "/containers/create?" + queryString(opts)
+	resp, err := c.do(
+		"POST",
+		path,
+		doOptions{
+			data: struct {
+				*Config
+				HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" toml:"HostConfig,omitempty"`
+				NetworkingConfig *NetworkingConfig `json:"NetworkingConfig,omitempty" yaml:"NetworkingConfig,omitempty" toml:"NetworkingConfig,omitempty"`
+			}{
+				opts.Config,
+				opts.HostConfig,
+				opts.NetworkingConfig,
+			},
+			context: opts.Context,
+		},
+	)
+
+	if e, ok := err.(*Error); ok {
+		if e.Status == http.StatusNotFound {
+			return nil, ErrNoSuchImage
+		}
+		if e.Status == http.StatusConflict {
+			return nil, ErrContainerAlreadyExists
+		}
+		// Workaround for 17.09 bug returning 400 instead of 409.
+		// See https://github.com/moby/moby/issues/35021
+		if e.Status == http.StatusBadRequest && strings.Contains(e.Message, "Conflict.") {
+			return nil, ErrContainerAlreadyExists
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var container Container
+	if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
+		return nil, err
+	}
+
+	container.Name = opts.Name
+
+	return &container, nil
+}
+
+// KeyValuePair is a type for generic key/value pairs as used in the Lxc
+// configuration
+type KeyValuePair struct {
+	Key string `json:"Key,omitempty" yaml:"Key,omitempty" toml:"Key,omitempty"`
+	Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"`
+}
+
+// RestartPolicy represents the policy for automatically restarting a container.
+//
+// Possible values are:
+//
+//   - always: the docker daemon will always restart the container
+//   - on-failure: the docker daemon will restart the container on failures, at
+//     most MaximumRetryCount times
+//   - unless-stopped: the docker daemon will always restart the container except
+//     when the user has manually stopped the container
+//   - no: the docker daemon will not restart the container automatically
+type RestartPolicy struct {
+	Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+	MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty" toml:"MaximumRetryCount,omitempty"`
+}
+
+// AlwaysRestart returns a restart policy that tells the Docker daemon to
+// always restart the container.
+func AlwaysRestart() RestartPolicy {
+	return RestartPolicy{Name: "always"}
+}
+
+// RestartOnFailure returns a restart policy that tells the Docker daemon to
+// restart the container on failures, trying at most maxRetry times.
+func RestartOnFailure(maxRetry int) RestartPolicy {
+	return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry}
+}
+
+// RestartUnlessStopped returns a restart policy that tells the Docker daemon to
+// always restart the container except when the user has manually stopped the
+// container.
+func RestartUnlessStopped() RestartPolicy {
+	return RestartPolicy{Name: "unless-stopped"}
+}
+
+// NeverRestart returns a restart policy that tells the Docker daemon to never
+// restart the container on failures.
+func NeverRestart() RestartPolicy {
+	return RestartPolicy{Name: "no"}
+}
+
+// Device represents a device mapping between the Docker host and the
+// container.
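+//
+// Illustrative sketch (editor's addition, not upstream): exposing a host
+// device to a container via HostConfig (defined later in this file), and
+// combining it with the RestartOnFailure helper defined above:
+//
+//	hostConfig := docker.HostConfig{
+//		Devices: []docker.Device{{
+//			PathOnHost:        "/dev/fuse",
+//			PathInContainer:   "/dev/fuse",
+//			CgroupPermissions: "rwm",
+//		}},
+//		RestartPolicy: docker.RestartOnFailure(3),
+//	}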
+type Device struct {
+	PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty" toml:"PathOnHost,omitempty"`
+	PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty" toml:"PathInContainer,omitempty"`
+	CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty" toml:"CgroupPermissions,omitempty"`
+}
+
+// BlockWeight represents a relative device weight for an individual device
+// inside a container
+type BlockWeight struct {
+	Path string `json:"Path,omitempty"`
+	Weight string `json:"Weight,omitempty"`
+}
+
+// BlockLimit represents a read/write limit in IOPS or Bandwidth for a device
+// inside a container
+type BlockLimit struct {
+	Path string `json:"Path,omitempty"`
+	Rate int64 `json:"Rate,omitempty"`
+}
+
+// HostConfig contains the container options related to starting a container on
+// a given host
+type HostConfig struct {
+	Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty" toml:"Binds,omitempty"`
+	CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty" toml:"CapAdd,omitempty"`
+	CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty" toml:"CapDrop,omitempty"`
+	GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty" toml:"GroupAdd,omitempty"`
+	ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty" toml:"ContainerIDFile,omitempty"`
+	LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty" toml:"LxcConf,omitempty"`
+	PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty" toml:"PortBindings,omitempty"`
+	Links []string `json:"Links,omitempty" yaml:"Links,omitempty" toml:"Links,omitempty"`
+	DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty" toml:"Dns,omitempty"` // For Docker API v1.10 and above only
+	DNSOptions []string `json:"DnsOptions,omitempty" yaml:"DnsOptions,omitempty" toml:"DnsOptions,omitempty"`
+	DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty" toml:"DnsSearch,omitempty"`
+	ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty" toml:"ExtraHosts,omitempty"`
+	VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty" toml:"VolumesFrom,omitempty"`
+	UsernsMode string `json:"UsernsMode,omitempty" yaml:"UsernsMode,omitempty" toml:"UsernsMode,omitempty"`
+	NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty" toml:"NetworkMode,omitempty"`
+	IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty" toml:"IpcMode,omitempty"`
+	PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty" toml:"PidMode,omitempty"`
+	UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty" toml:"UTSMode,omitempty"`
+	RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty" toml:"RestartPolicy,omitempty"`
+	Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty" toml:"Devices,omitempty"`
+	DeviceCgroupRules []string `json:"DeviceCgroupRules,omitempty" yaml:"DeviceCgroupRules,omitempty" toml:"DeviceCgroupRules,omitempty"`
+	LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty" toml:"LogConfig,omitempty"`
+	SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty" toml:"SecurityOpt,omitempty"`
+	Cgroup string `json:"Cgroup,omitempty" yaml:"Cgroup,omitempty" toml:"Cgroup,omitempty"`
+	CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty" toml:"CgroupParent,omitempty"`
yaml:"CgroupParent,omitempty" toml:"CgroupParent,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"` + MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty" toml:"MemoryReservation,omitempty"` + KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty" toml:"KernelMemory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty" toml:"MemorySwap,omitempty"` + MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty" toml:"MemorySwappiness,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty" toml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty" toml:"Cpuset,omitempty"` + CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty" toml:"CpusetCpus,omitempty"` + CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty" toml:"CpusetMems,omitempty"` + CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty" toml:"CpuQuota,omitempty"` + CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty" toml:"CpuPeriod,omitempty"` + CPURealtimePeriod int64 `json:"CpuRealtimePeriod,omitempty" yaml:"CpuRealtimePeriod,omitempty" toml:"CpuRealtimePeriod,omitempty"` + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime,omitempty" yaml:"CpuRealtimeRuntime,omitempty" toml:"CpuRealtimeRuntime,omitempty"` + BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight,omitempty" toml:"BlkioWeight,omitempty"` + BlkioWeightDevice []BlockWeight `json:"BlkioWeightDevice,omitempty" yaml:"BlkioWeightDevice,omitempty" toml:"BlkioWeightDevice,omitempty"` + BlkioDeviceReadBps []BlockLimit `json:"BlkioDeviceReadBps,omitempty" yaml:"BlkioDeviceReadBps,omitempty" toml:"BlkioDeviceReadBps,omitempty"` + BlkioDeviceReadIOps []BlockLimit `json:"BlkioDeviceReadIOps,omitempty" yaml:"BlkioDeviceReadIOps,omitempty" toml:"BlkioDeviceReadIOps,omitempty"` + BlkioDeviceWriteBps []BlockLimit `json:"BlkioDeviceWriteBps,omitempty" yaml:"BlkioDeviceWriteBps,omitempty" toml:"BlkioDeviceWriteBps,omitempty"` + BlkioDeviceWriteIOps []BlockLimit `json:"BlkioDeviceWriteIOps,omitempty" yaml:"BlkioDeviceWriteIOps,omitempty" toml:"BlkioDeviceWriteIOps,omitempty"` + Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty" toml:"Ulimits,omitempty"` + VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty" toml:"VolumeDriver,omitempty"` + OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty" toml:"OomScoreAdj,omitempty"` + PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty" toml:"PidsLimit,omitempty"` + ShmSize int64 `json:"ShmSize,omitempty" yaml:"ShmSize,omitempty" toml:"ShmSize,omitempty"` + Tmpfs map[string]string `json:"Tmpfs,omitempty" yaml:"Tmpfs,omitempty" toml:"Tmpfs,omitempty"` + Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"` + PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty" toml:"PublishAllPorts,omitempty"` + ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty" toml:"ReadonlyRootfs,omitempty"` + OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable,omitempty" toml:"OomKillDisable,omitempty"` + AutoRemove bool `json:"AutoRemove,omitempty" yaml:"AutoRemove,omitempty" toml:"AutoRemove,omitempty"` + StorageOpt map[string]string 
`json:"StorageOpt,omitempty" yaml:"StorageOpt,omitempty" toml:"StorageOpt,omitempty"` + Sysctls map[string]string `json:"Sysctls,omitempty" yaml:"Sysctls,omitempty" toml:"Sysctls,omitempty"` + CPUCount int64 `json:"CpuCount,omitempty" yaml:"CpuCount,omitempty"` + CPUPercent int64 `json:"CpuPercent,omitempty" yaml:"CpuPercent,omitempty"` + IOMaximumBandwidth int64 `json:"IOMaximumBandwidth,omitempty" yaml:"IOMaximumBandwidth,omitempty"` + IOMaximumIOps int64 `json:"IOMaximumIOps,omitempty" yaml:"IOMaximumIOps,omitempty"` + Mounts []HostMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` + Init bool `json:",omitempty" yaml:",omitempty"` +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointConfig `json:"EndpointsConfig" yaml:"EndpointsConfig" toml:"EndpointsConfig"` // Endpoint configs for each connecting network +} + +// StartContainer starts a container, returning an error in case of failure. +// +// Passing the HostConfig to this method has been deprecated in Docker API 1.22 +// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine +// 1.12.x). The client will ignore the parameter when communicating with Docker +// API 1.24 or greater. +// +// See https://goo.gl/fbOSZy for more details. +func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { + return c.startContainer(id, hostConfig, doOptions{}) +} + +// StartContainerWithContext starts a container, returning an error in case of +// failure. The context can be used to cancel the outstanding start container +// request. +// +// Passing the HostConfig to this method has been deprecated in Docker API 1.22 +// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine +// 1.12.x). The client will ignore the parameter when communicating with Docker +// API 1.24 or greater. +// +// See https://goo.gl/fbOSZy for more details. +func (c *Client) StartContainerWithContext(id string, hostConfig *HostConfig, ctx context.Context) error { + return c.startContainer(id, hostConfig, doOptions{context: ctx}) +} + +func (c *Client) startContainer(id string, hostConfig *HostConfig, opts doOptions) error { + path := "/containers/" + id + "/start" + if c.serverAPIVersion == nil { + c.checkAPIVersion() + } + if c.serverAPIVersion != nil && c.serverAPIVersion.LessThan(apiVersion124) { + opts.data = hostConfig + opts.forceJSON = true + } + resp, err := c.do("POST", path, opts) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchContainer{ID: id, Err: err} + } + return err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return &ContainerAlreadyRunning{ID: id} + } + return nil +} + +// StopContainer stops a container, killing it after the given timeout (in +// seconds). +// +// See https://goo.gl/R9dZcV for more details. +func (c *Client) StopContainer(id string, timeout uint) error { + return c.stopContainer(id, timeout, doOptions{}) +} + +// StopContainerWithContext stops a container, killing it after the given +// timeout (in seconds). The context can be used to cancel the stop +// container request. +// +// See https://goo.gl/R9dZcV for more details. 
+func (c *Client) StopContainerWithContext(id string, timeout uint, ctx context.Context) error {
+	return c.stopContainer(id, timeout, doOptions{context: ctx})
+}
+
+func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error {
+	path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
+	resp, err := c.do("POST", path, opts)
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return &NoSuchContainer{ID: id}
+		}
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode == http.StatusNotModified {
+		return &ContainerNotRunning{ID: id}
+	}
+	return nil
+}
+
+// RestartContainer stops a container (killing it after the given timeout, in
+// seconds, during the stop process) and then starts it again.
+//
+// See https://goo.gl/MrAKQ5 for more details.
+func (c *Client) RestartContainer(id string, timeout uint) error {
+	path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
+	resp, err := c.do("POST", path, doOptions{})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return &NoSuchContainer{ID: id}
+		}
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// PauseContainer pauses the given container.
+//
+// See https://goo.gl/D1Yaii for more details.
+func (c *Client) PauseContainer(id string) error {
+	path := fmt.Sprintf("/containers/%s/pause", id)
+	resp, err := c.do("POST", path, doOptions{})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return &NoSuchContainer{ID: id}
+		}
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// UnpauseContainer unpauses the given container.
+//
+// See https://goo.gl/sZ2faO for more details.
+func (c *Client) UnpauseContainer(id string) error {
+	path := fmt.Sprintf("/containers/%s/unpause", id)
+	resp, err := c.do("POST", path, doOptions{})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return &NoSuchContainer{ID: id}
+		}
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// TopResult represents the list of processes running in a container, as
+// returned by /containers/<id>/top.
+//
+// See https://goo.gl/FLwpPl for more details.
+type TopResult struct {
+	Titles []string
+	Processes [][]string
+}
+
+// TopContainer returns processes running inside a container
+//
+// See https://goo.gl/FLwpPl for more details.
+func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
+	var args string
+	var result TopResult
+	if psArgs != "" {
+		args = fmt.Sprintf("?ps_args=%s", psArgs)
+	}
+	path := fmt.Sprintf("/containers/%s/top%s", id, args)
+	resp, err := c.do("GET", path, doOptions{})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return result, &NoSuchContainer{ID: id}
+		}
+		return result, err
+	}
+	defer resp.Body.Close()
+	err = json.NewDecoder(resp.Body).Decode(&result)
+	return result, err
+}
+
+// Stats represents container statistics, returned by /containers/<id>/stats.
+//
+// See https://goo.gl/Dk3Xio for more details.
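+//
+// Editor's sketch (not upstream): one way to consume a single Stats sample
+// via the streaming Stats method defined later in this file; "client" is
+// assumed to be a connected *Client and "id" a valid container ID.
+//
+//	statsC := make(chan *docker.Stats)
+//	done := make(chan bool)
+//	go func() {
+//		// Stats blocks, so run it on its own goroutine.
+//		_ = client.Stats(docker.StatsOptions{ID: id, Stats: statsC, Stream: false, Done: done})
+//	}()
+//	if s := <-statsC; s != nil {
+//		fmt.Println(s.MemoryStats.Usage)
+//	}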
+type Stats struct {
+	Read time.Time `json:"read,omitempty" yaml:"read,omitempty" toml:"read,omitempty"`
+	PreRead time.Time `json:"preread,omitempty" yaml:"preread,omitempty" toml:"preread,omitempty"`
+	NumProcs uint32 `json:"num_procs" yaml:"num_procs" toml:"num_procs"`
+	PidsStats struct {
+		Current uint64 `json:"current,omitempty" yaml:"current,omitempty"`
+	} `json:"pids_stats,omitempty" yaml:"pids_stats,omitempty" toml:"pids_stats,omitempty"`
+	Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty" toml:"network,omitempty"`
+	Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty" toml:"networks,omitempty"`
+	MemoryStats struct {
+		Stats struct {
+			TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty" toml:"total_pgmafault,omitempty"`
+			Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty" toml:"cache,omitempty"`
+			MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty" toml:"mapped_file,omitempty"`
+			TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty" toml:"total_inactive_file,omitempty"`
+			Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty" toml:"pgpgout,omitempty"`
+			Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty" toml:"rss,omitempty"`
+			TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty" toml:"total_mapped_file,omitempty"`
+			Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty" toml:"writeback,omitempty"`
+			Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty" toml:"unevictable,omitempty"`
+			Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty" toml:"pgpgin,omitempty"`
+			TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty" toml:"total_unevictable,omitempty"`
+			Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty" toml:"pgmajfault,omitempty"`
+			TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty" toml:"total_rss,omitempty"`
+			TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty" toml:"total_rss_huge,omitempty"`
+			TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty" toml:"total_writeback,omitempty"`
+			TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty" toml:"total_inactive_anon,omitempty"`
+			RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty" toml:"rss_huge,omitempty"`
+			HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty" toml:"hierarchical_memory_limit,omitempty"`
+			TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty" toml:"total_pgfault,omitempty"`
+			TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty" toml:"total_active_file,omitempty"`
+			ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty" toml:"active_anon,omitempty"`
+			TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty" toml:"total_active_anon,omitempty"`
+			TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty" toml:"total_pgpgout,omitempty"`
+			TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty" toml:"total_cache,omitempty"`
+			InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty" toml:"inactive_anon,omitempty"`
yaml:"inactive_anon,omitempty" toml:"inactive_anon,omitempty"` + ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty" toml:"active_file,omitempty"` + Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty" toml:"pgfault,omitempty"` + InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty" toml:"inactive_file,omitempty"` + TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty" toml:"total_pgpgin,omitempty"` + HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty" toml:"hierarchical_memsw_limit,omitempty"` + Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty" toml:"swap,omitempty"` + } `json:"stats,omitempty" yaml:"stats,omitempty" toml:"stats,omitempty"` + MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty" toml:"max_usage,omitempty"` + Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty" toml:"usage,omitempty"` + Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty" toml:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty" toml:"limit,omitempty"` + Commit uint64 `json:"commitbytes,omitempty" yaml:"commitbytes,omitempty" toml:"privateworkingset,omitempty"` + CommitPeak uint64 `json:"commitpeakbytes,omitempty" yaml:"commitpeakbytes,omitempty" toml:"commitpeakbytes,omitempty"` + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty" yaml:"privateworkingset,omitempty" toml:"privateworkingset,omitempty"` + } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty" toml:"memory_stats,omitempty"` + BlkioStats struct { + IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty" toml:"io_service_bytes_recursive,omitempty"` + IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty" toml:"io_serviced_recursive,omitempty"` + IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty" toml:"io_queue_recursive,omitempty"` + IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty" toml:"io_service_time_recursive,omitempty"` + IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty" toml:"io_wait_time_recursive,omitempty"` + IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty" toml:"io_merged_recursive,omitempty"` + IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty" toml:"io_time_recursive,omitempty"` + SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty" toml:"sectors_recursive,omitempty"` + } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty" toml:"blkio_stats,omitempty"` + CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty" toml:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` + StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty" yaml:"read_count_normalized,omitempty" toml:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty" yaml:"read_size_bytes,omitempty" toml:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty" 
yaml:"write_count_normalized,omitempty" toml:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty" yaml:"write_size_bytes,omitempty" toml:"write_size_bytes,omitempty"` + } `json:"storage_stats,omitempty" yaml:"storage_stats,omitempty" toml:"storage_stats,omitempty"` +} + +// NetworkStats is a stats entry for network stats +type NetworkStats struct { + RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty" toml:"rx_dropped,omitempty"` + RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty" toml:"rx_bytes,omitempty"` + RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty" toml:"rx_errors,omitempty"` + TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty" toml:"tx_packets,omitempty"` + TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty" toml:"tx_dropped,omitempty"` + RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty" toml:"rx_packets,omitempty"` + TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty" toml:"tx_errors,omitempty"` + TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty" toml:"tx_bytes,omitempty"` +} + +// CPUStats is a stats entry for cpu stats +type CPUStats struct { + CPUUsage struct { + PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty" toml:"percpu_usage,omitempty"` + UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty" toml:"usage_in_usermode,omitempty"` + TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty" toml:"total_usage,omitempty"` + UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty" toml:"usage_in_kernelmode,omitempty"` + } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty" toml:"cpu_usage,omitempty"` + SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty" toml:"system_cpu_usage,omitempty"` + OnlineCPUs uint64 `json:"online_cpus,omitempty" yaml:"online_cpus,omitempty" toml:"online_cpus,omitempty"` + ThrottlingData struct { + Periods uint64 `json:"periods,omitempty"` + ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + ThrottledTime uint64 `json:"throttled_time,omitempty"` + } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty" toml:"throttling_data,omitempty"` +} + +// BlkioStatsEntry is a stats entry for blkio_stats +type BlkioStatsEntry struct { + Major uint64 `json:"major,omitempty" yaml:"major,omitempty" toml:"major,omitempty"` + Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty" toml:"minor,omitempty"` + Op string `json:"op,omitempty" yaml:"op,omitempty" toml:"op,omitempty"` + Value uint64 `json:"value,omitempty" yaml:"value,omitempty" toml:"value,omitempty"` +} + +// StatsOptions specify parameters to the Stats function. +// +// See https://goo.gl/Dk3Xio for more details. +type StatsOptions struct { + ID string + Stats chan<- *Stats + Stream bool + // A flag that enables stopping the stats operation + Done <-chan bool + // Initial connection timeout + Timeout time.Duration + // Timeout with no data is received, it's reset every time new data + // arrives + InactivityTimeout time.Duration `qs:"-"` + Context context.Context +} + +// Stats sends container statistics for the given container to the given channel. +// +// This function is blocking, similar to a streaming call for logs, and should be run +// on a separate goroutine from the caller. 
+// Note that this function will block until the given container is removed, not
+// just exited. When finished, this function will close the given channel.
+// Alternatively, the function can be stopped by signaling on the Done channel.
+//
+// See https://goo.gl/Dk3Xio for more details.
+func (c *Client) Stats(opts StatsOptions) (retErr error) {
+	errC := make(chan error, 1)
+	readCloser, writeCloser := io.Pipe()
+
+	defer func() {
+		close(opts.Stats)
+
+		select {
+		case err := <-errC:
+			if err != nil && retErr == nil {
+				retErr = err
+			}
+		default:
+			// No errors
+		}
+
+		if err := readCloser.Close(); err != nil && retErr == nil {
+			retErr = err
+		}
+	}()
+
+	reqSent := make(chan struct{})
+	go func() {
+		err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
+			rawJSONStream: true,
+			useJSONDecoder: true,
+			stdout: writeCloser,
+			timeout: opts.Timeout,
+			inactivityTimeout: opts.InactivityTimeout,
+			context: opts.Context,
+			reqSent: reqSent,
+		})
+		if err != nil {
+			dockerError, ok := err.(*Error)
+			if ok {
+				if dockerError.Status == http.StatusNotFound {
+					err = &NoSuchContainer{ID: opts.ID}
+				}
+			}
+		}
+		if closeErr := writeCloser.Close(); closeErr != nil && err == nil {
+			err = closeErr
+		}
+		errC <- err
+		close(errC)
+	}()
+
+	quit := make(chan struct{})
+	defer close(quit)
+	go func() {
+		// block here waiting for the signal to stop the function
+		select {
+		case <-opts.Done:
+			readCloser.Close()
+		case <-quit:
+			return
+		}
+	}()
+
+	decoder := json.NewDecoder(readCloser)
+	stats := new(Stats)
+	<-reqSent
+	for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) {
+		if err != nil {
+			return err
+		}
+		opts.Stats <- stats
+		stats = new(Stats)
+	}
+	return nil
+}
+
+// KillContainerOptions represents the set of options that can be used in a
+// call to KillContainer.
+//
+// See https://goo.gl/JnTxXZ for more details.
+type KillContainerOptions struct {
+	// The ID of the container.
+	ID string `qs:"-"`
+
+	// The signal to send to the container. When omitted, the Docker server
+	// will assume SIGKILL.
+	Signal Signal
+	Context context.Context
+}
+
+// KillContainer sends a signal to a container, returning an error in case of
+// failure.
+//
+// See https://goo.gl/JnTxXZ for more details.
+func (c *Client) KillContainer(opts KillContainerOptions) error {
+	path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
+	resp, err := c.do("POST", path, doOptions{context: opts.Context})
+	if err != nil {
+		e, ok := err.(*Error)
+		if !ok {
+			return err
+		}
+		switch e.Status {
+		case http.StatusNotFound:
+			return &NoSuchContainer{ID: opts.ID}
+		case http.StatusConflict:
+			return &ContainerNotRunning{ID: opts.ID}
+		default:
+			return err
+		}
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// RemoveContainerOptions encapsulates options to remove a container.
+//
+// See https://goo.gl/hL5IPC for more details.
+type RemoveContainerOptions struct {
+	// The ID of the container.
+	ID string `qs:"-"`
+
+	// A flag that indicates whether Docker should remove the volumes
+	// associated with the container.
+	RemoveVolumes bool `qs:"v"`
+
+	// A flag that indicates whether Docker should remove the container
+	// even if it is currently running.
+	Force bool
+	Context context.Context
+}
+
+// RemoveContainer removes a container, returning an error in case of failure.
+//
+// See https://goo.gl/hL5IPC for more details.
+func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
+	path := "/containers/" + opts.ID + "?" + queryString(opts)
+	resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return &NoSuchContainer{ID: opts.ID}
+		}
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// UploadToContainerOptions is the set of options that can be used when
+// uploading an archive into a container.
+//
+// See https://goo.gl/g25o7u for more details.
+type UploadToContainerOptions struct {
+	InputStream io.Reader `json:"-" qs:"-"`
+	Path string `qs:"path"`
+	NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"`
+	Context context.Context
+}
+
+// UploadToContainer uploads a tar archive to be extracted to a path in the
+// filesystem of the container.
+//
+// See https://goo.gl/g25o7u for more details.
+func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error {
+	url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+	return c.stream("PUT", url, streamOptions{
+		in: opts.InputStream,
+		context: opts.Context,
+	})
+}
+
+// DownloadFromContainerOptions is the set of options that can be used when
+// downloading resources from a container.
+//
+// See https://goo.gl/W49jxK for more details.
+type DownloadFromContainerOptions struct {
+	OutputStream io.Writer `json:"-" qs:"-"`
+	Path string `qs:"path"`
+	InactivityTimeout time.Duration `qs:"-"`
+	Context context.Context
+}
+
+// DownloadFromContainer downloads a tar archive of files or folders in a container.
+//
+// See https://goo.gl/W49jxK for more details.
+func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error {
+	url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+	return c.stream("GET", url, streamOptions{
+		setRawTerminal: true,
+		stdout: opts.OutputStream,
+		inactivityTimeout: opts.InactivityTimeout,
+		context: opts.Context,
+	})
+}
+
+// CopyFromContainerOptions contains the set of options used for copying
+// files from a container.
+//
+// Deprecated: Use DownloadFromContainerOptions and DownloadFromContainer instead.
+type CopyFromContainerOptions struct {
+	OutputStream io.Writer `json:"-"`
+	Container string `json:"-"`
+	Resource string
+	Context context.Context `json:"-"`
+}
+
+// CopyFromContainer copies files from a container.
+//
+// Deprecated: Use DownloadFromContainer instead.
+func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
+	if opts.Container == "" {
+		return &NoSuchContainer{ID: opts.Container}
+	}
+	if c.serverAPIVersion == nil {
+		c.checkAPIVersion()
+	}
+	if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion124) {
+		return errors.New("go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead")
+	}
+	url := fmt.Sprintf("/containers/%s/copy", opts.Container)
+	resp, err := c.do("POST", url, doOptions{
+		data: opts,
+		context: opts.Context,
+	})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return &NoSuchContainer{ID: opts.Container}
+		}
+		return err
+	}
+	defer resp.Body.Close()
+	_, err = io.Copy(opts.OutputStream, resp.Body)
+	return err
+}
+
+// WaitContainer blocks until the given container stops, returning the exit
+// code of the container status.
+//
+// See https://goo.gl/4AGweZ for more details.
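+//
+// Usage sketch (editor's addition, not upstream): reaping the exit status
+// of a container that has been started; "client" and "id" are assumed as in
+// the earlier sketches.
+//
+//	code, err := client.WaitContainer(id)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("container exited with status %d\n", code)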
+func (c *Client) WaitContainer(id string) (int, error) {
+	return c.waitContainer(id, doOptions{})
+}
+
+// WaitContainerWithContext blocks until the given container stops, returning
+// the exit code of the container status. The context object can be used to
+// cancel the wait request.
+//
+// See https://goo.gl/4AGweZ for more details.
+func (c *Client) WaitContainerWithContext(id string, ctx context.Context) (int, error) {
+	return c.waitContainer(id, doOptions{context: ctx})
+}
+
+func (c *Client) waitContainer(id string, opts doOptions) (int, error) {
+	resp, err := c.do("POST", "/containers/"+id+"/wait", opts)
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return 0, &NoSuchContainer{ID: id}
+		}
+		return 0, err
+	}
+	defer resp.Body.Close()
+	var r struct{ StatusCode int }
+	if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
+		return 0, err
+	}
+	return r.StatusCode, nil
+}
+
+// CommitContainerOptions aggregates parameters to the CommitContainer method.
+//
+// See https://goo.gl/CzIguf for more details.
+type CommitContainerOptions struct {
+	Container string
+	Repository string `qs:"repo"`
+	Tag string
+	Message string `qs:"comment"`
+	Author string
+	Changes []string `qs:"changes"`
+	Run *Config `qs:"-"`
+	Context context.Context
+}
+
+// CommitContainer creates a new image from a container's changes.
+//
+// See https://goo.gl/CzIguf for more details.
+func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
+	path := "/commit?" + queryString(opts)
+	resp, err := c.do("POST", path, doOptions{
+		data: opts.Run,
+		context: opts.Context,
+	})
+	if err != nil {
+		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+			return nil, &NoSuchContainer{ID: opts.Container}
+		}
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var image Image
+	if err := json.NewDecoder(resp.Body).Decode(&image); err != nil {
+		return nil, err
+	}
+	return &image, nil
+}
+
+// AttachToContainerOptions is the set of options that can be used when
+// attaching to a container.
+//
+// See https://goo.gl/JF10Zk for more details.
+type AttachToContainerOptions struct {
+	Container string `qs:"-"`
+	InputStream io.Reader `qs:"-"`
+	OutputStream io.Writer `qs:"-"`
+	ErrorStream io.Writer `qs:"-"`
+
+	// If set, after a successful connect, a sentinel will be sent and then the
+	// client will block on receive before continuing.
+	//
+	// It must be an unbuffered channel. Using a buffered channel can lead
+	// to unexpected behavior.
+	Success chan struct{}
+
+	// Use raw terminal? Usually true when the container contains a TTY.
+	RawTerminal bool `qs:"-"`
+
+	// Get container logs, sending them to OutputStream.
+	Logs bool
+
+	// Stream the response?
+	Stream bool
+
+	// Attach to stdin, and use InputStream.
+	Stdin bool
+
+	// Attach to stdout, and use OutputStream.
+	Stdout bool
+
+	// Attach to stderr, and use ErrorStream.
+	Stderr bool
+}
+
+// AttachToContainer attaches to a container, using the given options.
+//
+// See https://goo.gl/JF10Zk for more details.
+func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
+	cw, err := c.AttachToContainerNonBlocking(opts)
+	if err != nil {
+		return err
+	}
+	return cw.Wait()
+}
+
+// AttachToContainerNonBlocking attaches to a container, using the given options.
+// This function does not block.
+//
+// See https://goo.gl/NKpkFk for more details.
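+//
+// Editor's sketch (not upstream): attaching without blocking and waiting
+// via the returned CloseWaiter; assumes "client" and a running container:
+//
+//	cw, err := client.AttachToContainerNonBlocking(docker.AttachToContainerOptions{
+//		Container:    id,
+//		OutputStream: os.Stdout,
+//		Stdout:       true,
+//		Stream:       true,
+//	})
+//	if err == nil {
+//		defer cw.Close()
+//		_ = cw.Wait()
+//	}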
+func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (CloseWaiter, error) {
+	if opts.Container == "" {
+		return nil, &NoSuchContainer{ID: opts.Container}
+	}
+	path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
+	return c.hijack("POST", path, hijackOptions{
+		success: opts.Success,
+		setRawTerminal: opts.RawTerminal,
+		in: opts.InputStream,
+		stdout: opts.OutputStream,
+		stderr: opts.ErrorStream,
+	})
+}
+
+// LogsOptions represents the set of options used when getting logs from a
+// container.
+//
+// See https://goo.gl/krK0ZH for more details.
+type LogsOptions struct {
+	Context context.Context
+	Container string `qs:"-"`
+	OutputStream io.Writer `qs:"-"`
+	ErrorStream io.Writer `qs:"-"`
+	InactivityTimeout time.Duration `qs:"-"`
+	Tail string
+
+	Since int64
+	Follow bool
+	Stdout bool
+	Stderr bool
+	Timestamps bool
+
+	// Use raw terminal? Usually true when the container contains a TTY.
+	RawTerminal bool `qs:"-"`
+}
+
+// Logs gets stdout and stderr logs from the specified container.
+//
+// When LogsOptions.RawTerminal is set to false, go-dockerclient will multiplex
+// the streams and send the container's stdout to LogsOptions.OutputStream, and
+// stderr to LogsOptions.ErrorStream.
+//
+// When LogsOptions.RawTerminal is true, callers will get the raw stream on
+// LogsOptions.OutputStream. The caller can use libraries such as dlog
+// (github.com/ahmetalpbalkan/dlog).
+//
+// See https://goo.gl/krK0ZH for more details.
+func (c *Client) Logs(opts LogsOptions) error {
+	if opts.Container == "" {
+		return &NoSuchContainer{ID: opts.Container}
+	}
+	if opts.Tail == "" {
+		opts.Tail = "all"
+	}
+	path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
+	return c.stream("GET", path, streamOptions{
+		setRawTerminal: opts.RawTerminal,
+		stdout: opts.OutputStream,
+		stderr: opts.ErrorStream,
+		inactivityTimeout: opts.InactivityTimeout,
+		context: opts.Context,
+	})
+}
+
+// ResizeContainerTTY resizes the terminal to the given height and width.
+//
+// See https://goo.gl/FImjeq for more details.
+func (c *Client) ResizeContainerTTY(id string, height, width int) error {
+	params := make(url.Values)
+	params.Set("h", strconv.Itoa(height))
+	params.Set("w", strconv.Itoa(width))
+	resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ExportContainerOptions is the set of parameters to the ExportContainer
+// method.
+//
+// See https://goo.gl/yGJCIh for more details.
+type ExportContainerOptions struct {
+	ID string
+	OutputStream io.Writer
+	InactivityTimeout time.Duration `qs:"-"`
+	Context context.Context
+}
+
+// ExportContainer exports the contents of container id as a tar archive,
+// writing the exported contents to the given output stream.
+//
+// See https://goo.gl/yGJCIh for more details.
+func (c *Client) ExportContainer(opts ExportContainerOptions) error {
+	if opts.ID == "" {
+		return &NoSuchContainer{ID: opts.ID}
+	}
+	url := fmt.Sprintf("/containers/%s/export", opts.ID)
+	return c.stream("GET", url, streamOptions{
+		setRawTerminal: true,
+		stdout: opts.OutputStream,
+		inactivityTimeout: opts.InactivityTimeout,
+		context: opts.Context,
+	})
+}
+
+// PruneContainersOptions specify parameters to the PruneContainers function.
+//
+// See https://goo.gl/wnkgDT for more details.
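+//
+// A hedged sketch (editor's addition, not upstream): pruning stopped
+// containers older than an hour using the API's "until" filter; filter
+// support depends on the daemon's API version.
+//
+//	results, err := client.PruneContainers(docker.PruneContainersOptions{
+//		Filters: map[string][]string{"until": {"1h"}},
+//	})
+//	if err == nil {
+//		fmt.Println(len(results.ContainersDeleted), results.SpaceReclaimed)
+//	}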
+type PruneContainersOptions struct {
+	Filters map[string][]string
+	Context context.Context
+}
+
+// PruneContainersResults specify results from the PruneContainers function.
+//
+// See https://goo.gl/wnkgDT for more details.
+type PruneContainersResults struct {
+	ContainersDeleted []string
+	SpaceReclaimed int64
+}
+
+// PruneContainers deletes containers which are stopped.
+//
+// See https://goo.gl/wnkgDT for more details.
+func (c *Client) PruneContainers(opts PruneContainersOptions) (*PruneContainersResults, error) {
+	path := "/containers/prune?" + queryString(opts)
+	resp, err := c.do("POST", path, doOptions{context: opts.Context})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var results PruneContainersResults
+	if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+		return nil, err
+	}
+	return &results, nil
+}
+
+// NoSuchContainer is the error returned when a given container does not exist.
+type NoSuchContainer struct {
+	ID string
+	Err error
+}
+
+func (err *NoSuchContainer) Error() string {
+	if err.Err != nil {
+		return err.Err.Error()
+	}
+	return "No such container: " + err.ID
+}
+
+// ContainerAlreadyRunning is the error returned when a given container is
+// already running.
+type ContainerAlreadyRunning struct {
+	ID string
+}
+
+func (err *ContainerAlreadyRunning) Error() string {
+	return "Container already running: " + err.ID
+}
+
+// ContainerNotRunning is the error returned when a given container is not
+// running.
+type ContainerNotRunning struct {
+	ID string
+}
+
+func (err *ContainerNotRunning) Error() string {
+	return "Container not running: " + err.ID
+}
diff --git a/vendor/github.com/ory/dockertest/v3/docker/distribution.go b/vendor/github.com/ory/dockertest/v3/docker/distribution.go
new file mode 100644
index 00000000..a211b192
--- /dev/null
+++ b/vendor/github.com/ory/dockertest/v3/docker/distribution.go
@@ -0,0 +1,26 @@
+// Copyright 2017 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"encoding/json"
+
+	"github.com/ory/dockertest/v3/docker/types/registry"
+)
+
+// InspectDistribution returns image digest and platform information by contacting the registry
+func (c *Client) InspectDistribution(name string) (*registry.DistributionInspect, error) {
+	path := "/distribution/" + name + "/json"
+	resp, err := c.do("GET", path, doOptions{})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var distributionInspect registry.DistributionInspect
+	if err := json.NewDecoder(resp.Body).Decode(&distributionInspect); err != nil {
+		return nil, err
+	}
+	return &distributionInspect, nil
+}
diff --git a/vendor/github.com/ory/dockertest/v3/docker/env.go b/vendor/github.com/ory/dockertest/v3/docker/env.go
new file mode 100644
index 00000000..13fedfb1
--- /dev/null
+++ b/vendor/github.com/ory/dockertest/v3/docker/env.go
@@ -0,0 +1,172 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package docker
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// Env represents a list of key-value pairs, each represented in the form
+// KEY=VALUE.
+type Env []string
+
+// Get returns the string value of the given key.
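+//
+// Editor's sketch (not upstream): Env is used like a string slice of
+// KEY=VALUE entries, with the typed accessors defined below layered on top:
+//
+//	var env docker.Env
+//	env.Set("PORT", "8080")
+//	env.SetBool("DEBUG", true)
+//	port := env.GetInt("PORT") // 8080
+//	debug := env.GetBool("DEBUG")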
+func (env *Env) Get(key string) (value string) {
+	return env.Map()[key]
+}
+
+// Exists checks whether the given key is defined in the internal Env
+// representation.
+func (env *Env) Exists(key string) bool {
+	_, exists := env.Map()[key]
+	return exists
+}
+
+// GetBool returns a boolean representation of the given key. The key is false
+// whenever its value is 0, no, false, none or an empty string. Any other value
+// will be interpreted as true.
+func (env *Env) GetBool(key string) (value bool) {
+	s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
+	if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
+		return false
+	}
+	return true
+}
+
+// SetBool defines a boolean value to the given key.
+func (env *Env) SetBool(key string, value bool) {
+	if value {
+		env.Set(key, "1")
+	} else {
+		env.Set(key, "0")
+	}
+}
+
+// GetInt returns the value of the provided key, converted to int.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt(key string) int {
+	return int(env.GetInt64(key))
+}
+
+// SetInt defines an integer value to the given key.
+func (env *Env) SetInt(key string, value int) {
+	env.Set(key, strconv.Itoa(value))
+}
+
+// GetInt64 returns the value of the provided key, converted to int64.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt64(key string) int64 {
+	s := strings.Trim(env.Get(key), " \t")
+	val, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return -1
+	}
+	return val
+}
+
+// SetInt64 defines an integer (64-bit wide) value to the given key.
+func (env *Env) SetInt64(key string, value int64) {
+	env.Set(key, strconv.FormatInt(value, 10))
+}
+
+// GetJSON unmarshals the value of the provided key in the provided iface.
+//
+// iface is a value that can be provided to the json.Unmarshal function.
+func (env *Env) GetJSON(key string, iface interface{}) error {
+	sval := env.Get(key)
+	if sval == "" {
+		return nil
+	}
+	return json.Unmarshal([]byte(sval), iface)
+}
+
+// SetJSON marshals the given value to JSON format and stores it using the
+// provided key.
+func (env *Env) SetJSON(key string, value interface{}) error {
+	sval, err := json.Marshal(value)
+	if err != nil {
+		return err
+	}
+	env.Set(key, string(sval))
+	return nil
+}
+
+// GetList returns a list of strings matching the provided key. It handles the
+// list as a JSON representation of a list of strings.
+//
+// If the given key matches a single string, it will return a list
+// containing only the value that matches the key.
+func (env *Env) GetList(key string) []string {
+	sval := env.Get(key)
+	if sval == "" {
+		return nil
+	}
+	var l []string
+	if err := json.Unmarshal([]byte(sval), &l); err != nil {
+		l = append(l, sval)
+	}
+	return l
+}
+
+// SetList stores the given list in the provided key, after serializing it to
+// JSON format.
+func (env *Env) SetList(key string, value []string) error {
+	return env.SetJSON(key, value)
+}
+
+// Set defines the value of a key to the given string.
+func (env *Env) Set(key, value string) {
+	*env = append(*env, key+"="+value)
+}
+
+// Decode decodes `src` as a json dictionary, and adds each decoded key-value
+// pair to the environment.
+//
+// If `src` cannot be decoded as a json dictionary, an error is returned.
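+//
+// Illustrative sketch (editor's addition, not upstream): decoding a JSON
+// object into an Env; numbers arrive as float64 and are stored via SetInt64
+// (see SetAuto below):
+//
+//	var env docker.Env
+//	err := env.Decode(strings.NewReader(`{"Name":"web","Replicas":3}`))
+//	// env now contains "Name=web" and "Replicas=3"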
+func (env *Env) Decode(src io.Reader) error {
+	m := make(map[string]interface{})
+	if err := json.NewDecoder(src).Decode(&m); err != nil {
+		return err
+	}
+	for k, v := range m {
+		env.SetAuto(k, v)
+	}
+	return nil
+}
+
+// SetAuto picks the appropriate Set* method to call based on the given value.
+func (env *Env) SetAuto(key string, value interface{}) {
+	if fval, ok := value.(float64); ok {
+		env.SetInt64(key, int64(fval))
+	} else if sval, ok := value.(string); ok {
+		env.Set(key, sval)
+	} else if val, err := json.Marshal(value); err == nil {
+		env.Set(key, string(val))
+	} else {
+		env.Set(key, fmt.Sprintf("%v", value))
+	}
+}
+
+// Map returns the map representation of the env.
+func (env *Env) Map() map[string]string {
+	if len(*env) == 0 {
+		return nil
+	}
+	m := make(map[string]string)
+	for _, kv := range *env {
+		parts := strings.SplitN(kv, "=", 2)
+		if len(parts) == 1 {
+			m[parts[0]] = ""
+		} else {
+			m[parts[0]] = parts[1]
+		}
+	}
+	return m
+}
diff --git a/vendor/github.com/ory/dockertest/v3/docker/event.go b/vendor/github.com/ory/dockertest/v3/docker/event.go
new file mode 100644
index 00000000..18ae5d5a
--- /dev/null
+++ b/vendor/github.com/ory/dockertest/v3/docker/event.go
@@ -0,0 +1,410 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// APIEvents represents events coming from the Docker API
+// The fields in the Docker API changed in API version 1.22, and
+// events for more than images and containers are now fired off.
+// To maintain forward and backward compatibility, go-dockerclient
+// replicates the event in both the new and old format as faithfully as possible.
+//
+// For events that only exist in 1.22 and later, `Status` is filled in as
+// `"Type:Action"` instead of just `Action` to allow for older clients to
+// differentiate and not break if they rely on the pre-1.22 Status types.
+//
+// The transformEvent method can be consulted for more information about how
+// events are translated from new/old API formats
+type APIEvents struct {
+	// New API Fields in 1.22
+	Action string `json:"action,omitempty"`
+	Type string `json:"type,omitempty"`
+	Actor APIActor `json:"actor,omitempty"`
+
+	// Old API fields for < 1.22
+	Status string `json:"status,omitempty"`
+	ID string `json:"id,omitempty"`
+	From string `json:"from,omitempty"`
+
+	// Fields in both
+	Time int64 `json:"time,omitempty"`
+	TimeNano int64 `json:"timeNano,omitempty"`
+}
+
+// APIActor represents an actor that accomplishes something for an event
+type APIActor struct {
+	ID string `json:"id,omitempty"`
+	Attributes map[string]string `json:"attributes,omitempty"`
+}
+
+type eventMonitoringState struct {
+	// `sync/atomic` expects the first word in an allocated struct to be 64-bit
+	// aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details.
+	lastSeen int64
+	sync.RWMutex
+	sync.WaitGroup
+	enabled bool
+	C chan *APIEvents
+	errC chan error
+	listeners []chan<- *APIEvents
+}
+
+const (
+	maxMonitorConnRetries = 5
+	retryInitialWaitTime = 10.
+)
+
+var (
+	// ErrNoListeners is the error returned when no listeners are available
+	// to receive an event.
+	ErrNoListeners = errors.New("no listeners present to receive event")
+
+	// ErrListenerAlreadyExists is the error returned when the listener already
+	// exists.
+	ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
+
+	// ErrTLSNotSupported is the error returned when the client does not support
+	// TLS (this applies to the Windows named pipe client).
+	ErrTLSNotSupported = errors.New("tls not supported by this client")
+
+	// EOFEvent is sent when the event listener receives an EOF error.
+	EOFEvent = &APIEvents{
+		Type:   "EOF",
+		Status: "EOF",
+	}
+)
+
+// AddEventListener adds a new listener to container events in the Docker API.
+//
+// The parameter is a channel through which events will be sent.
+func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
+	var err error
+	if !c.eventMonitor.isEnabled() {
+		err = c.eventMonitor.enableEventMonitoring(c)
+		if err != nil {
+			return err
+		}
+	}
+	return c.eventMonitor.addListener(listener)
+}
+
+// RemoveEventListener removes a listener from the monitor.
+func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
+	err := c.eventMonitor.removeListener(listener)
+	if err != nil {
+		return err
+	}
+	if c.eventMonitor.listenersCount() == 0 {
+		c.eventMonitor.disableEventMonitoring()
+	}
+	return nil
+}
+
+func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
+	eventState.Lock()
+	defer eventState.Unlock()
+	if listenerExists(listener, &eventState.listeners) {
+		return ErrListenerAlreadyExists
+	}
+	eventState.Add(1)
+	eventState.listeners = append(eventState.listeners, listener)
+	return nil
+}
+
+func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
+	eventState.Lock()
+	defer eventState.Unlock()
+	if listenerExists(listener, &eventState.listeners) {
+		var newListeners []chan<- *APIEvents
+		for _, l := range eventState.listeners {
+			if l != listener {
+				newListeners = append(newListeners, l)
+			}
+		}
+		eventState.listeners = newListeners
+		eventState.Add(-1)
+	}
+	return nil
+}
+
+func (eventState *eventMonitoringState) closeListeners() {
+	for _, l := range eventState.listeners {
+		close(l)
+		eventState.Add(-1)
+	}
+	eventState.listeners = nil
+}
+
+func (eventState *eventMonitoringState) listenersCount() int {
+	eventState.RLock()
+	defer eventState.RUnlock()
+	return len(eventState.listeners)
+}
+
+func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
+	for _, b := range *list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
+
+func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
+	eventState.Lock()
+	defer eventState.Unlock()
+	if !eventState.enabled {
+		eventState.enabled = true
+		atomic.StoreInt64(&eventState.lastSeen, 0)
+		eventState.C = make(chan *APIEvents, 100)
+		eventState.errC = make(chan error, 1)
+		go eventState.monitorEvents(c)
+	}
+	return nil
+}
+
+func (eventState *eventMonitoringState) disableEventMonitoring() error {
+	eventState.Lock()
+	defer eventState.Unlock()
+
+	eventState.closeListeners()
+
+	eventState.Wait()
+
+	if eventState.enabled {
+		eventState.enabled = false
+		close(eventState.C)
+		close(eventState.errC)
+	}
+	return nil
+}
+
+func (eventState *eventMonitoringState) monitorEvents(c *Client) {
+	const (
+		noListenersTimeout  = 5 * time.Second
+		noListenersInterval = 10 * time.Millisecond
+		noListenersMaxTries = noListenersTimeout / noListenersInterval
+	)
+
+	var err error
+	for i := time.Duration(0); i < noListenersMaxTries
&& eventState.noListeners(); i++ { + time.Sleep(10 * time.Millisecond) + } + + if eventState.noListeners() { + // terminate if no listener is available after 5 seconds. + // Prevents goroutine leak when RemoveEventListener is called + // right after AddEventListener. + eventState.disableEventMonitoring() + return + } + + if err = eventState.connectWithRetry(c); err != nil { + // terminate if connect failed + eventState.disableEventMonitoring() + return + } + for eventState.isEnabled() { + timeout := time.After(100 * time.Millisecond) + select { + case ev, ok := <-eventState.C: + if !ok { + return + } + if ev == EOFEvent { + eventState.disableEventMonitoring() + return + } + eventState.updateLastSeen(ev) + eventState.sendEvent(ev) + case err = <-eventState.errC: + if err == ErrNoListeners { + eventState.disableEventMonitoring() + return + } else if err != nil { + defer func() { go eventState.monitorEvents(c) }() + return + } + case <-timeout: + continue + } + } +} + +func (eventState *eventMonitoringState) connectWithRetry(c *Client) error { + var retries int + eventState.RLock() + eventChan := eventState.C + errChan := eventState.errC + eventState.RUnlock() + err := c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan) + for ; err != nil && retries < maxMonitorConnRetries; retries++ { + waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries))) + time.Sleep(time.Duration(waitTime) * time.Millisecond) + eventState.RLock() + eventChan = eventState.C + errChan = eventState.errC + eventState.RUnlock() + err = c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan) + } + return err +} + +func (eventState *eventMonitoringState) noListeners() bool { + eventState.RLock() + defer eventState.RUnlock() + return len(eventState.listeners) == 0 +} + +func (eventState *eventMonitoringState) isEnabled() bool { + eventState.RLock() + defer eventState.RUnlock() + return eventState.enabled +} + +func (eventState *eventMonitoringState) sendEvent(event *APIEvents) { + eventState.RLock() + defer eventState.RUnlock() + eventState.Add(1) + defer eventState.Done() + if eventState.enabled { + if len(eventState.listeners) == 0 { + eventState.errC <- ErrNoListeners + return + } + + for _, listener := range eventState.listeners { + select { + case listener <- event: + default: + } + } + } +} + +func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) { + eventState.Lock() + defer eventState.Unlock() + if atomic.LoadInt64(&eventState.lastSeen) < e.Time { + atomic.StoreInt64(&eventState.lastSeen, e.Time) + } +} + +func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error { + uri := "/events" + if startTime != 0 { + uri += fmt.Sprintf("?since=%d", startTime) + } + protocol := c.endpointURL.Scheme + address := c.endpointURL.Path + if protocol != "unix" && protocol != "npipe" { + protocol = "tcp" + address = c.endpointURL.Host + } + var dial net.Conn + var err error + if c.TLSConfig == nil { + dial, err = c.Dialer.Dial(protocol, address) + } else { + netDialer, ok := c.Dialer.(*net.Dialer) + if !ok { + return ErrTLSNotSupported + } + dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig) + } + if err != nil { + return err + } + conn := httputil.NewClientConn(dial, nil) + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return err + } + res, err := conn.Do(req) + if err != nil { + return err + } + go func(res *http.Response, conn *httputil.ClientConn) { + defer conn.Close() + defer 
res.Body.Close() + decoder := json.NewDecoder(res.Body) + for { + var event APIEvents + if err = decoder.Decode(&event); err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + c.eventMonitor.RLock() + if c.eventMonitor.enabled && c.eventMonitor.C == eventChan { + // Signal that we're exiting. + eventChan <- EOFEvent + } + c.eventMonitor.RUnlock() + break + } + errChan <- err + } + if event.Time == 0 { + continue + } + transformEvent(&event) + c.eventMonitor.RLock() + if c.eventMonitor.enabled && c.eventMonitor.C == eventChan { + eventChan <- &event + } + c.eventMonitor.RUnlock() + } + }(res, conn) + return nil +} + +// transformEvent takes an event and determines what version it is from +// then populates both versions of the event +func transformEvent(event *APIEvents) { + // if event version is <= 1.21 there will be no Action and no Type + if event.Action == "" && event.Type == "" { + event.Action = event.Status + event.Actor.ID = event.ID + event.Actor.Attributes = map[string]string{} + switch event.Status { + case "delete", "import", "pull", "push", "tag", "untag": + event.Type = "image" + default: + event.Type = "container" + if event.From != "" { + event.Actor.Attributes["image"] = event.From + } + } + } else { + if event.Status == "" { + if event.Type == "image" || event.Type == "container" { + event.Status = event.Action + } else { + // Because just the Status has been overloaded with different Types + // if an event is not for an image or a container, we prepend the type + // to avoid problems for people relying on actions being only for + // images and containers + event.Status = event.Type + ":" + event.Action + } + } + if event.ID == "" { + event.ID = event.Actor.ID + } + if event.From == "" { + event.From = event.Actor.Attributes["image"] + } + } +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/exec.go b/vendor/github.com/ory/dockertest/v3/docker/exec.go new file mode 100644 index 00000000..3b875fa3 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/exec.go @@ -0,0 +1,213 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" +) + +// Exec is the type representing a `docker exec` instance and containing the +// instance ID +type Exec struct { + ID string `json:"Id,omitempty" yaml:"Id,omitempty"` +} + +// CreateExecOptions specify parameters to the CreateExecContainer function. 
+// +// See https://goo.gl/60TeBP for more details +type CreateExecOptions struct { + AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"` + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"` + Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"` + Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty" toml:"Cmd,omitempty"` + Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"` + User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"` + Context context.Context `json:"-"` + Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"` +} + +// CreateExec sets up an exec instance in a running container `id`, returning the exec +// instance, or an error in case of failure. +// +// See https://goo.gl/60TeBP for more details +func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { + if len(opts.Env) > 0 && c.serverAPIVersion.LessThan(apiVersion125) { + return nil, errors.New("exec configuration Env is only supported in API#1.25 and above") + } + path := fmt.Sprintf("/containers/%s/exec", opts.Container) + resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: opts.Container} + } + return nil, err + } + defer resp.Body.Close() + var exec Exec + if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil { + return nil, err + } + + return &exec, nil +} + +// StartExecOptions specify parameters to the StartExecContainer function. +// +// See https://goo.gl/1EeDWi for more details +type StartExecOptions struct { + InputStream io.Reader `qs:"-"` + OutputStream io.Writer `qs:"-"` + ErrorStream io.Writer `qs:"-"` + + Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty" toml:"Detach,omitempty"` + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"` + + // Use raw terminal? Usually true when the container contains a TTY. + RawTerminal bool `qs:"-"` + + // If set, after a successful connect, a sentinel will be sent and then the + // client will block on receive before continuing. + // + // It must be an unbuffered channel. Using a buffered channel can lead + // to unexpected behavior. + Success chan struct{} `json:"-"` + + Context context.Context `json:"-"` +} + +// StartExec starts a previously set up exec instance id. If opts.Detach is +// true, it returns after starting the exec command. Otherwise, it sets up an +// interactive session with the exec command. +// +// See https://goo.gl/1EeDWi for more details +func (c *Client) StartExec(id string, opts StartExecOptions) error { + cw, err := c.StartExecNonBlocking(id, opts) + if err != nil { + return err + } + if cw != nil { + return cw.Wait() + } + return nil +} + +// StartExecNonBlocking starts a previously set up exec instance id. If opts.Detach is +// true, it returns after starting the exec command. Otherwise, it sets up an +// interactive session with the exec command. 
+//
+// See https://goo.gl/1EeDWi for more details
+func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
+	if id == "" {
+		return nil, &NoSuchExec{ID: id}
+	}
+
+	path := fmt.Sprintf("/exec/%s/start", id)
+
+	if opts.Detach {
+		resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context})
+		if err != nil {
+			if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+				return nil, &NoSuchExec{ID: id}
+			}
+			return nil, err
+		}
+		defer resp.Body.Close()
+		return nil, nil
+	}
+
+	return c.hijack("POST", path, hijackOptions{
+		success:        opts.Success,
+		setRawTerminal: opts.RawTerminal,
+		in:             opts.InputStream,
+		stdout:         opts.OutputStream,
+		stderr:         opts.ErrorStream,
+		data:           opts,
+	})
+}
+
+// ResizeExecTTY resizes the tty session used by the exec command id. This API
+// is valid only if Tty was specified as part of creating and starting the exec
+// command.
+//
+// See https://goo.gl/Mo5bxx for more details
+func (c *Client) ResizeExecTTY(id string, height, width int) error {
+	params := make(url.Values)
+	params.Set("h", strconv.Itoa(height))
+	params.Set("w", strconv.Itoa(width))
+
+	path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
+	resp, err := c.do("POST", path, doOptions{})
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ExecProcessConfig is a type describing the command associated with an Exec
+// instance. It's used in the ExecInspect type.
+type ExecProcessConfig struct {
+	User       string   `json:"user,omitempty" yaml:"user,omitempty" toml:"user,omitempty"`
+	Privileged bool     `json:"privileged,omitempty" yaml:"privileged,omitempty" toml:"privileged,omitempty"`
+	Tty        bool     `json:"tty,omitempty" yaml:"tty,omitempty" toml:"tty,omitempty"`
+	EntryPoint string   `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty" toml:"entrypoint,omitempty"`
+	Arguments  []string `json:"arguments,omitempty" yaml:"arguments,omitempty" toml:"arguments,omitempty"`
+}
+
+// ExecInspect is a type with details about an exec instance, including the
+// exit code if the command has finished running. It's returned by an API
+// call to /exec/(id)/json
+//
+// See https://goo.gl/ctMUiW for more details
+type ExecInspect struct {
+	ID            string            `json:"ID,omitempty" yaml:"ID,omitempty" toml:"ID,omitempty"`
+	ExitCode      int               `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"`
+	Running       bool              `json:"Running,omitempty" yaml:"Running,omitempty" toml:"Running,omitempty"`
+	OpenStdin     bool              `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty" toml:"OpenStdin,omitempty"`
+	OpenStderr    bool              `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty" toml:"OpenStderr,omitempty"`
+	OpenStdout    bool              `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty" toml:"OpenStdout,omitempty"`
+	ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty" toml:"ProcessConfig,omitempty"`
+	ContainerID   string            `json:"ContainerID,omitempty" yaml:"ContainerID,omitempty" toml:"ContainerID,omitempty"`
+	DetachKeys    string            `json:"DetachKeys,omitempty" yaml:"DetachKeys,omitempty" toml:"DetachKeys,omitempty"`
+	CanRemove     bool              `json:"CanRemove,omitempty" yaml:"CanRemove,omitempty" toml:"CanRemove,omitempty"`
+}
+
+// InspectExec returns low-level information about the exec command id.
+// +// See https://goo.gl/ctMUiW for more details +func (c *Client) InspectExec(id string) (*ExecInspect, error) { + path := fmt.Sprintf("/exec/%s/json", id) + resp, err := c.do("GET", path, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchExec{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var exec ExecInspect + if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil { + return nil, err + } + return &exec, nil +} + +// NoSuchExec is the error returned when a given exec instance does not exist. +type NoSuchExec struct { + ID string +} + +func (err *NoSuchExec) Error() string { + return "No such exec instance: " + err.ID +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/image.go b/vendor/github.com/ory/dockertest/v3/docker/image.go new file mode 100644 index 00000000..6c3f7b61 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/image.go @@ -0,0 +1,722 @@ +// Copyright 2013 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + "time" +) + +// APIImages represent an image returned in the ListImages call. +type APIImages struct { + ID string `json:"Id" yaml:"Id" toml:"Id"` + RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty" toml:"RepoTags,omitempty"` + Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` + VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty" toml:"VirtualSize,omitempty"` + ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty" toml:"ParentId,omitempty"` + RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty" toml:"RepoDigests,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` +} + +// RootFS represents the underlying layers used by an image +type RootFS struct { + Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` + Layers []string `json:"Layers,omitempty" yaml:"Layers,omitempty" toml:"Layers,omitempty"` +} + +// Image is the type representing a docker image and its various properties +type Image struct { + ID string `json:"Id" yaml:"Id" toml:"Id"` + RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty" toml:"RepoTags,omitempty"` + Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty" toml:"Parent,omitempty"` + Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty" toml:"Comment,omitempty"` + Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"` + Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"` + ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty" toml:"ContainerConfig,omitempty"` + DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty" toml:"DockerVersion,omitempty"` + Author string `json:"Author,omitempty" yaml:"Author,omitempty" toml:"Author,omitempty"` + Config *Config `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"` + Architecture string `json:"Architecture,omitempty" 
yaml:"Architecture,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` + VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty" toml:"VirtualSize,omitempty"` + RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty" toml:"RepoDigests,omitempty"` + RootFS *RootFS `json:"RootFS,omitempty" yaml:"RootFS,omitempty" toml:"RootFS,omitempty"` + OS string `json:"Os,omitempty" yaml:"Os,omitempty" toml:"Os,omitempty"` +} + +// ImagePre012 serves the same purpose as the Image type except that it is for +// earlier versions of the Docker API (pre-012 to be specific) +type ImagePre012 struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +var ( + // ErrNoSuchImage is the error returned when the image does not exist. + ErrNoSuchImage = errors.New("no such image") + + // ErrMissingRepo is the error returned when the remote repository is + // missing. + ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'") + + // ErrMissingOutputStream is the error returned when no output stream + // is provided to some calls, like BuildImage. + ErrMissingOutputStream = errors.New("missing output stream") + + // ErrMultipleContexts is the error returned when both a ContextDir and + // InputStream are provided in BuildImageOptions + ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream") + + // ErrMustSpecifyNames is the error rreturned when the Names field on + // ExportImagesOptions is nil or empty + ErrMustSpecifyNames = errors.New("must specify at least one name to export") +) + +// ListImagesOptions specify parameters to the ListImages function. +// +// See https://goo.gl/BVzauZ for more details. +type ListImagesOptions struct { + Filters map[string][]string + All bool + Digests bool + Filter string + Context context.Context +} + +// ListImages returns the list of available images in the server. +// +// See https://goo.gl/BVzauZ for more details. +func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { + path := "/images/json?" + queryString(opts) + resp, err := c.do("GET", path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var images []APIImages + if err := json.NewDecoder(resp.Body).Decode(&images); err != nil { + return nil, err + } + return images, nil +} + +// ImageHistory represent a layer in an image's history returned by the +// ImageHistory call. +type ImageHistory struct { + ID string `json:"Id" yaml:"Id" toml:"Id"` + Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty" toml:"Tags,omitempty"` + Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Tags,omitempty"` + CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty" toml:"CreatedBy,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"` +} + +// ImageHistory returns the history of the image by its name or ID. +// +// See https://goo.gl/fYtxQa for more details. 
+func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { + resp, err := c.do("GET", "/images/"+name+"/history", doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + return nil, err + } + defer resp.Body.Close() + var history []ImageHistory + if err := json.NewDecoder(resp.Body).Decode(&history); err != nil { + return nil, err + } + return history, nil +} + +// RemoveImage removes an image by its name or ID. +// +// See https://goo.gl/Vd2Pck for more details. +func (c *Client) RemoveImage(name string) error { + resp, err := c.do("DELETE", "/images/"+name, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return ErrNoSuchImage + } + return err + } + resp.Body.Close() + return nil +} + +// RemoveImageOptions present the set of options available for removing an image +// from a registry. +// +// See https://goo.gl/Vd2Pck for more details. +type RemoveImageOptions struct { + Force bool `qs:"force"` + NoPrune bool `qs:"noprune"` + Context context.Context +} + +// RemoveImageExtended removes an image by its name or ID. +// Extra params can be passed, see RemoveImageOptions +// +// See https://goo.gl/Vd2Pck for more details. +func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { + uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) + resp, err := c.do("DELETE", uri, doOptions{context: opts.Context}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return ErrNoSuchImage + } + return err + } + resp.Body.Close() + return nil +} + +// InspectImage returns an image by its name or ID. +// +// See https://goo.gl/ncLTG8 for more details. +func (c *Client) InspectImage(name string) (*Image, error) { + resp, err := c.do("GET", "/images/"+name+"/json", doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, ErrNoSuchImage + } + return nil, err + } + defer resp.Body.Close() + + var image Image + + // if the caller elected to skip checking the server's version, assume it's the latest + if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) { + if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { + return nil, err + } + } else { + var imagePre012 ImagePre012 + if err := json.NewDecoder(resp.Body).Decode(&imagePre012); err != nil { + return nil, err + } + + image.ID = imagePre012.ID + image.Parent = imagePre012.Parent + image.Comment = imagePre012.Comment + image.Created = imagePre012.Created + image.Container = imagePre012.Container + image.ContainerConfig = imagePre012.ContainerConfig + image.DockerVersion = imagePre012.DockerVersion + image.Author = imagePre012.Author + image.Config = imagePre012.Config + image.Architecture = imagePre012.Architecture + image.Size = imagePre012.Size + } + + return &image, nil +} + +// PushImageOptions represents options to use in the PushImage method. +// +// See https://goo.gl/BZemGg for more details. +type PushImageOptions struct { + // Name of the image + Name string + + // Tag of the image + Tag string + + // Registry server to push the image + Registry string + + OutputStream io.Writer `qs:"-"` + RawJSONStream bool `qs:"-"` + InactivityTimeout time.Duration `qs:"-"` + + Context context.Context +} + +// PushImage pushes an image to a remote registry, logging progress to w. 
+// +// An empty instance of AuthConfiguration may be used for unauthenticated +// pushes. +// +// See https://goo.gl/BZemGg for more details. +func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error { + if opts.Name == "" { + return ErrNoSuchImage + } + headers, err := headersWithAuth(auth) + if err != nil { + return err + } + name := opts.Name + opts.Name = "" + path := "/images/" + name + "/push?" + queryString(&opts) + return c.stream("POST", path, streamOptions{ + setRawTerminal: true, + rawJSONStream: opts.RawJSONStream, + headers: headers, + stdout: opts.OutputStream, + inactivityTimeout: opts.InactivityTimeout, + context: opts.Context, + }) +} + +// PullImageOptions present the set of options available for pulling an image +// from a registry. +// +// See https://goo.gl/qkoSsn for more details. +type PullImageOptions struct { + Repository string `qs:"fromImage"` + Tag string + + // Only required for Docker Engine 1.9 or 1.10 w/ Remote API < 1.21 + // and Docker Engine < 1.9 + // This parameter was removed in Docker Engine 1.11 + Registry string + + OutputStream io.Writer `qs:"-"` + RawJSONStream bool `qs:"-"` + InactivityTimeout time.Duration `qs:"-"` + Context context.Context +} + +// PullImage pulls an image from a remote registry, logging progress to +// opts.OutputStream. +// +// See https://goo.gl/qkoSsn for more details. +func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error { + if opts.Repository == "" { + return ErrNoSuchImage + } + + headers, err := headersWithAuth(auth) + if err != nil { + return err + } + if opts.Tag == "" && strings.Contains(opts.Repository, "@") { + parts := strings.SplitN(opts.Repository, "@", 2) + opts.Repository = parts[0] + opts.Tag = parts[1] + } + return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context) +} + +func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error { + path := "/images/create?" + qs + return c.stream("POST", path, streamOptions{ + setRawTerminal: true, + headers: headers, + in: in, + stdout: w, + rawJSONStream: rawJSONStream, + inactivityTimeout: timeout, + context: context, + }) +} + +// LoadImageOptions represents the options for LoadImage Docker API Call +// +// See https://goo.gl/rEsBV3 for more details. +type LoadImageOptions struct { + InputStream io.Reader + OutputStream io.Writer + Context context.Context +} + +// LoadImage imports a tarball docker image +// +// See https://goo.gl/rEsBV3 for more details. +func (c *Client) LoadImage(opts LoadImageOptions) error { + return c.stream("POST", "/images/load", streamOptions{ + setRawTerminal: true, + in: opts.InputStream, + stdout: opts.OutputStream, + context: opts.Context, + }) +} + +// ExportImageOptions represent the options for ExportImage Docker API call. +// +// See https://goo.gl/AuySaA for more details. +type ExportImageOptions struct { + Name string + OutputStream io.Writer + InactivityTimeout time.Duration + Context context.Context +} + +// ExportImage exports an image (as a tar file) into the stream. +// +// See https://goo.gl/AuySaA for more details. 
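
The ExportImage implementation follows. A minimal sketch of streaming an image to a tarball on disk (file name and image tag are illustrative; NewClientFromEnv is assumed as above):

package main

import (
	"log"
	"os"

	docker "github.com/ory/dockertest/v3/docker"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// Write the image tarball to disk; the image name is illustrative.
	f, err := os.Create("alpine.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := client.ExportImage(docker.ExportImageOptions{
		Name:         "alpine:latest",
		OutputStream: f,
	}); err != nil {
		log.Fatal(err)
	}
}
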
+func (c *Client) ExportImage(opts ExportImageOptions) error {
+	return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{
+		setRawTerminal:    true,
+		stdout:            opts.OutputStream,
+		inactivityTimeout: opts.InactivityTimeout,
+		context:           opts.Context,
+	})
+}
+
+// ExportImagesOptions represent the options for ExportImages Docker API call
+//
+// See https://goo.gl/N9XlDn for more details.
+type ExportImagesOptions struct {
+	Names             []string
+	OutputStream      io.Writer     `qs:"-"`
+	InactivityTimeout time.Duration `qs:"-"`
+	Context           context.Context
+}
+
+// ExportImages exports one or more images (as a tar file) into the stream
+//
+// See https://goo.gl/N9XlDn for more details.
+func (c *Client) ExportImages(opts ExportImagesOptions) error {
+	if opts.Names == nil || len(opts.Names) == 0 {
+		return ErrMustSpecifyNames
+	}
+	return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{
+		setRawTerminal:    true,
+		stdout:            opts.OutputStream,
+		inactivityTimeout: opts.InactivityTimeout,
+	})
+}
+
+// ImportImageOptions present the set of information available for importing
+// an image from a source file or the stdin.
+//
+// See https://goo.gl/qkoSsn for more details.
+type ImportImageOptions struct {
+	Repository string `qs:"repo"`
+	Source     string `qs:"fromSrc"`
+	Tag        string `qs:"tag"`
+
+	InputStream       io.Reader     `qs:"-"`
+	OutputStream      io.Writer     `qs:"-"`
+	RawJSONStream     bool          `qs:"-"`
+	InactivityTimeout time.Duration `qs:"-"`
+	Context           context.Context
+}
+
+// ImportImage imports an image from a url, a file or stdin
+//
+// See https://goo.gl/qkoSsn for more details.
+func (c *Client) ImportImage(opts ImportImageOptions) error {
+	if opts.Repository == "" {
+		return ErrNoSuchImage
+	}
+	if opts.Source != "-" {
+		opts.InputStream = nil
+	}
+	if opts.Source != "-" && !isURL(opts.Source) {
+		f, err := os.Open(opts.Source)
+		if err != nil {
+			return err
+		}
+		opts.InputStream = f
+		opts.Source = "-"
+	}
+	return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context)
+}
+
+// BuildImageOptions present the set of information available for building an
+// image from a tarfile with a Dockerfile in it.
+//
+// For more details about the Docker building process, see
+// https://goo.gl/4nYHwV.
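
Before the full option struct below, a sketch of a typical build call: point ContextDir at a directory containing a Dockerfile and collect the build log on OutputStream (paths and tag are illustrative; NewClientFromEnv is assumed):

package main

import (
	"log"
	"os"

	docker "github.com/ory/dockertest/v3/docker"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	err = client.BuildImage(docker.BuildImageOptions{
		Name:         "example/app:dev", // tag to apply; illustrative
		ContextDir:   "./build-context", // must contain a Dockerfile
		OutputStream: os.Stdout,         // raw build log
	})
	if err != nil {
		log.Fatal(err)
	}
}
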
+type BuildImageOptions struct {
+	Name                string             `qs:"t"`
+	Dockerfile          string             `qs:"dockerfile"`
+	NoCache             bool               `qs:"nocache"`
+	CacheFrom           []string           `qs:"-"`
+	SuppressOutput      bool               `qs:"q"`
+	Pull                bool               `qs:"pull"`
+	RmTmpContainer      bool               `qs:"rm"`
+	ForceRmTmpContainer bool               `qs:"forcerm"`
+	RawJSONStream       bool               `qs:"-"`
+	Memory              int64              `qs:"memory"`
+	Memswap             int64              `qs:"memswap"`
+	CPUShares           int64              `qs:"cpushares"`
+	CPUQuota            int64              `qs:"cpuquota"`
+	CPUPeriod           int64              `qs:"cpuperiod"`
+	CPUSetCPUs          string             `qs:"cpusetcpus"`
+	Labels              map[string]string  `qs:"labels"`
+	InputStream         io.Reader          `qs:"-"`
+	OutputStream        io.Writer          `qs:"-"`
+	ErrorStream         io.Writer          `qs:"-"`
+	Remote              string             `qs:"remote"`
+	Auth                AuthConfiguration  `qs:"-"` // for older docker X-Registry-Auth header
+	AuthConfigs         AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header
+	ContextDir          string             `qs:"-"`
+	Ulimits             []ULimit           `qs:"-"`
+	BuildArgs           []BuildArg         `qs:"-"`
+	NetworkMode         string             `qs:"networkmode"`
+	InactivityTimeout   time.Duration      `qs:"-"`
+	CgroupParent        string             `qs:"cgroupparent"`
+	SecurityOpt         []string           `qs:"securityopt"`
+	Target              string             `qs:"target"`
+	Context             context.Context
+}
+
+// BuildArg represents arguments that can be passed to the image when building
+// it from a Dockerfile.
+//
+// For more details about the Docker building process, see
+// https://goo.gl/4nYHwV.
+type BuildArg struct {
+	Name  string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+	Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"`
+}
+
+// BuildImage builds an image from a tarball's url or a Dockerfile in the input
+// stream.
+//
+// See https://goo.gl/4nYHwV for more details.
+func (c *Client) BuildImage(opts BuildImageOptions) error {
+	if opts.OutputStream == nil {
+		return ErrMissingOutputStream
+	}
+	headers, err := headersWithAuth(opts.Auth, c.versionedAuthConfigs(opts.AuthConfigs))
+	if err != nil {
+		return err
+	}
+
+	if opts.Remote != "" && opts.Name == "" {
+		opts.Name = opts.Remote
+	}
+	if opts.InputStream != nil || opts.ContextDir != "" {
+		headers["Content-Type"] = "application/tar"
+	} else if opts.Remote == "" {
+		return ErrMissingRepo
+	}
+	if opts.ContextDir != "" {
+		if opts.InputStream != nil {
+			return ErrMultipleContexts
+		}
+		var err error
+		if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil {
+			return err
+		}
+	}
+	qs := queryString(&opts)
+
+	if c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion125) && len(opts.CacheFrom) > 0 {
+		if b, err := json.Marshal(opts.CacheFrom); err == nil {
+			item := url.Values(map[string][]string{})
+			item.Add("cachefrom", string(b))
+			qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+		}
+	}
+
+	if len(opts.Ulimits) > 0 {
+		if b, err := json.Marshal(opts.Ulimits); err == nil {
+			item := url.Values(map[string][]string{})
+			item.Add("ulimits", string(b))
+			qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+		}
+	}
+
+	if len(opts.BuildArgs) > 0 {
+		v := make(map[string]string)
+		for _, arg := range opts.BuildArgs {
+			v[arg.Name] = arg.Value
+		}
+		if b, err := json.Marshal(v); err == nil {
+			item := url.Values(map[string][]string{})
+			item.Add("buildargs", string(b))
+			qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+		}
+	}
+
+	return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{
+		setRawTerminal:    true,
+		rawJSONStream:     opts.RawJSONStream,
+		headers:           headers,
+		in:                opts.InputStream,
+		stdout:            opts.OutputStream,
+		stderr:            opts.ErrorStream,
+		inactivityTimeout: opts.InactivityTimeout,
+		context:
opts.Context, + }) +} + +func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} { + if c.serverAPIVersion == nil { + c.checkAPIVersion() + } + if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) { + return AuthConfigurations119(authConfigs.Configs) + } + return authConfigs +} + +// TagImageOptions present the set of options to tag an image. +// +// See https://goo.gl/prHrvo for more details. +type TagImageOptions struct { + Repo string + Tag string + Force bool + Context context.Context +} + +// TagImage adds a tag to the image identified by the given name. +// +// See https://goo.gl/prHrvo for more details. +func (c *Client) TagImage(name string, opts TagImageOptions) error { + if name == "" { + return ErrNoSuchImage + } + resp, err := c.do("POST", "/images/"+name+"/tag?"+queryString(&opts), doOptions{ + context: opts.Context, + }) + + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound { + return ErrNoSuchImage + } + + return err +} + +func isURL(u string) bool { + p, err := url.Parse(u) + if err != nil { + return false + } + return p.Scheme == "http" || p.Scheme == "https" +} + +func headersWithAuth(auths ...interface{}) (map[string]string, error) { + var headers = make(map[string]string) + + for _, auth := range auths { + switch auth.(type) { + case AuthConfiguration: + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(auth); err != nil { + return nil, err + } + headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes()) + case AuthConfigurations, AuthConfigurations119: + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(auth); err != nil { + return nil, err + } + headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes()) + } + } + + return headers, nil +} + +// APIImageSearch reflect the result of a search on the Docker Hub. +// +// See https://goo.gl/KLO9IZ for more details. +type APIImageSearch struct { + Description string `json:"description,omitempty" yaml:"description,omitempty" toml:"description,omitempty"` + IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty" toml:"is_official,omitempty"` + IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty" toml:"is_automated,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty" toml:"name,omitempty"` + StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty" toml:"star_count,omitempty"` +} + +// SearchImages search the docker hub with a specific given term. +// +// See https://goo.gl/KLO9IZ for more details. +func (c *Client) SearchImages(term string) ([]APIImageSearch, error) { + resp, err := c.do("GET", "/images/search?term="+term, doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var searchResult []APIImageSearch + if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil { + return nil, err + } + return searchResult, nil +} + +// SearchImagesEx search the docker hub with a specific given term and authentication. +// +// See https://goo.gl/KLO9IZ for more details. 
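
The authenticated variant follows; for the unauthenticated SearchImages above, a hedged sketch (search term is illustrative; NewClientFromEnv is assumed):

package main

import (
	"fmt"
	"log"

	docker "github.com/ory/dockertest/v3/docker"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	results, err := client.SearchImages("postgres")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range results {
		fmt.Printf("%s (stars: %d, official: %v)\n", r.Name, r.StarCount, r.IsOfficial)
	}
}
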
+func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImageSearch, error) { + headers, err := headersWithAuth(auth) + if err != nil { + return nil, err + } + + resp, err := c.do("GET", "/images/search?term="+term, doOptions{ + headers: headers, + }) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + var searchResult []APIImageSearch + if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil { + return nil, err + } + + return searchResult, nil +} + +// PruneImagesOptions specify parameters to the PruneImages function. +// +// See https://goo.gl/qfZlbZ for more details. +type PruneImagesOptions struct { + Filters map[string][]string + Context context.Context +} + +// PruneImagesResults specify results from the PruneImages function. +// +// See https://goo.gl/qfZlbZ for more details. +type PruneImagesResults struct { + ImagesDeleted []struct{ Untagged, Deleted string } + SpaceReclaimed int64 +} + +// PruneImages deletes images which are unused. +// +// See https://goo.gl/qfZlbZ for more details. +func (c *Client) PruneImages(opts PruneImagesOptions) (*PruneImagesResults, error) { + path := "/images/prune?" + queryString(opts) + resp, err := c.do("POST", path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var results PruneImagesResults + if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { + return nil, err + } + return &results, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/misc.go b/vendor/github.com/ory/dockertest/v3/docker/misc.go new file mode 100644 index 00000000..394cf9f0 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/misc.go @@ -0,0 +1,181 @@ +// Copyright 2013 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/json" + "net" + "strings" + +) + +// Version returns version information about the docker server. +// +// See https://goo.gl/mU7yje for more details. +func (c *Client) Version() (*Env, error) { + resp, err := c.do("GET", "/version", doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var env Env + if err := env.Decode(resp.Body); err != nil { + return nil, err + } + return &env, nil +} + +// DockerInfo contains information about the Docker server +// +// See https://goo.gl/bHUoz9 for more details. 
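
The DockerInfo type follows. Note how the Env helpers from env.go pair with Version() just above: the /version payload is decoded into an Env, so individual fields come back through Get. A sketch (key names mirror the daemon's /version response; NewClientFromEnv is assumed):

package main

import (
	"fmt"
	"log"

	docker "github.com/ory/dockertest/v3/docker"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	env, err := client.Version()
	if err != nil {
		log.Fatal(err)
	}
	// Keys mirror the daemon's /version response, e.g. "Version" and "ApiVersion".
	fmt.Println("server version:", env.Get("Version"))
	fmt.Println("api version:", env.Get("ApiVersion"))
}
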
+type DockerInfo struct {
+	ID                 string
+	Containers         int
+	ContainersRunning  int
+	ContainersPaused   int
+	ContainersStopped  int
+	Images             int
+	Driver             string
+	DriverStatus       [][2]string
+	SystemStatus       [][2]string
+	Plugins            PluginsInfo
+	MemoryLimit        bool
+	SwapLimit          bool
+	KernelMemory       bool
+	CPUCfsPeriod       bool `json:"CpuCfsPeriod"`
+	CPUCfsQuota        bool `json:"CpuCfsQuota"`
+	CPUShares          bool
+	CPUSet             bool
+	IPv4Forwarding     bool
+	BridgeNfIptables   bool
+	BridgeNfIP6tables  bool `json:"BridgeNfIp6tables"`
+	Debug              bool
+	OomKillDisable     bool
+	ExperimentalBuild  bool
+	NFd                int
+	NGoroutines        int
+	SystemTime         string
+	ExecutionDriver    string
+	LoggingDriver      string
+	CgroupDriver       string
+	NEventsListener    int
+	KernelVersion      string
+	OperatingSystem    string
+	OSType             string
+	Architecture       string
+	IndexServerAddress string
+	RegistryConfig     *ServiceConfig
+	SecurityOptions    []string
+	NCPU               int
+	MemTotal           int64
+	DockerRootDir      string
+	HTTPProxy          string `json:"HttpProxy"`
+	HTTPSProxy         string `json:"HttpsProxy"`
+	NoProxy            string
+	Name               string
+	Labels             []string
+	ServerVersion      string
+	ClusterStore       string
+	ClusterAdvertise   string
+	Isolation          string
+	InitBinary         string
+	DefaultRuntime     string
+	LiveRestoreEnabled bool
+	//Swarm swarm.Info
+}
+
+// PluginsInfo is a struct with the plugins registered with the docker daemon
+//
+// for more information, see: https://goo.gl/bHUoz9
+type PluginsInfo struct {
+	// List of Volume plugins registered
+	Volume []string
+	// List of Network plugins registered
+	Network []string
+	// List of Authorization plugins registered
+	Authorization []string
+}
+
+// ServiceConfig stores daemon registry services configuration.
+//
+// for more information, see: https://goo.gl/7iFFDz
+type ServiceConfig struct {
+	InsecureRegistryCIDRs []*NetIPNet
+	IndexConfigs          map[string]*IndexInfo
+	Mirrors               []string
+}
+
+// NetIPNet is the net.IPNet type, which can be marshalled and
+// unmarshalled to JSON.
+//
+// for more information, see: https://goo.gl/7iFFDz
+type NetIPNet net.IPNet
+
+// MarshalJSON returns the JSON representation of the IPNet.
+func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
+	return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+// UnmarshalJSON sets the IPNet from a byte array of JSON.
+func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
+	var ipnetStr string
+	if err = json.Unmarshal(b, &ipnetStr); err == nil {
+		var cidr *net.IPNet
+		if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
+			*ipnet = NetIPNet(*cidr)
+		}
+	}
+	return
+}
+
+// IndexInfo contains information about a registry.
+//
+// for more information, see: https://goo.gl/7iFFDz
+type IndexInfo struct {
+	Name     string
+	Mirrors  []string
+	Secure   bool
+	Official bool
+}
+
+// Info returns system-wide information about the Docker server.
+//
+// See https://goo.gl/ElTHi2 for more details.
+func (c *Client) Info() (*DockerInfo, error) {
+	resp, err := c.do("GET", "/info", doOptions{})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var info DockerInfo
+	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
+		return nil, err
+	}
+	return &info, nil
+}
+
+// ParseRepositoryTag gets the name of the repository and returns it split
+// into two parts: the repository and the tag. It ignores the digest when it is
+// present.
+// +// Some examples: +// +// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest +// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, "" +// busybox:latest@sha256:4a731fb46adc5cefe3ae374a8b6020fc1b6ad667a279647766e9a3cd89f6fa92 -> busybox, latest +func ParseRepositoryTag(repoTag string) (repository string, tag string) { + parts := strings.SplitN(repoTag, "@", 2) + repoTag = parts[0] + n := strings.LastIndex(repoTag, ":") + if n < 0 { + return repoTag, "" + } + if tag := repoTag[n+1:]; !strings.Contains(tag, "/") { + return repoTag[:n], tag + } + return repoTag, "" +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/network.go b/vendor/github.com/ory/dockertest/v3/docker/network.go new file mode 100644 index 00000000..c6ddb22c --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/network.go @@ -0,0 +1,321 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" +) + +// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the +// network already exists. +var ErrNetworkAlreadyExists = errors.New("network already exists") + +// Network represents a network. +// +// See https://goo.gl/6GugX3 for more details. +type Network struct { + Name string + ID string `json:"Id"` + Scope string + Driver string + IPAM IPAMOptions + Containers map[string]Endpoint + Options map[string]string + Internal bool + EnableIPv6 bool `json:"EnableIPv6"` + Labels map[string]string +} + +// Endpoint contains network resources allocated and used for a container in a network +// +// See https://goo.gl/6GugX3 for more details. +type Endpoint struct { + Name string + ID string `json:"EndpointID"` + MacAddress string + IPv4Address string + IPv6Address string +} + +// ListNetworks returns all networks. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) ListNetworks() ([]Network, error) { + resp, err := c.do("GET", "/networks", doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var networks []Network + if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil { + return nil, err + } + return networks, nil +} + +// NetworkFilterOpts is an aggregation of key=value that Docker +// uses to filter networks +type NetworkFilterOpts map[string]map[string]bool + +// FilteredListNetworks returns all networks with the filters applied +// +// See goo.gl/zd2mx4 for more details. +func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) { + params, err := json.Marshal(opts) + if err != nil { + return nil, err + } + path := "/networks?filters=" + string(params) + resp, err := c.do("GET", path, doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var networks []Network + if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil { + return nil, err + } + return networks, nil +} + +// NetworkInfo returns information about a network by its ID. +// +// See https://goo.gl/6GugX3 for more details. 
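
NetworkInfo's implementation follows; composed with ListNetworks above, it reads like this sketch (NewClientFromEnv is assumed, as in the earlier examples):

package main

import (
	"fmt"
	"log"

	docker "github.com/ory/dockertest/v3/docker"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	networks, err := client.ListNetworks()
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range networks {
		info, err := client.NetworkInfo(n.ID)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%s): %d containers\n", info.Name, info.Driver, len(info.Containers))
	}
}
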
+func (c *Client) NetworkInfo(id string) (*Network, error) { + path := "/networks/" + id + resp, err := c.do("GET", path, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchNetwork{ID: id} + } + return nil, err + } + defer resp.Body.Close() + var network Network + if err := json.NewDecoder(resp.Body).Decode(&network); err != nil { + return nil, err + } + return &network, nil +} + +// CreateNetworkOptions specify parameters to the CreateNetwork function and +// (for now) is the expected body of the "create network" http request message +// +// See https://goo.gl/6GugX3 for more details. +type CreateNetworkOptions struct { + Name string `json:"Name" yaml:"Name" toml:"Name"` + Driver string `json:"Driver" yaml:"Driver" toml:"Driver"` + IPAM *IPAMOptions `json:"IPAM,omitempty" yaml:"IPAM" toml:"IPAM"` + Options map[string]interface{} `json:"Options" yaml:"Options" toml:"Options"` + Labels map[string]string `json:"Labels" yaml:"Labels" toml:"Labels"` + CheckDuplicate bool `json:"CheckDuplicate" yaml:"CheckDuplicate" toml:"CheckDuplicate"` + Internal bool `json:"Internal" yaml:"Internal" toml:"Internal"` + EnableIPv6 bool `json:"EnableIPv6" yaml:"EnableIPv6" toml:"EnableIPv6"` + Context context.Context `json:"-"` +} + +// IPAMOptions controls IP Address Management when creating a network +// +// See https://goo.gl/T8kRVH for more details. +type IPAMOptions struct { + Driver string `json:"Driver" yaml:"Driver" toml:"Driver"` + Config []IPAMConfig `json:"Config" yaml:"Config" toml:"Config"` + Options map[string]string `json:"Options" yaml:"Options" toml:"Options"` +} + +// IPAMConfig represents IPAM configurations +// +// See https://goo.gl/T8kRVH for more details. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// CreateNetwork creates a new network, returning the network instance, +// or an error in case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { + resp, err := c.do( + "POST", + "/networks/create", + doOptions{ + data: opts, + context: opts.Context, + }, + ) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + type createNetworkResponse struct { + ID string + } + var ( + network Network + cnr createNetworkResponse + ) + if err := json.NewDecoder(resp.Body).Decode(&cnr); err != nil { + return nil, err + } + + network.Name = opts.Name + network.ID = cnr.ID + network.Driver = opts.Driver + + return &network, nil +} + +// RemoveNetwork removes a network or returns an error in case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) RemoveNetwork(id string) error { + resp, err := c.do("DELETE", "/networks/"+id, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchNetwork{ID: id} + } + return err + } + resp.Body.Close() + return nil +} + +// NetworkConnectionOptions specify parameters to the ConnectNetwork and +// DisconnectNetwork function. +// +// See https://goo.gl/RV7BJU for more details. 
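
The options type follows. A sketch tying CreateNetwork to the connect call whose options are defined just below (the network name and container reference are placeholders; NewClientFromEnv is assumed):

package main

import (
	"log"

	docker "github.com/ory/dockertest/v3/docker"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	network, err := client.CreateNetwork(docker.CreateNetworkOptions{
		Name:           "example-net", // placeholder name
		Driver:         "bridge",
		CheckDuplicate: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// "my-container" is a placeholder for a real container ID or name.
	err = client.ConnectNetwork(network.ID, docker.NetworkConnectionOptions{
		Container: "my-container",
	})
	if err != nil {
		log.Fatal(err)
	}
}
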
+type NetworkConnectionOptions struct { + Container string + + // EndpointConfig is only applicable to the ConnectNetwork call + EndpointConfig *EndpointConfig `json:"EndpointConfig,omitempty"` + + // Force is only applicable to the DisconnectNetwork call + Force bool + + Context context.Context `json:"-"` +} + +// EndpointConfig stores network endpoint details +// +// See https://goo.gl/RV7BJU for more details. +type EndpointConfig struct { + IPAMConfig *EndpointIPAMConfig `json:"IPAMConfig,omitempty" yaml:"IPAMConfig,omitempty" toml:"IPAMConfig,omitempty"` + Links []string `json:"Links,omitempty" yaml:"Links,omitempty" toml:"Links,omitempty"` + Aliases []string `json:"Aliases,omitempty" yaml:"Aliases,omitempty" toml:"Aliases,omitempty"` + NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"` + EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"` + Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"` + IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"` + IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"` + IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"` + GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"` + GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for an +// endpoint +// +// See https://goo.gl/RV7BJU for more details. +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` +} + +// ConnectNetwork adds a container to a network or returns an error in case of +// failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error { + resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{ + data: opts, + context: opts.Context, + }) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} + } + return err + } + resp.Body.Close() + return nil +} + +// DisconnectNetwork removes a container from a network or returns an error in +// case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error { + resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} + } + return err + } + resp.Body.Close() + return nil +} + +// PruneNetworksOptions specify parameters to the PruneNetworks function. +// +// See https://goo.gl/kX0S9h for more details. +type PruneNetworksOptions struct { + Filters map[string][]string + Context context.Context +} + +// PruneNetworksResults specify results from the PruneNetworks function. +// +// See https://goo.gl/kX0S9h for more details. 
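
The results type follows; pruning might be driven like this sketch (the "until" filter follows the daemon's prune filter syntax and is illustrative; NewClientFromEnv is assumed):

package main

import (
	"fmt"
	"log"

	docker "github.com/ory/dockertest/v3/docker"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// Filters follow the daemon's prune filter syntax; this one is illustrative.
	results, err := client.PruneNetworks(docker.PruneNetworksOptions{
		Filters: map[string][]string{"until": {"24h"}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted:", results.NetworksDeleted)
}
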
+type PruneNetworksResults struct {
+	NetworksDeleted []string
+}
+
+// PruneNetworks deletes networks which are unused.
+//
+// See https://goo.gl/kX0S9h for more details.
+func (c *Client) PruneNetworks(opts PruneNetworksOptions) (*PruneNetworksResults, error) {
+	path := "/networks/prune?" + queryString(opts)
+	resp, err := c.do("POST", path, doOptions{context: opts.Context})
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	var results PruneNetworksResults
+	if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+		return nil, err
+	}
+	return &results, nil
+}
+
+// NoSuchNetwork is the error returned when a given network does not exist.
+type NoSuchNetwork struct {
+	ID string
+}
+
+func (err *NoSuchNetwork) Error() string {
+	return fmt.Sprintf("No such network: %s", err.ID)
+}
+
+// NoSuchNetworkOrContainer is the error returned when a given network or
+// container does not exist.
+type NoSuchNetworkOrContainer struct {
+	NetworkID   string
+	ContainerID string
+}
+
+func (err *NoSuchNetworkOrContainer) Error() string {
+	return fmt.Sprintf("No such network (%s) or container (%s)", err.NetworkID, err.ContainerID)
+}
diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/env.go b/vendor/github.com/ory/dockertest/v3/docker/opts/env.go
new file mode 100644
index 00000000..4fbd470b
--- /dev/null
+++ b/vendor/github.com/ory/dockertest/v3/docker/opts/env.go
@@ -0,0 +1,48 @@
+package opts
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// ValidateEnv validates an environment variable and returns it.
+// If no value is specified, it returns the current value using os.Getenv.
+//
+// As with ParseEnvFile and related to #16585, environment variable names
+// are not validated whatsoever; it's up to the application inside docker
+// to validate them or not.
+//
+// The only validation here is to check if name is empty, per #25099
+func ValidateEnv(val string) (string, error) {
+	arr := strings.Split(val, "=")
+	if arr[0] == "" {
+		return "", errors.Errorf("invalid environment variable: %s", val)
+	}
+	if len(arr) > 1 {
+		return val, nil
+	}
+	if !doesEnvExist(val) {
+		return val, nil
+	}
+	return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
+}
+
+func doesEnvExist(name string) bool {
+	for _, entry := range os.Environ() {
+		parts := strings.SplitN(entry, "=", 2)
+		if runtime.GOOS == "windows" {
+			// Environment variables are case-insensitive on Windows. PaTh, path and PATH are equivalent.
+			if strings.EqualFold(parts[0], name) {
+				return true
+			}
+		}
+		if parts[0] == name {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/hosts.go b/vendor/github.com/ory/dockertest/v3/docker/opts/hosts.go
new file mode 100644
index 00000000..f46b8ee7
--- /dev/null
+++ b/vendor/github.com/ory/dockertest/v3/docker/opts/hosts.go
@@ -0,0 +1,165 @@
+package opts
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+var (
+	// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp://
+	// These are the IANA registered port numbers for use with Docker
+	// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
+	DefaultHTTPPort = 2375 // Default HTTP Port
+	// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
+	DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
+	// DefaultUnixSocket Path for the unix socket.
+	// Docker daemon by default always listens on the default unix socket
+	DefaultUnixSocket = "/var/run/docker.sock"
+	// DefaultTCPHost constant defines the default host string used by docker for TCP sockets
+	DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
+	// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
+	DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
+	// DefaultNamedPipe defines the default named pipe used by docker on Windows
+	DefaultNamedPipe = `//./pipe/docker_engine`
+)
+
+// ValidateHost validates that the specified string is a valid host and returns it.
+func ValidateHost(val string) (string, error) {
+	host := strings.TrimSpace(val)
+	// The empty string means default and is not handled by parseDaemonHost
+	if host != "" {
+		_, err := parseDaemonHost(host)
+		if err != nil {
+			return val, err
+		}
+	}
+	// Note: unlike most flag validators, we don't return the mutated value here
+	// because we need to know what the user entered later (using ParseHost) to adjust for TLS
+	return val, nil
+}
+
+// ParseHost parses a daemon host string and sets defaults for it.
+func ParseHost(defaultToTLS bool, val string) (string, error) {
+	host := strings.TrimSpace(val)
+	if host == "" {
+		if defaultToTLS {
+			host = DefaultTLSHost
+		} else {
+			host = DefaultHost
+		}
+	} else {
+		var err error
+		host, err = parseDaemonHost(host)
+		if err != nil {
+			return val, err
+		}
+	}
+	return host, nil
+}
+
+// parseDaemonHost parses the specified address and returns an address that will be used as the host.
+// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDaemonHost(addr string) (string, error) {
+	addrParts := strings.SplitN(addr, "://", 2)
+	if len(addrParts) == 1 && addrParts[0] != "" {
+		addrParts = []string{"tcp", addrParts[0]}
+	}
+
+	switch addrParts[0] {
+	case "tcp":
+		return ParseTCPAddr(addrParts[1], DefaultTCPHost)
+	case "unix":
+		return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
+	case "npipe":
+		return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
+	case "fd":
+		return addr, nil
+	default:
+		return "", fmt.Errorf("Invalid bind address format: %s", addr)
+	}
+}
+
+// parseSimpleProtoAddr parses and validates that the specified address is a valid
+// socket address for simple protocols like unix and npipe. It returns a formatted
+// socket address, either using the address parsed from addr, or the contents of
+// defaultAddr if addr is a blank string.
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
+	addr = strings.TrimPrefix(addr, proto+"://")
+	if strings.Contains(addr, "://") {
+		return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
+	}
+	if addr == "" {
+		addr = defaultAddr
+	}
+	return fmt.Sprintf("%s://%s", proto, addr), nil
+}
+
+// ParseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
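+// For example (editor's note, tracing the logic below): with the defaults in
+// this file, ParseTCPAddr("127.0.0.1", DefaultTCPHost) yields
+// "tcp://127.0.0.1:2375", and ParseTCPAddr("", DefaultTCPHost) returns
+// DefaultTCPHost unchanged.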
+// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. + if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // try port addition once + host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) + } + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). +func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/hosts_unix.go b/vendor/github.com/ory/dockertest/v3/docker/opts/hosts_unix.go new file mode 100644 index 00000000..611407a9 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/opts/hosts_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package opts + +import "fmt" + +// DefaultHost constant defines the default host string used by docker on other hosts than Windows +var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/hosts_windows.go b/vendor/github.com/ory/dockertest/v3/docker/opts/hosts_windows.go new file mode 100644 index 00000000..684f0e12 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/opts/hosts_windows.go @@ -0,0 +1,4 @@ +package opts + +// DefaultHost constant defines the default host string used by docker on Windows +var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/ip.go b/vendor/github.com/ory/dockertest/v3/docker/opts/ip.go new file mode 100644 index 00000000..10950639 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/opts/ip.go @@ -0,0 +1,47 @@ +package opts + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. 
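+// A minimal sketch of the intended use (editor's note):
+//
+//	var ip net.IP
+//	opt := NewIPOpt(&ip, "127.0.0.1") // ip now holds 127.0.0.1
+//	_ = opt.Set("10.0.0.1")           // ip now holds 10.0.0.1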
+type IPOpt struct {
+	*net.IP
+}
+
+// NewIPOpt creates a new IPOpt from a reference net.IP and a
+// string representation of an IP. If the string is not a valid
+// IP it will fall back to the specified reference.
+func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
+	o := &IPOpt{
+		IP: ref,
+	}
+	o.Set(defaultVal)
+	return o
+}
+
+// Set sets an IPv4 or IPv6 address from a given string. If the given
+// string is not parsable as an IP address it returns an error.
+func (o *IPOpt) Set(val string) error {
+	ip := net.ParseIP(val)
+	if ip == nil {
+		return fmt.Errorf("%s is not an ip address", val)
+	}
+	*o.IP = ip
+	return nil
+}
+
+// String returns the IP address stored in the IPOpt. If the stored IP is
+// nil, it returns an empty string.
+func (o *IPOpt) String() string {
+	if *o.IP == nil {
+		return ""
+	}
+	return o.IP.String()
+}
+
+// Type returns the type of the option
+func (o *IPOpt) Type() string {
+	return "ip"
+}
diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/opts.go b/vendor/github.com/ory/dockertest/v3/docker/opts/opts.go
new file mode 100644
index 00000000..a2cc5e33
--- /dev/null
+++ b/vendor/github.com/ory/dockertest/v3/docker/opts/opts.go
@@ -0,0 +1,337 @@
+package opts
+
+import (
+	"fmt"
+	"net"
+	"path"
+	"regexp"
+	"strings"
+
+	units "github.com/docker/go-units"
+)
+
+var (
+	alphaRegexp  = regexp.MustCompile(`[a-zA-Z]`)
+	domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+)
+
+// ListOpts holds a list of values and a validation function.
+type ListOpts struct {
+	values    *[]string
+	validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+	var values []string
+	return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts with the specified values and validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+	return &ListOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
+func (opts *ListOpts) String() string {
+	if len(*opts.values) == 0 {
+		return ""
+	}
+	return fmt.Sprintf("%v", *opts.values)
+}
+
+// Set validates the input value if a validator is set, then adds it to the
+// internal slice.
+func (opts *ListOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	(*opts.values) = append((*opts.values), value)
+	return nil
+}
+
+// Delete removes the specified element from the slice.
+func (opts *ListOpts) Delete(key string) {
+	for i, k := range *opts.values {
+		if k == key {
+			(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+			return
+		}
+	}
+}
+
+// GetMap returns the content of values in a map in order to avoid
+// duplicates.
+func (opts *ListOpts) GetMap() map[string]struct{} {
+	ret := make(map[string]struct{})
+	for _, k := range *opts.values {
+		ret[k] = struct{}{}
+	}
+	return ret
+}
+
+// GetAll returns the values of slice.
+func (opts *ListOpts) GetAll() []string {
+	return (*opts.values)
+}
+
+// GetAllOrEmpty returns the values of the slice
+// or an empty slice when there are no values.
+func (opts *ListOpts) GetAllOrEmpty() []string {
+	v := *opts.values
+	if v == nil {
+		return make([]string, 0)
+	}
+	return v
+}
+
+// Get checks the existence of the specified key.
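+// For instance (editor's sketch, using ValidateIPAddress from this file):
+//
+//	l := NewListOpts(ValidateIPAddress)
+//	_ = l.Set("8.8.8.8")   // validated, then appended
+//	ok := l.Get("8.8.8.8") // ok == true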
+func (opts *ListOpts) Get(key string) bool {
+	for _, k := range *opts.values {
+		if k == key {
+			return true
+		}
+	}
+	return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+	return len((*opts.values))
+}
+
+// Type returns a string name for this Option type
+func (opts *ListOpts) Type() string {
+	return "list"
+}
+
+// WithValidator returns the ListOpts with validator set.
+func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
+	opts.validator = validator
+	return opts
+}
+
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+	Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep a reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+	name string
+	ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+	return &NamedListOpts{
+		name:     name,
+		ListOpts: *NewListOptsRef(values, validator),
+	}
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+	return o.name
+}
+
+// MapOpts holds a map of values and a validation function.
+type MapOpts struct {
+	values    map[string]string
+	validator ValidatorFctType
+}
+
+// Set validates the input value if a validator is set, then adds it to the
+// internal map by splitting on '='.
+func (opts *MapOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	vals := strings.SplitN(value, "=", 2)
+	if len(vals) == 1 {
+		(opts.values)[vals[0]] = ""
+	} else {
+		(opts.values)[vals[0]] = vals[1]
+	}
+	return nil
+}
+
+// GetAll returns the values of MapOpts as a map.
+func (opts *MapOpts) GetAll() map[string]string {
+	return opts.values
+}
+
+func (opts *MapOpts) String() string {
+	return fmt.Sprintf("%v", opts.values)
+}
+
+// Type returns a string name for this Option type
+func (opts *MapOpts) Type() string {
+	return "map"
+}
+
+// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+	if values == nil {
+		values = make(map[string]string)
+	}
+	return &MapOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep a reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+	name string
+	MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+	return &NamedMapOpts{
+		name:    name,
+		MapOpts: *NewMapOpts(values, validator),
+	}
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+	return o.name
+}
+
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
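+// For instance (editor's sketch), ValidateIPAddress below satisfies this type:
+//
+//	var v ValidatorFctType = ValidateIPAddress
+//	out, _ := v(" 8.8.8.8 ") // out == "8.8.8.8"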
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of strings and/or an error
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+	var ip = net.ParseIP(strings.TrimSpace(val))
+	if ip != nil {
+		return ip.String(), nil
+	}
+	return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateDNSSearch validates domain for resolvconf search configuration.
+// A zero length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+	if val = strings.Trim(val, " "); val == "." {
+		return val, nil
+	}
+	return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+	if alphaRegexp.FindString(val) == "" {
+		return "", fmt.Errorf("%s is not a valid domain", val)
+	}
+	ns := domainRegexp.FindSubmatch([]byte(val))
+	if len(ns) > 0 && len(ns[1]) < 255 {
+		return string(ns[1]), nil
+	}
+	return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateLabel validates that the specified string is a valid label, and returns it.
+// Labels are in the form of key=value.
+func ValidateLabel(val string) (string, error) {
+	if strings.Count(val, "=") < 1 {
+		return "", fmt.Errorf("bad attribute format: %s", val)
+	}
+	return val, nil
+}
+
+// ValidateSingleGenericResource validates that a single entry in the
+// generic resource list is valid.
+// e.g. 'GPU=UID1' is valid, however 'GPU:UID1' or 'UID1' isn't
+func ValidateSingleGenericResource(val string) (string, error) {
+	if strings.Count(val, "=") < 1 {
+		return "", fmt.Errorf("invalid node-generic-resource format `%s` expected `name=value`", val)
+	}
+	return val, nil
+}
+
+// ParseLink parses and validates the specified string as a link format (name:alias)
+func ParseLink(val string) (string, string, error) {
+	if val == "" {
+		return "", "", fmt.Errorf("empty string specified for links")
+	}
+	arr := strings.Split(val, ":")
+	if len(arr) > 2 {
+		return "", "", fmt.Errorf("bad format for links: %s", val)
+	}
+	if len(arr) == 1 {
+		return val, val, nil
+	}
+	// This is kept because we can actually get a HostConfig with links
+	// from an already created container and the format is not `foo:bar`
+	// but `/foo:/c1/bar`
+	if strings.HasPrefix(arr[0], "/") {
+		_, alias := path.Split(arr[1])
+		return arr[0][1:], alias, nil
+	}
+	return arr[0], arr[1], nil
+}
+
+// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
+type MemBytes int64
+
+// String returns the string format of the human readable memory bytes
+func (m *MemBytes) String() string {
+	// NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
+	// We return "0" in case value is 0 here so that the default value is hidden.
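+	// Editor's example (relying on go-units, which this type already uses):
+	// MemBytes(134217728).String() returns "128MiB", while MemBytes(0).String()
+	// returns "0".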
+	// (Sometimes "default 0 B" is actually misleading)
+	if m.Value() != 0 {
+		return units.BytesSize(float64(m.Value()))
+	}
+	return "0"
+}
+
+// Set sets the value of the MemBytes by passing a string
+func (m *MemBytes) Set(value string) error {
+	val, err := units.RAMInBytes(value)
+	*m = MemBytes(val)
+	return err
+}
+
+// Type returns the type
+func (m *MemBytes) Type() string {
+	return "bytes"
+}
+
+// Value returns the value in int64
+func (m *MemBytes) Value() int64 {
+	return int64(*m)
+}
+
+// UnmarshalJSON is the customized unmarshaler for MemBytes
+func (m *MemBytes) UnmarshalJSON(s []byte) error {
+	if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
+		return fmt.Errorf("invalid size: %q", s)
+	}
+	val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
+	*m = MemBytes(val)
+	return err
+}
diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/opts_unix.go b/vendor/github.com/ory/dockertest/v3/docker/opts/opts_unix.go
new file mode 100644
index 00000000..8770e384
--- /dev/null
+++ b/vendor/github.com/ory/dockertest/v3/docker/opts/opts_unix.go
@@ -0,0 +1,22 @@
+// +build !windows
+
+package opts
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
+const DefaultHTTPHost = "localhost"
+
+// MountParser parses mount path.
+func MountParser(mount string) (source, destination string, err error) {
+	sd := strings.Split(mount, ":")
+	if len(sd) == 2 {
+		return sd[0], sd[1], nil
+	}
+	return "", "", errors.Wrap(fmt.Errorf("invalid mount format: got %s, expected <src>:<dst>", mount), "")
+}
diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/opts_windows.go b/vendor/github.com/ory/dockertest/v3/docker/opts/opts_windows.go
new file mode 100644
index 00000000..657ab514
--- /dev/null
+++ b/vendor/github.com/ory/dockertest/v3/docker/opts/opts_windows.go
@@ -0,0 +1,72 @@
+package opts
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
+// @jhowardmsft, @swernli.
+//
+// On Windows, this mitigates a problem with the default options of running
+// a docker client against a local docker daemon on TP5.
+//
+// What was found is that if the default host is "localhost", even if the client
+// (and daemon as this is local) is not physically on a network, and the DNS
+// cache is flushed (ipconfig /flushdns), then the client will pause for
+// exactly one second when connecting to the daemon for calls. For example
+// using docker run windowsservercore cmd, the CLI will send a create followed
+// by an attach. You see the delay between the attach finishing and the attach
+// being seen by the daemon.
+//
+// Here's some daemon debug logs with additional debug spew put in. The
+// AfterWriteJSON log is the very last thing the daemon does as part of the
+// create call. The POST /attach is the second CLI call. Notice the second
+// time gap.
+//
+// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
+// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
+// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
+// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
+// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
+// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
+// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
+// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
+// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
+// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
+// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
+// ... 1 second gap here....
+// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
+// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
+//
+// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
+// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
+// the Windows networking stack is supposed to resolve "localhost" internally,
+// without hitting DNS, or even reading the hosts file (which is why localhost
+// is commented out in the hosts file on Windows).
+//
+// We have validated that working around this using the actual IPv4 localhost
+// address does not cause the delay.
+//
+// This does not occur with the docker client built with 1.4.3 on the same
+// Windows build, regardless of whether the daemon is built using 1.5.1
+// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
+// on a cross-compiled Windows binary (from Linux).
+//
+// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
+// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
+// explicitly.

+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
+const DefaultHTTPHost = "127.0.0.1"
+
+// MountParser parses mount path.
+func MountParser(mount string) (source, destination string, err error) {
+	sd := strings.Split(mount, ":")
+	if len(sd) == 3 {
+		return sd[0] + sd[1], sd[2], nil
+	}
+	return "", "", errors.Wrap(fmt.Errorf("invalid mount format: got %s, expected <drive>:<src>:<dst>", mount), "")
+}
diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/quotedstring.go b/vendor/github.com/ory/dockertest/v3/docker/opts/quotedstring.go
new file mode 100644
index 00000000..09c68a52
--- /dev/null
+++ b/vendor/github.com/ory/dockertest/v3/docker/opts/quotedstring.go
@@ -0,0 +1,37 @@
+package opts
+
+// QuotedString is a string that may have extra quotes around the value. The
+// quotes are stripped from the value.
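+// A quick sketch (editor's note):
+//
+//	v := `"hello"`
+//	q := NewQuotedString(&v)
+//	_ = q.Set(`"hello"`) // v == "hello": the surrounding quotes are stripped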
+type QuotedString struct { + value *string +} + +// Set sets a new value +func (s *QuotedString) Set(val string) error { + *s.value = trimQuotes(val) + return nil +} + +// Type returns the type of the value +func (s *QuotedString) Type() string { + return "string" +} + +func (s *QuotedString) String() string { + return *s.value +} + +func trimQuotes(value string) string { + lastIndex := len(value) - 1 + for _, char := range []byte{'\'', '"'} { + if value[0] == char && value[lastIndex] == char { + return value[1:lastIndex] + } + } + return value +} + +// NewQuotedString returns a new quoted string option +func NewQuotedString(value *string) *QuotedString { + return &QuotedString{value: value} +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/runtime.go b/vendor/github.com/ory/dockertest/v3/docker/opts/runtime.go new file mode 100644 index 00000000..51374977 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/opts/runtime.go @@ -0,0 +1,79 @@ +package opts + +import ( + "fmt" + "strings" + + "github.com/ory/dockertest/v3/docker/types" +) + +// RuntimeOpt defines a map of Runtimes +type RuntimeOpt struct { + name string + stockRuntimeName string + values *map[string]types.Runtime +} + +// NewNamedRuntimeOpt creates a new RuntimeOpt +func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { + if ref == nil { + ref = &map[string]types.Runtime{} + } + return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *RuntimeOpt) Name() string { + return o.name +} + +// Set validates and updates the list of Runtimes +func (o *RuntimeOpt) Set(val string) error { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.TrimSpace(parts[0]) + parts[1] = strings.TrimSpace(parts[1]) + if parts[0] == "" || parts[1] == "" { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.ToLower(parts[0]) + if parts[0] == o.stockRuntimeName { + return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) + } + + if _, ok := (*o.values)[parts[0]]; ok { + return fmt.Errorf("runtime '%s' was already defined", parts[0]) + } + + (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} + + return nil +} + +// String returns Runtime values as a string. 
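+// For example (editor's sketch; the runtime name and path are hypothetical):
+//
+//	r := NewNamedRuntimeOpt("runtimes", nil, "runc")
+//	_ = r.Set("kata=/usr/bin/kata-runtime") // registers the "kata" runtime
+//	err := r.Set("runc=/usr/bin/runc")      // error: "runc" is reserved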
+func (o *RuntimeOpt) String() string { + var out []string + for k := range *o.values { + out = append(out, k) + } + + return fmt.Sprintf("%v", out) +} + +// GetMap returns a map of Runtimes (name: path) +func (o *RuntimeOpt) GetMap() map[string]types.Runtime { + if o.values != nil { + return *o.values + } + + return map[string]types.Runtime{} +} + +// Type returns the type of the option +func (o *RuntimeOpt) Type() string { + return "runtime" +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/opts/ulimit.go b/vendor/github.com/ory/dockertest/v3/docker/opts/ulimit.go new file mode 100644 index 00000000..a2a65fcd --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/opts/ulimit.go @@ -0,0 +1,81 @@ +package opts + +import ( + "fmt" + + "github.com/docker/go-units" +) + +// UlimitOpt defines a map of Ulimits +type UlimitOpt struct { + values *map[string]*units.Ulimit +} + +// NewUlimitOpt creates a new UlimitOpt +func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &UlimitOpt{ref} +} + +// Set validates a Ulimit and sets its name as a key in UlimitOpt +func (o *UlimitOpt) Set(val string) error { + l, err := units.ParseUlimit(val) + if err != nil { + return err + } + + (*o.values)[l.Name] = l + + return nil +} + +// String returns Ulimit values as a string. +func (o *UlimitOpt) String() string { + var out []string + for _, v := range *o.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to Ulimits. +func (o *UlimitOpt) GetList() []*units.Ulimit { + var ulimits []*units.Ulimit + for _, v := range *o.values { + ulimits = append(ulimits, v) + } + + return ulimits +} + +// Type returns the option type +func (o *UlimitOpt) Type() string { + return "ulimit" +} + +// NamedUlimitOpt defines a named map of Ulimits +type NamedUlimitOpt struct { + name string + UlimitOpt +} + +var _ NamedOption = &NamedUlimitOpt{} + +// NewNamedUlimitOpt creates a new NamedUlimitOpt +func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &NamedUlimitOpt{ + name: name, + UlimitOpt: *NewUlimitOpt(ref), + } +} + +// Name returns the option name +func (o *NamedUlimitOpt) Name() string { + return o.name +} diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/README.md similarity index 100% rename from vendor/github.com/docker/docker/pkg/archive/README.md rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/README.md diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive.go similarity index 92% rename from vendor/github.com/docker/docker/pkg/archive/archive.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive.go index bb623fa8..0d3974d2 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive.go @@ -1,4 +1,4 @@ -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "archive/tar" @@ -17,13 +17,12 @@ import ( "strconv" "strings" "syscall" - "time" - "github.com/docker/docker/pkg/fileutils" - 
"github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" + "github.com/ory/dockertest/v3/docker/pkg/fileutils" + "github.com/ory/dockertest/v3/docker/pkg/idtools" + "github.com/ory/dockertest/v3/docker/pkg/ioutils" + "github.com/ory/dockertest/v3/docker/pkg/pools" + "github.com/ory/dockertest/v3/docker/pkg/system" "github.com/sirupsen/logrus" ) @@ -52,7 +51,7 @@ type ( NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap - ChownOpts *idtools.Identity + ChownOpts *idtools.IDPair IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack @@ -72,13 +71,13 @@ type ( // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMapping *idtools.IdentityMapping + Untar func(io.Reader, string, *TarOptions) error + IDMappingsVar *idtools.IDMappings } -// NewDefaultArchiver returns a new Archiver without any IdentityMapping +// NewDefaultArchiver returns a new Archiver without any IDMappings func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} + return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}} } // breakoutError is used to differentiate errors related to breaking out @@ -127,7 +126,6 @@ func IsArchivePath(path string) bool { if err != nil { return false } - defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil @@ -362,12 +360,12 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro if err != nil { return nil, err } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) - hdr.Name = canonicalTarName(name, fi.IsDir()) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } @@ -420,9 +418,9 @@ type tarAppender struct { Buffer *bufio.Writer // for hardlink mapping - SeenFiles map[uint64]string - IdentityMapping *idtools.IdentityMapping - ChownOpts *idtools.Identity + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair // For packing and unpacking whiteout files in the // non standard format. 
The whiteout files defined @@ -431,26 +429,29 @@ type tarAppender struct { WhiteoutConverter tarWhiteoutConverter } -func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - IdentityMapping: idMapping, - ChownOpts: chownOpts, + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent posix-style //path for files and directories to be archived regardless of the platform. -func canonicalTarName(name string, isDir bool) string { - name = CanonicalTarNameForPath(name) +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } - return name + return name, nil } // addTarFile adds to the tar archive a file from `path` as `name` @@ -502,12 +503,14 @@ func (ta *tarAppender) addTarFile(path, name string) error { //handle re-mapping container ID mappings back to host ID mappings before //writing tar headers/files. We skip whiteout files because they were written //by the kernel and already have proper ownership relative to the host - if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { + if !isOverlayWhiteout && + !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && + !ta.IDMappings.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } - hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) if err != nil { return err } @@ -570,7 +573,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) @@ -650,7 +653,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { - chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err @@ -660,13 +663,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP || err == syscall.EPERM { + if err == syscall.ENOTSUP { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. 
However only // ENOTSUP should be emitted in that case, otherwise we still // bail. - // EPERM occurs if modifying xattrs is not allowed. This can - // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } @@ -745,7 +746,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) compressWriter, options.ChownOpts, ) - ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) defer func() { // Make sure to check the error on Close. @@ -901,9 +902,9 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMapping.RootPair() - whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) // Iterate through the files in the archive. loop: @@ -981,7 +982,7 @@ loop: } trBuf.Reset(tr) - if err := remapIDs(idMapping, hdr); err != nil { + if err := remapIDs(idMappings, hdr); err != nil { return err } @@ -1068,8 +1069,8 @@ func (archiver *Archiver) TarUntar(src, dst string) error { } defer archive.Close() options := &TarOptions{ - UIDMaps: archiver.IDMapping.UIDs(), - GIDMaps: archiver.IDMapping.GIDs(), + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), } return archiver.Untar(archive, dst, options) } @@ -1082,8 +1083,8 @@ func (archiver *Archiver) UntarPath(src, dst string) error { } defer archive.Close() options := &TarOptions{ - UIDMaps: archiver.IDMapping.UIDs(), - GIDMaps: archiver.IDMapping.GIDs(), + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), } return archiver.Untar(archive, dst, options) } @@ -1104,7 +1105,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner - rootIDs := archiver.IDMapping.RootPair() + rootIDs := archiver.IDMappingsVar.RootPair() // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { @@ -1157,14 +1158,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { if err != nil { return err } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - if err := remapIDs(archiver.IDMapping, hdr); err != nil { + if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { return err } @@ -1192,13 +1189,13 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { return err } -// IdentityMapping returns the IdentityMapping of the archiver. -func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { - return archiver.IDMapping +// IDMappings returns the IDMappings of the archiver. 
+func (archiver *Archiver) IDMappings() *idtools.IDMappings { + return archiver.IDMappingsVar } -func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { - ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) +func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { + ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err } diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_linux.go new file mode 100644 index 00000000..8d584b49 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_linux.go @@ -0,0 +1,92 @@ +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + + "github.com/ory/dockertest/v3/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, err + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_other.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_other.go new file mode 
100644 index 00000000..5057a0a5 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_unix.go similarity index 67% rename from vendor/github.com/docker/docker/pkg/archive/archive_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_unix.go index 1eec912b..ccd92103 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_unix.go @@ -1,17 +1,18 @@ // +build !windows -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "archive/tar" + "bufio" "errors" + "fmt" "os" "path/filepath" "syscall" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" + "github.com/ory/dockertest/v3/docker/pkg/idtools" + "github.com/ory/dockertest/v3/docker/pkg/system" "golang.org/x/sys/unix" ) @@ -32,8 +33,8 @@ func getWalkRoot(srcPath string, include string) string { // CanonicalTarNameForPath returns platform-specific filepath // to canonical posix-style path for tar archival. p is relative // path. -func CanonicalTarNameForPath(p string) string { - return p // already unix-style +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style } // chmodTarEntry is used to adjust the file permissions used in tar header based @@ -68,19 +69,19 @@ func getInodeFromStat(stat interface{}) (inode uint64, err error) { return } -func getFileUIDGID(stat interface{}) (idtools.Identity, error) { +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { s, ok := stat.(*syscall.Stat_t) if !ok { - return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") } - return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil } // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - if rsystem.RunningInUserNS() { + if runningInUserNS() { // cannot create a device if running in user namespace return nil } @@ -112,3 +113,33 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { } return nil } + +// runningInUserNS detects whether we are currently running in a user namespace. 
+// Copied from github.com/opencontainers/runc/libcontainer/system/linux.go +// Copied from github.com/lxc/lxd/shared/util.go +func runningInUserNS() bool { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return false + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return false + } + + line := string(l) + var a, b, c int64 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + /* + * We assume we are in the initial user namespace if we have a full + * range - 4294967295 uids starting at uid 0. + */ + if a == 0 && b == 0 && c == 4294967295 { + return false + } + return true +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_windows.go similarity index 70% rename from vendor/github.com/docker/docker/pkg/archive/archive_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_windows.go index ae6b89fd..5e3b34b3 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/archive_windows.go @@ -1,12 +1,14 @@ -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "archive/tar" + "fmt" "os" "path/filepath" + "strings" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/longpath" + "github.com/ory/dockertest/v3/docker/pkg/idtools" + "github.com/ory/dockertest/v3/docker/pkg/longpath" ) // fixVolumePathPrefix does platform specific processing to ensure that if @@ -24,8 +26,16 @@ func getWalkRoot(srcPath string, include string) string { // CanonicalTarNameForPath returns platform-specific filepath // to canonical posix-style path for tar archival. p is relative // path. -func CanonicalTarNameForPath(p string) string { - return filepath.ToSlash(p) +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. 
Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + } // chmodTarEntry is used to adjust the file permissions used in tar header based @@ -61,7 +71,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { return nil } -func getFileUIDGID(stat interface{}) (idtools.Identity, error) { +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { // no notion of file ownership mapping yet on Windows - return idtools.Identity{UID: 0, GID: 0}, nil + return idtools.IDPair{0, 0}, nil } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes.go similarity index 94% rename from vendor/github.com/docker/docker/pkg/archive/changes.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes.go index aedb91b0..15abebc9 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes.go @@ -1,4 +1,4 @@ -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "archive/tar" @@ -13,9 +13,9 @@ import ( "syscall" "time" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" + "github.com/ory/dockertest/v3/docker/pkg/idtools" + "github.com/ory/dockertest/v3/docker/pkg/pools" + "github.com/ory/dockertest/v3/docker/pkg/system" "github.com/sirupsen/logrus" ) @@ -63,16 +63,12 @@ func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } func (c changesByPath) Len() int { return len(c) } func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } -// Gnu tar doesn't have sub-second mtime precision. The go tar -// writer (1.10+) does when using PAX format, but we round times to seconds -// to ensure archives have the same hashes for backwards compatibility. -// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. -// -// Non-sub-second is problematic when we apply changes via tar -// files. 
We handle this by comparing for exact times, *or* same +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same // second count and either a or b having exactly 0 nanoseconds func sameFsTime(a, b time.Time) bool { - return a.Equal(b) || + return a == b || (a.Unix() == b.Unix() && (a.Nanosecond() == 0 || b.Nanosecond() == 0)) } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_linux.go similarity index 89% rename from vendor/github.com/docker/docker/pkg/archive/changes_linux.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_linux.go index f8792b3d..73b90a4c 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_linux.go @@ -1,4 +1,4 @@ -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "bytes" @@ -9,7 +9,7 @@ import ( "syscall" "unsafe" - "github.com/docker/docker/pkg/system" + "github.com/ory/dockertest/v3/docker/pkg/system" "golang.org/x/sys/unix" ) @@ -284,3 +284,30 @@ func clen(n []byte) int { } return len(n) } + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, overlayDeletedFile, nil) +} + +func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert + return path, nil + } + } + if fi.Mode()&os.ModeDir != 0 { + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if len(opaque) == 1 && opaque[0] == 'y' { + return path, nil + } + } + + return "", nil + +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_other.go similarity index 93% rename from vendor/github.com/docker/docker/pkg/archive/changes_other.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_other.go index ba744741..e6ea91b5 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_other.go @@ -1,6 +1,6 @@ // +build !linux -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "fmt" @@ -9,7 +9,7 @@ import ( "runtime" "strings" - "github.com/docker/docker/pkg/system" + "github.com/ory/dockertest/v3/docker/pkg/system" ) func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_unix.go similarity index 59% rename from vendor/github.com/docker/docker/pkg/archive/changes_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_unix.go index 06217b71..11552957 100644 --- 
a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
+++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_unix.go
@@ -1,12 +1,12 @@
 // +build !windows
 
-package archive // import "github.com/docker/docker/pkg/archive"
+package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive"
 
 import (
 	"os"
 	"syscall"
 
-	"github.com/docker/docker/pkg/system"
+	"github.com/ory/dockertest/v3/docker/pkg/system"
 	"golang.org/x/sys/unix"
 )
 
@@ -16,13 +16,7 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
 		oldStat.UID() != newStat.UID() ||
 		oldStat.GID() != newStat.GID() ||
 		oldStat.Rdev() != newStat.Rdev() ||
-		// Don't look at size or modification time for dirs, its not a good
-		// measure of change. See https://github.com/moby/moby/issues/9874
-		// for a description of the issue with modification time, and
-		// https://github.com/moby/moby/pull/11422 for the change.
-		// (Note that in the Windows implementation of this function,
-		// modification time IS taken as a change). See
-		// https://github.com/moby/moby/pull/37982 for more information.
+		// Don't look at size for dirs, it's not a good measure of change
 		(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
 			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
 		return true
diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_windows.go
new file mode 100644
index 00000000..a56c4c1e
--- /dev/null
+++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive"
+
+import (
+	"os"
+
+	"github.com/ory/dockertest/v3/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.Mtim() != newStat.Mtim() ||
+		oldStat.Mode() != newStat.Mode() ||
+		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+	return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/copy.go
similarity index 96%
rename from vendor/github.com/docker/docker/pkg/archive/copy.go
rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/copy.go
index 57fddac0..658b4513 100644
--- a/vendor/github.com/docker/docker/pkg/archive/copy.go
+++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/copy.go
@@ -1,4 +1,4 @@
-package archive // import "github.com/docker/docker/pkg/archive"
+package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive"
 
 import (
 	"archive/tar"
@@ -9,7 +9,7 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/docker/docker/pkg/system"
+	"github.com/ory/dockertest/v3/docker/pkg/system"
 	"github.com/sirupsen/logrus"
 )
 
@@ -336,14 +336,6 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read
 			return
 		}
 
-		// srcContent tar stream, as served by TarWithOptions(), is
-		// definitely in PAX format, but tar.Next() mistakenly guesses it
-		// as USTAR, which creates a problem: if the
newBase is >100 - // characters long, WriteHeader() returns an error like - // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...". - // - // To fix, set the format to PAX here. See docker/for-linux issue #484. - hdr.Format = tar.FormatPAX hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) if hdr.Typeflag == tar.TypeLink { hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/copy_unix.go similarity index 62% rename from vendor/github.com/docker/docker/pkg/archive/copy_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/copy_unix.go index 3958364f..edb3b93c 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/copy_unix.go @@ -1,6 +1,6 @@ // +build !windows -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "path/filepath" diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/copy_windows.go similarity index 58% rename from vendor/github.com/docker/docker/pkg/archive/copy_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/copy_windows.go index a878d1ba..2cc7ea18 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/copy_windows.go @@ -1,4 +1,4 @@ -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "path/filepath" diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/diff.go similarity index 93% rename from vendor/github.com/docker/docker/pkg/archive/diff.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/diff.go index 146e21fe..656c82ee 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/diff.go @@ -1,4 +1,4 @@ -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "archive/tar" @@ -10,9 +10,9 @@ import ( "runtime" "strings" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" + "github.com/ory/dockertest/v3/docker/pkg/idtools" + "github.com/ory/dockertest/v3/docker/pkg/pools" + "github.com/ory/dockertest/v3/docker/pkg/system" "github.com/sirupsen/logrus" ) @@ -33,7 +33,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) @@ -192,7 +192,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, srcData = tmpFile } - if err := remapIDs(idMapping, srcHdr); err != nil { + if err := remapIDs(idMappings, srcHdr); err != 
nil { return 0, err } @@ -240,21 +240,17 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp dest = filepath.Clean(dest) // We need to be able to set any perms - if runtime.GOOS != "windows" { - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) + oldmask, err := system.Umask(0) + if err != nil { + return 0, err } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform if decompress { - decompLayer, err := DecompressStream(layer) + layer, err = DecompressStream(layer) if err != nil { return 0, err } - defer decompLayer.Close() - layer = decompLayer } return UnpackLayer(dest, layer, options) } diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/time_linux.go similarity index 68% rename from vendor/github.com/docker/docker/pkg/archive/time_linux.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/time_linux.go index 797143ee..99f772bf 100644 --- a/vendor/github.com/docker/docker/pkg/archive/time_linux.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/time_linux.go @@ -1,4 +1,4 @@ -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "syscall" @@ -9,7 +9,7 @@ func timeToTimespec(time time.Time) (ts syscall.Timespec) { if time.IsZero() { // Return UTIME_OMIT special value ts.Sec = 0 - ts.Nsec = (1 << 30) - 2 + ts.Nsec = ((1 << 30) - 2) return } return syscall.NsecToTimespec(time.UnixNano()) diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/time_unsupported.go similarity index 73% rename from vendor/github.com/docker/docker/pkg/archive/time_unsupported.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/time_unsupported.go index f58bf227..58ff9219 100644 --- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/time_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/whiteouts.go similarity index 93% rename from vendor/github.com/docker/docker/pkg/archive/whiteouts.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/whiteouts.go index 4c072a87..3c73d2c2 100644 --- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/whiteouts.go @@ -1,4 +1,4 @@ -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" // Whiteouts are files with a special meaning for the layered filesystem. // Docker uses AUFS whiteout files inside exported archives. 
In other diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/wrap.go similarity index 94% rename from vendor/github.com/docker/docker/pkg/archive/wrap.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/archive/wrap.go index 85435694..72d24de6 100644 --- a/vendor/github.com/docker/docker/pkg/archive/wrap.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/archive/wrap.go @@ -1,4 +1,4 @@ -package archive // import "github.com/docker/docker/pkg/archive" +package archive // import "github.com/ory/dockertest/v3/docker/pkg/archive" import ( "archive/tar" diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils.go similarity index 97% rename from vendor/github.com/docker/docker/pkg/fileutils/fileutils.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils.go index 34f1c726..d8a631b3 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils.go @@ -1,4 +1,4 @@ -package fileutils // import "github.com/docker/docker/pkg/fileutils" +package fileutils // import "github.com/ory/dockertest/v3/docker/pkg/fileutils" import ( "errors" @@ -13,7 +13,7 @@ import ( "github.com/sirupsen/logrus" ) -// PatternMatcher allows checking paths against a list of patterns +// PatternMatcher allows checking paths against a list of patterns type PatternMatcher struct { patterns []*Pattern exclusions bool @@ -106,7 +106,7 @@ func (pm *PatternMatcher) Patterns() []*Pattern { return pm.patterns } -// Pattern defines a single regexp used to filter file paths. +// Pattern defines a single regexp used to filter file paths.
type Pattern struct { cleanedPattern string dirs []string diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils_darwin.go similarity index 84% rename from vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils_darwin.go index e40cc271..1a57911c 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils_darwin.go @@ -1,4 +1,4 @@ -package fileutils // import "github.com/docker/docker/pkg/fileutils" +package fileutils // import "github.com/ory/dockertest/v3/docker/pkg/fileutils" import ( "os" diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils_unix.go similarity index 84% rename from vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils_unix.go index 565396f1..0b2ce977 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils_unix.go @@ -1,6 +1,6 @@ // +build linux freebsd -package fileutils // import "github.com/docker/docker/pkg/fileutils" +package fileutils // import "github.com/ory/dockertest/v3/docker/pkg/fileutils" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils_windows.go similarity index 62% rename from vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils_windows.go index 3f1ebb65..0be14086 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/fileutils/fileutils_windows.go @@ -1,4 +1,4 @@ -package fileutils // import "github.com/docker/docker/pkg/fileutils" +package fileutils // import "github.com/ory/dockertest/v3/docker/pkg/fileutils" // GetTotalUsedFds Returns the number of used File Descriptors. Not supported // on Windows. diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_linux.go new file mode 100644 index 00000000..adb5f8be --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_linux.go @@ -0,0 +1,21 @@ +package homedir // import "github.com/ory/dockertest/v3/docker/pkg/homedir" + +import ( + "os" + + "github.com/ory/dockertest/v3/docker/pkg/idtools" +) + +// GetStatic returns the home directory for the current user without calling +// os/user.Current(). This is useful for static-linked binary on glibc-based +// system, because a call to os/user.Current() in a static binary leads to +// segfault due to a glibc issue that won't be fixed in a short term. 
+// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +func GetStatic() (string, error) { + uid := os.Getuid() + usr, err := idtools.LookupUID(uid) + if err != nil { + return "", err + } + return usr.Home, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_others.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_others.go new file mode 100644 index 00000000..fd38f920 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_others.go @@ -0,0 +1,13 @@ +// +build !linux + +package homedir // import "github.com/ory/dockertest/v3/docker/pkg/homedir" + +import ( + "errors" +) + +// GetStatic is not needed for non-linux systems. +// (Precisely, it is needed only for glibc-based linux systems.) +func GetStatic() (string, error) { + return "", errors.New("homedir.GetStatic() is not supported on this system") +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_unix.go new file mode 100644 index 00000000..214c84a1 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package homedir // import "github.com/ory/dockertest/v3/docker/pkg/homedir" + +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 00000000..b1cd7c66 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir // import "github.com/ory/dockertest/v3/docker/pkg/homedir" + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
+func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/idtools.go similarity index 83% rename from vendor/github.com/docker/docker/pkg/idtools/idtools.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/idtools.go index 230422ea..0aad59fd 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/idtools.go @@ -1,4 +1,4 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" +package idtools // import "github.com/ory/dockertest/v3/docker/pkg/idtools" import ( "bufio" @@ -30,30 +30,30 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } const ( - subuidFileName = "/etc/subuid" - subgidFileName = "/etc/subgid" + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" ) // MkdirAllAndChown creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. -func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, true, true) +func MkdirAllAndChown(path string, mode os.FileMode, owner IDPair) error { + return mkdirAs(path, mode, owner.UID, owner.GID, true, true) } // MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. // If the directory already exists, this function still changes ownership. // Note that unlike os.Mkdir(), this function does not return IsExist error // in case path already exists. -func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, false, true) +func MkdirAndChown(path string, mode os.FileMode, owner IDPair) error { + return mkdirAs(path, mode, owner.UID, owner.GID, false, true) } // MkdirAllAndChownNew creates a directory (include any along the path) and then modifies // ownership ONLY of newly created directories to the requested uid/gid. If the // directories along the path exist, no change of ownership will be performed -func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, true, false) +func MkdirAllAndChownNew(path string, mode os.FileMode, owner IDPair) error { + return mkdirAs(path, mode, owner.UID, owner.GID, true, false) } // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. 
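Note for reviewers: the idtools.go hunk above changes the Mkdir*AndChown helpers to take this fork's IDPair, and the next hunk renames the backing types (IdentityMapping becomes IDMappings). The following is a minimal sketch of how the remapped pair flows through that API; it is not part of the patch, and the "test" user, the /var/lib/example path, and the package main wrapper are illustrative assumptions.

package main

import (
	"log"

	"github.com/ory/dockertest/v3/docker/pkg/idtools"
)

func main() {
	// Build uid/gid remapping ranges for a hypothetical "test" user from
	// /etc/subuid and /etc/subgid; this fails if no ranges are configured.
	mappings, err := idtools.NewIDMappings("test", "test")
	if err != nil {
		log.Fatal(err)
	}

	// RootPair yields the host uid/gid that container root remaps to.
	root := mappings.RootPair()

	// Post-rename signature: the owner is a single IDPair, and only newly
	// created path components are chowned by the *New variant.
	if err := idtools.MkdirAllAndChownNew("/var/lib/example", 0755, root); err != nil {
		log.Fatal(err)
	}
}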
@@ -102,23 +102,22 @@ func toHost(contID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) } -// Identity is either a UID and GID pair or a SID (but not both) -type Identity struct { +// IDPair is a UID and GID pair +type IDPair struct { UID int GID int - SID string } -// IdentityMapping contains a mappings of UIDs and GIDs -type IdentityMapping struct { +// IDMappings contains mappings of UIDs and GIDs +type IDMappings struct { uids []IDMap gids []IDMap } -// NewIdentityMapping takes a requested user and group name and +// NewIDMappings takes a requested user and group name and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair -func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) { +func NewIDMappings(username, groupname string) (*IDMappings, error) { subuidRanges, err := parseSubuid(username) if err != nil { return nil, err @@ -134,7 +133,7 @@ func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) { return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) } - return &IdentityMapping{ + return &IDMappings{ uids: createIDMap(subuidRanges), gids: createIDMap(subgidRanges), }, nil @@ -142,21 +141,21 @@ func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) { // NewIDMappingsFromMaps creates a new mapping from two slices // Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { - return &IdentityMapping{uids: uids, gids: gids} +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { + return &IDMappings{uids: uids, gids: gids} } // RootPair returns a uid and gid pair for the root user. The error is ignored // because a root user always exists, and the defaults are correct when the uid // and gid maps are empty. -func (i *IdentityMapping) RootPair() Identity { +func (i *IDMappings) RootPair() IDPair { uid, gid, _ := GetRootUIDGID(i.uids, i.gids) - return Identity{UID: uid, GID: gid} + return IDPair{UID: uid, GID: gid} } // ToHost returns the host UID and GID for the container uid, gid.
// Remapping is only performed if the ids aren't already the remapped root ids -func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { +func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { var err error target := i.RootPair() @@ -174,7 +173,7 @@ func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { } // ToContainer returns the container UID and GID for the host uid and gid -func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { +func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { uid, err := toContainer(pair.UID, i.uids) if err != nil { return -1, -1, err @@ -184,19 +183,19 @@ func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { } // Empty returns true if there are no id mappings -func (i *IdentityMapping) Empty() bool { +func (i *IDMappings) Empty() bool { return len(i.uids) == 0 && len(i.gids) == 0 } // UIDs return the UID mapping // TODO: remove this once everything has been refactored to use pairs -func (i *IdentityMapping) UIDs() []IDMap { +func (i *IDMappings) UIDs() []IDMap { return i.uids } // GIDs return the UID mapping // TODO: remove this once everything has been refactored to use pairs -func (i *IdentityMapping) GIDs() []IDMap { +func (i *IDMappings) GIDs() []IDMap { return i.gids } diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/idtools_unix.go similarity index 94% rename from vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/idtools_unix.go index fb239743..a82f2e93 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/idtools_unix.go @@ -1,6 +1,6 @@ // +build !windows -package idtools // import "github.com/docker/docker/pkg/idtools" +package idtools // import "github.com/ory/dockertest/v3/docker/pkg/idtools" import ( "bytes" @@ -12,7 +12,7 @@ import ( "sync" "syscall" - "github.com/docker/docker/pkg/system" + "github.com/ory/dockertest/v3/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" ) @@ -21,12 +21,11 @@ var ( getentCmd string ) -func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't // chown the full directory path if it exists - var paths []string stat, err := system.Stat(path) @@ -39,7 +38,7 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting } // short-circuit--we were called with an existing directory and chown was requested - return lazyChown(path, owner.UID, owner.GID, stat) + return lazyChown(path, ownerUID, ownerGID, stat) } if os.IsNotExist(err) { @@ -70,7 +69,7 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { - if err := lazyChown(pathComponent, owner.UID, owner.GID, nil); err != nil { + if err := lazyChown(pathComponent, ownerUID, ownerGID, nil); err != nil { return err } } @@ -79,7 +78,7 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, pair Identity) bool { +func CanAccess(path string, pair IDPair) bool { statInfo, err := system.Stat(path) if err != nil { return false diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 00000000..96ecb1ce --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,23 @@ +package idtools // import "github.com/ory/dockertest/v3/docker/pkg/idtools" + +import ( + "os" + + "github.com/ory/dockertest/v3/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode, ""); err != nil { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/usergroupadd_linux.go similarity index 98% rename from vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/usergroupadd_linux.go index 6272c5a4..dc583d94 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/usergroupadd_linux.go @@ -1,4 +1,4 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" +package idtools // import "github.com/ory/dockertest/v3/docker/pkg/idtools" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/usergroupadd_unsupported.go similarity index 83% rename from vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/usergroupadd_unsupported.go index e7c4d631..a9fd1681 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/usergroupadd_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package idtools // import "github.com/docker/docker/pkg/idtools" +package idtools // import "github.com/ory/dockertest/v3/docker/pkg/idtools" import "fmt" diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/utils_unix.go similarity index 90% rename from vendor/github.com/docker/docker/pkg/idtools/utils_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/utils_unix.go index 903ac450..6eb0848b 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/idtools/utils_unix.go @@ -1,6 +1,6 @@ // +build !windows -package idtools // import "github.com/docker/docker/pkg/idtools" +package idtools // import "github.com/ory/dockertest/v3/docker/pkg/idtools" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/buffer.go similarity index 91% rename from vendor/github.com/docker/docker/pkg/ioutils/buffer.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/buffer.go index 466f7929..8ec65c71 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/buffer.go @@ -1,4 +1,4 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" +package ioutils // import "github.com/ory/dockertest/v3/docker/pkg/ioutils" import ( "errors" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go 
b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/bytespipe.go similarity index 98% rename from vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/bytespipe.go index d4bbf3c9..ec30e888 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/bytespipe.go @@ -1,4 +1,4 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" +package ioutils // import "github.com/ory/dockertest/v3/docker/pkg/ioutils" import ( "errors" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/fswriters.go similarity index 98% rename from vendor/github.com/docker/docker/pkg/ioutils/fswriters.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/fswriters.go index 534d66ac..4c08d9dd 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/fswriters.go @@ -1,4 +1,4 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" +package ioutils // import "github.com/ory/dockertest/v3/docker/pkg/ioutils" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/readers.go similarity index 97% rename from vendor/github.com/docker/docker/pkg/ioutils/readers.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/readers.go index 1f657bd3..eccf43b3 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/readers.go @@ -1,10 +1,11 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" +package ioutils // import "github.com/ory/dockertest/v3/docker/pkg/ioutils" import ( - "context" "crypto/sha256" "encoding/hex" "io" + + "golang.org/x/net/context" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/temp_unix.go similarity index 71% rename from vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/temp_unix.go index dc894f91..895f5db0 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/temp_unix.go @@ -1,6 +1,6 @@ // +build !windows -package ioutils // import "github.com/docker/docker/pkg/ioutils" +package ioutils // import "github.com/ory/dockertest/v3/docker/pkg/ioutils" import "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/temp_windows.go similarity index 70% rename from vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/temp_windows.go index ecaba2e3..2c535d69 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/temp_windows.go @@ -1,9 +1,9 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" +package ioutils // import "github.com/ory/dockertest/v3/docker/pkg/ioutils" import ( 
"io/ioutil" - "github.com/docker/docker/pkg/longpath" + "github.com/ory/dockertest/v3/docker/pkg/longpath" ) // TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/writeflusher.go similarity index 96% rename from vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/writeflusher.go index 91b8d182..997d0056 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/writeflusher.go @@ -1,4 +1,4 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" +package ioutils // import "github.com/ory/dockertest/v3/docker/pkg/ioutils" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/writers.go similarity index 94% rename from vendor/github.com/docker/docker/pkg/ioutils/writers.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/writers.go index 61c67949..6d63c90d 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/ioutils/writers.go @@ -1,4 +1,4 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" +package ioutils // import "github.com/ory/dockertest/v3/docker/pkg/ioutils" import "io" diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/jsonmessage/jsonmessage.go new file mode 100644 index 00000000..c03014bc --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/jsonmessage/jsonmessage.go @@ -0,0 +1,335 @@ +package jsonmessage // import "github.com/ory/dockertest/v3/docker/pkg/jsonmessage" + +import ( + "encoding/json" + "fmt" + "io" + "os" + "strings" + "time" + + gotty "github.com/Nvveen/Gotty" + "github.com/ory/dockertest/v3/docker/pkg/term" + units "github.com/docker/go-units" +) + +// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to +// ensure the formatted time isalways the same number of characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// JSONError wraps a concrete Code and Message, `Code` is +// is an integer error code, `Message` is the error message. +type JSONError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *JSONError) Error() string { + return e.Message +} + +// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, +// Start is the initial value for the operation. Current is the current status and +// value of the progress made towards Total. Total is the end value describing when +// we made 100% progress for an operation. 
+type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` + // If true, don't show xB/yB + HideCounts bool `json:"hidecounts,omitempty"` + Units string `json:"units,omitempty"` + nowFunc func() time.Time + winSize int +} + +func (p *JSONProgress) String() string { + var ( + width = p.width() + pbBox string + numbersBox string + timeLeftBox string + ) + if p.Current <= 0 && p.Total <= 0 { + return "" + } + if p.Total <= 0 { + switch p.Units { + case "": + current := units.HumanSize(float64(p.Current)) + return fmt.Sprintf("%8v", current) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + switch { + case p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := p.now().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// shim for testing +func (p *JSONProgress) now() time.Time { + if p.nowFunc == nil { + p.nowFunc = func() time.Time { + return time.Now().UTC() + } + } + return p.nowFunc() +} + +// shim for testing +func (p *JSONProgress) width() int { + if p.winSize != 0 { + return p.winSize + } + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + return int(ws.Width) + } + return 200 +} + +// JSONMessage defines a message struct. It describes +// the created time, where it comes from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing and image id after building. + Aux *json.RawMessage `json:"aux,omitempty"` +} + +/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ +type termInfo interface { + Parse(attr string, params ...interface{}) (string, error) +} + +type noTermInfo struct{} // canary used when no terminfo.
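Note for reviewers: the termInfo interface and the noTermInfo canary above exist so the rendering code can ask the terminfo database for a control sequence and fall back to a hard-coded ANSI escape when the lookup fails, which is exactly what the clearLine/cursorUp helpers that follow do. A self-contained sketch of that fallback pattern; the interface is restated here rather than imported because the vendored one is unexported, and the clearToEnd helper name is an illustrative assumption.

package main

import (
	"fmt"
	"io"
	"os"
)

// termInfo mirrors the unexported interface from jsonmessage.go.
type termInfo interface {
	Parse(attr string, params ...interface{}) (string, error)
}

// noTermInfo always errors, forcing the raw-escape fallback.
type noTermInfo struct{}

func (noTermInfo) Parse(string, ...interface{}) (string, error) {
	return "", fmt.Errorf("noTermInfo")
}

// clearToEnd clears from the cursor to the end of the line, preferring
// the terminfo "el" capability and falling back to the ESC[K escape.
func clearToEnd(out io.Writer, ti termInfo) {
	if attr, err := ti.Parse("el"); err == nil {
		fmt.Fprint(out, attr)
	} else {
		fmt.Fprint(out, "\x1b[K")
	}
}

func main() {
	// With the canary installed, the raw escape path is always taken.
	clearToEnd(os.Stdout, noTermInfo{})
}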
+ +func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { + return "", fmt.Errorf("noTermInfo") +} + +func clearLine(out io.Writer, ti termInfo) { + // el2 (clear whole line) is not exposed by terminfo. + + // First clear line from beginning to cursor + if attr, err := ti.Parse("el1"); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[1K") + } + // Then clear line from cursor to end + if attr, err := ti.Parse("el"); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[K") + } +} + +func cursorUp(out io.Writer, ti termInfo, l int) { + if l == 0 { // Should never be the case, but be tolerant + return + } + if attr, err := ti.Parse("cuu", l); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[%dA", l) + } +} + +func cursorDown(out io.Writer, ti termInfo, l int) { + if l == 0 { // Should never be the case, but be tolerant + return + } + if attr, err := ti.Parse("cud", l); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[%dB", l) + } +} + +// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` +// is a terminal. If this is the case, it will erase the entire current line +// when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("authentication is required") + } + return jm.Error + } + var endl string + if termInfo != nil && jm.Stream == "" && jm.Progress != nil { + clearLine(out, termInfo) + endl = "\r" + fmt.Fprintf(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && termInfo != nil { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. 
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]int) + ) + + var termInfo termInfo + + if isTerminal { + term := os.Getenv("TERM") + if term == "" { + term = "vt102" + } + + var err error + if termInfo, err = gotty.OpenTermInfo(term); err != nil { + termInfo = &noTermInfo{} + } + } + + for { + diff := 0 + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm.Aux) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = len(ids) + ids[jm.ID] = line + if termInfo != nil { + fmt.Fprintf(out, "\n") + } + } + diff = len(ids) - line + if termInfo != nil { + cursorUp(out, termInfo, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). + ids = make(map[string]int) + } + err := jm.Display(out, termInfo) + if jm.ID != "" && termInfo != nil { + cursorDown(out, termInfo, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/longpath/longpath.go similarity index 90% rename from vendor/github.com/docker/docker/pkg/longpath/longpath.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/longpath/longpath.go index 4177affb..d3d0a659 100644 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/longpath/longpath.go @@ -2,7 +2,7 @@ // in Windows, which are expected to be prepended with `\\?\` and followed by either // a drive letter, a UNC server\share, or a volume identifier. 
-package longpath // import "github.com/docker/docker/pkg/longpath" +package longpath // import "github.com/ory/dockertest/v3/docker/pkg/longpath" import ( "strings" diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags.go similarity index 89% rename from vendor/github.com/docker/docker/pkg/mount/flags.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags.go index ffd47331..24422096 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags.go @@ -1,4 +1,4 @@ -package mount // import "github.com/docker/docker/pkg/mount" +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" import ( "fmt" @@ -135,3 +135,15 @@ func parseOptions(options string) (int, string) { } return flag, strings.Join(data, ",") } + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := parseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags_freebsd.go similarity index 92% rename from vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags_freebsd.go index ef35ef90..c9358265 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags_freebsd.go @@ -1,6 +1,6 @@ // +build freebsd,cgo -package mount // import "github.com/docker/docker/pkg/mount" +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" /* #include diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags_linux.go similarity index 97% rename from vendor/github.com/docker/docker/pkg/mount/flags_linux.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags_linux.go index a1b199a3..315346dd 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags_linux.go @@ -1,4 +1,4 @@ -package mount // import "github.com/docker/docker/pkg/mount" +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" import ( "golang.org/x/sys/unix" diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags_unsupported.go similarity index 87% rename from vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags_unsupported.go index cc6c4759..fe03e2c5 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/flags_unsupported.go @@ -1,6 +1,6 @@ // +build !linux,!freebsd freebsd,!cgo -package mount // import "github.com/docker/docker/pkg/mount" +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" // These flags are unsupported. 
const ( diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mount.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mount.go new file mode 100644 index 00000000..17ad6cf5 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mount.go @@ -0,0 +1,110 @@ +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" + +import ( + "sort" + "strings" + + "syscall" + + "github.com/sirupsen/logrus" +) + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo. +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} + +// Mount will mount a filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + return mount(device, target, mType, uintptr(flag), data) +} + +// Unmount lazily unmounts a filesystem on supported platforms, otherwise +// does a normal unmount. +func Unmount(target string) error { + if mounted, err := Mounted(target); err != nil || !mounted { + return err + } + return unmount(target, mntDetach) +} + +// RecursiveUnmount unmounts the target and all mounts underneath, starting with +// the deepest mount first. +func RecursiveUnmount(target string) error { + mounts, err := GetMounts() + if err != nil { + return err + } + + // Make the deepest mount be first + sort.Slice(mounts, func(i, j int) bool { + return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) + }) + + for i, m := range mounts { + if !strings.HasPrefix(m.Mountpoint, target) { + continue + } + logrus.Debugf("Trying to unmount %s", m.Mountpoint) + err = unmount(m.Mountpoint, mntDetach) + if err != nil { + // If the error is EINVAL either this whole package is wrong (invalid flags passed to unmount(2)) or this is + // not a mountpoint (which is ok in this case). + // Meanwhile calling `Mounted()` is very expensive. + // + // We've purposefully used `syscall.EINVAL` here instead of `unix.EINVAL` to avoid platform branching + // Since `EINVAL` is defined for both Windows and Linux in the `syscall` package (and other platforms), + // this is nicer than defining a custom value that we can refer to in each platform file.
+ if err == syscall.EINVAL { + continue + } + if i == len(mounts)-1 { + if mounted, e := Mounted(m.Mountpoint); e != nil || mounted { + return err + } + continue + } + // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem + logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint) + continue + } + + logrus.Debugf("Unmounted %s", m.Mountpoint) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mounter_freebsd.go similarity index 78% rename from vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mounter_freebsd.go index 09ad3606..4e2a5033 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mounter_freebsd.go @@ -1,4 +1,4 @@ -package mount // import "github.com/docker/docker/pkg/mount" +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" /* #include @@ -11,9 +11,11 @@ package mount // import "github.com/docker/docker/pkg/mount" import "C" import ( + "fmt" "strings" - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) func allocateIOVecs(options []string) []C.struct_iovec { @@ -47,13 +49,12 @@ func mount(device, target, mType string, flag uintptr, data string) error { } if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - return &mountError{ - op: "mount", - source: device, - target: target, - flags: flag, - err: syscall.Errno(errno), - } + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) } return nil } + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mounter_linux.go similarity index 73% rename from vendor/github.com/docker/docker/pkg/mount/mounter_linux.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mounter_linux.go index a0a1ad23..426eadb1 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mounter_linux.go @@ -1,4 +1,4 @@ -package mount // import "github.com/docker/docker/pkg/mount" +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" import ( "golang.org/x/sys/unix" @@ -33,41 +33,25 @@ func mount(device, target, mType string, flags uintptr, data string) error { // Initial call applying all non-propagation flags for mount // or remount with changed data if err := unix.Mount(device, target, mType, oflags, data); err != nil { - return &mountError{ - op: "mount", - source: device, - target: target, - flags: oflags, - data: data, - err: err, - } + return err } } if flags&ptypes != 0 { // Change the propagation type. if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { - return &mountError{ - op: "remount", - target: target, - flags: flags & pflags, - err: err, - } + return err } } if oflags&broflags == broflags { // Remount the bind to apply read only. 
- if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { - return &mountError{ - op: "remount-ro", - target: target, - flags: oflags | unix.MS_REMOUNT, - err: err, - } - - } + return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") } return nil } + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mounter_unsupported.go new file mode 100644 index 00000000..8da9dc2a --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo.go similarity index 93% rename from vendor/github.com/docker/docker/pkg/mount/mountinfo.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo.go index ecd03fc0..1a3c3aa7 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo.go @@ -1,4 +1,4 @@ -package mount // import "github.com/docker/docker/pkg/mount" +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" // Info reveals information about a particular mounted filesystem. This // struct is populated from the content in the /proc//mountinfo file. diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_freebsd.go similarity index 74% rename from vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_freebsd.go index 36c89dc1..44cfc3a1 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_freebsd.go @@ -1,4 +1,4 @@ -package mount // import "github.com/docker/docker/pkg/mount" +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" /* #include @@ -15,7 +15,7 @@ import ( // Parse /proc/self/mountinfo because comparing Dev and ino does not work from // bind mounts. 
-func parseMountTable(filter FilterFunc) ([]*Info, error) { +func parseMountTable() ([]*Info, error) { var rawEntries *C.struct_statfs count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) @@ -32,24 +32,10 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) { var out []*Info for _, entry := range entries { var mountinfo Info - var skip, stop bool mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) - - if filter != nil { - // filter out entries we're not interested in - skip, stop = filter(p) - if skip { - continue - } - } - mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) - out = append(out, &mountinfo) - if stop { - break - } } return out, nil } diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_linux.go new file mode 100644 index 00000000..5e3d725a --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_linux.go @@ -0,0 +1,93 @@ +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. 
+func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_unsupported.go similarity index 62% rename from vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_unsupported.go index fd16d3ed..cea24bf9 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_unsupported.go @@ -1,12 +1,12 @@ // +build !windows,!linux,!freebsd freebsd,!cgo -package mount // import "github.com/docker/docker/pkg/mount" +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" import ( "fmt" "runtime" ) -func parseMountTable(f FilterFunc) ([]*Info, error) { +func parseMountTable() ([]*Info, error) { return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_windows.go new file mode 100644 index 00000000..cc8506ca --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" + +func parseMountTable() ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/sharedsubtree_linux.go similarity index 85% rename from vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/mount/sharedsubtree_linux.go index 8a100f0b..2b337667 100644 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/mount/sharedsubtree_linux.go @@ -1,4 +1,4 @@ -package mount // import "github.com/docker/docker/pkg/mount" +package mount // import "github.com/ory/dockertest/v3/docker/pkg/mount" // MakeShared ensures a mounted filesystem has the SHARED mount option enabled. // See the supported options in flags.go for further reference. @@ -48,22 +48,18 @@ func MakeRUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, "runbindable") } -// MakeMount ensures that the file or directory given is a mount point, -// bind mounting it to itself it case it is not. 
-func MakeMount(mnt string) error { - mounted, err := Mounted(mnt) +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) if err != nil { return err } - if mounted { - return nil - } - - return Mount(mnt, mnt, "none", "bind") -} -func ensureMountedAs(mountPoint, options string) error { - if err := MakeMount(mountPoint); err != nil { + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { return err } diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/pools/pools.go similarity index 96% rename from vendor/github.com/docker/docker/pkg/pools/pools.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/pools/pools.go index 46339c28..ee72fbba 100644 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/pools/pools.go @@ -7,14 +7,14 @@ // // Utility functions which operate on pools should be added to this // package to allow them to be reused. -package pools // import "github.com/docker/docker/pkg/pools" +package pools // import "github.com/ory/dockertest/v3/docker/pkg/pools" import ( "bufio" "io" "sync" - "github.com/docker/docker/pkg/ioutils" + "github.com/ory/dockertest/v3/docker/pkg/ioutils" ) const buffer32K = 32 * 1024 diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/stdcopy/stdcopy.go new file mode 100644 index 00000000..f6e0916c --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/stdcopy/stdcopy.go @@ -0,0 +1,190 @@ +package stdcopy // import "github.com/ory/dockertest/v3/docker/pkg/stdcopy" + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" +) + +// StdType is the type of standard stream +// a writer can multiplex to. +type StdType byte + +const ( + // Stdin represents standard input stream type. + Stdin StdType = iota + // Stdout represents standard output stream type. + Stdout + // Stderr represents standard error stream type. + Stderr + // Systemerr represents errors originating from the system that make it + // into the multiplexed stream. + Systemerr + + stdWriterPrefixLen = 8 + stdWriterFdIndex = 0 + stdWriterSizeIndex = 4 + + startingBufLen = 32*1024 + stdWriterPrefixLen + 1 +) + +var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} + +// stdWriter is a wrapper of io.Writer with extra customized info. +type stdWriter struct { + io.Writer + prefix byte +} + +// Write sends the buffer to the underlying writer. +// It inserts the prefix header before the buffer, +// so stdcopy.StdCopy knows where to multiplex the output. +// It makes stdWriter implement io.Writer. +func (w *stdWriter) Write(p []byte) (n int, err error) { + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + if p == nil { + return 0, nil + } + + header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) + buf := bufPool.Get().(*bytes.Buffer) + buf.Write(header[:]) + buf.Write(p) + + n, err = w.Writer.Write(buf.Bytes()) + n -= stdWriterPrefixLen + if n < 0 { + n = 0 + } + + buf.Reset() + bufPool.Put(buf) + return +} + +// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) io.Writer { + return &stdWriter{ + Writer: w, + prefix: byte(t), + } +} + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. +func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + stream := StdType(buf[stdWriterFdIndex]) + // Check the first byte to know where to write + switch stream { + case Stdin: + fallthrough + case Stdout: + // Write on stdout + out = dstout + case Stderr: + // Write on stderr + out = dsterr + case Systemerr: + // If we're on Systemerr, we won't write anywhere. + // NB: if this code changes later, make sure you don't try to write + // to outstream if Systemerr is the stream + out = nil + default: + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // we might have an error from the source mixed up in our multiplexed + // stream. if we do, return it. 
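The 8-byte frame header used here (stream byte at offset 0, big-endian payload length at offset 4) is symmetric between NewStdWriter and StdCopy, so a round trip shows the whole protocol. A short sketch, assuming only the vendored import path shown in this file's package clause:

package main

import (
	"bytes"
	"fmt"

	"github.com/ory/dockertest/v3/docker/pkg/stdcopy"
)

func main() {
	// Multiplex two logical streams into one buffer.
	var muxed bytes.Buffer
	outW := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout)
	errW := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr)
	fmt.Fprintln(outW, "hello from stdout")
	fmt.Fprintln(errW, "hello from stderr")

	// Demultiplex: each frame is routed by its stream byte.
	var stdout, stderr bytes.Buffer
	if _, err := stdcopy.StdCopy(&stdout, &stderr, &muxed); err != nil {
		panic(err)
	}
	fmt.Print(stdout.String()) // hello from stdout
	fmt.Print(stderr.String()) // hello from stderr
}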
+ if stream == Systemerr { + return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen])) + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) + if ew != nil { + return 0, ew + } + + // If the frame has not been fully written: error + if nw != frameSize { + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+stdWriterPrefixLen:]) + // Move the index + nr -= frameSize + stdWriterPrefixLen + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/chtimes.go similarity index 90% rename from vendor/github.com/docker/docker/pkg/system/chtimes.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/chtimes.go index c26a4e24..646aeccd 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/chtimes.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "os" diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/chtimes_unix.go similarity index 78% rename from vendor/github.com/docker/docker/pkg/system/chtimes_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/chtimes_unix.go index 259138a4..fb711f38 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/chtimes_unix.go @@ -1,6 +1,6 @@ // +build !windows -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "time" diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/chtimes_windows.go similarity index 90% rename from vendor/github.com/docker/docker/pkg/system/chtimes_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/chtimes_windows.go index d3a115ff..1366d158 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/chtimes_windows.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "time" diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/errors.go similarity index 82% rename from vendor/github.com/docker/docker/pkg/system/errors.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/errors.go index 2573d716..56d345ef 100644 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/errors.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "errors" diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/exitcode.go similarity index 
85% rename from vendor/github.com/docker/docker/pkg/system/exitcode.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/exitcode.go index 4ba8fe35..78297419 100644 --- a/vendor/github.com/docker/docker/pkg/system/exitcode.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/exitcode.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/filesys.go similarity index 97% rename from vendor/github.com/docker/docker/pkg/system/filesys.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/filesys.go index adeb1630..50998526 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/filesys.go @@ -1,6 +1,6 @@ // +build !windows -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/filesys_windows.go similarity index 95% rename from vendor/github.com/docker/docker/pkg/system/filesys_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/filesys_windows.go index 3049ff38..edd869c5 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/filesys_windows.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "os" @@ -11,13 +11,14 @@ import ( "time" "unsafe" - winio "github.com/Microsoft/go-winio" "golang.org/x/sys/windows" ) const ( // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System + SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" ) // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory @@ -102,13 +103,13 @@ func mkdirall(path string, applyACL bool, sddl string) error { // and Local System. 
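The SDDL strings above are dense: in SddlAdministratorsLocalSystem, "D:" introduces the DACL, "P" marks it protected against inherited ACEs, and each "(A;OICI;GA;;;…)" entry allows GENERIC_ALL with object and container inheritance, to Builtin Administrators (BA) and Local System (SY) respectively. A hedged sketch round-tripping that string through the same x/sys/windows call the rewritten mkdirWithACL below now uses (SECURITY_DESCRIPTOR.String is assumed to be available in the vendored x/sys/windows):

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Same SDDL string as SddlAdministratorsLocalSystem above.
	const sddl = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
	sd, err := windows.SecurityDescriptorFromString(sddl)
	if err != nil {
		panic(err)
	}
	// String() converts the binary descriptor back to its SDDL form.
	fmt.Println(sd.String())
}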
func mkdirWithACL(name string, sddl string) error { sa := windows.SecurityAttributes{Length: 0} - sd, err := winio.SddlToSecurityDescriptor(sddl) + sd, err := windows.SecurityDescriptorFromString(sddl) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} } sa.Length = uint32(unsafe.Sizeof(sa)) sa.InheritHandle = 1 - sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + sa.SecurityDescriptor = sd namep, err := windows.UTF16PtrFromString(name) if err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/init.go similarity index 83% rename from vendor/github.com/docker/docker/pkg/system/init.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/init.go index a17597aa..aac11a52 100644 --- a/vendor/github.com/docker/docker/pkg/system/init.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/init.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "syscall" diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/system/init_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/init_unix.go new file mode 100644 index 00000000..542a7674 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/init_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" + +// InitLCOW does nothing since LCOW is a windows only feature +func InitLCOW(experimental bool) { +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/system/init_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/init_windows.go new file mode 100644 index 00000000..78e97b82 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/init_windows.go @@ -0,0 +1,12 @@ +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" + +// lcowSupported determines if Linux Containers on Windows are supported. +var lcowSupported = false + +// InitLCOW sets whether LCOW is supported or not +func InitLCOW(experimental bool) { + v := GetOSVersion() + if experimental && v.Build >= 16299 { + lcowSupported = true + } +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lcow.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lcow.go new file mode 100644 index 00000000..31fb7437 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lcow.go @@ -0,0 +1,69 @@ +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" + +import ( + "fmt" + "runtime" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ValidatePlatform determines if a platform structure is valid. +// TODO This is a temporary function - can be replaced by parsing from +// https://github.com/containerd/containerd/pull/1403/files at a later date. +// @jhowardmsft +func ValidatePlatform(platform *specs.Platform) error { + platform.Architecture = strings.ToLower(platform.Architecture) + platform.OS = strings.ToLower(platform.OS) + // Based on https://github.com/moby/moby/pull/34642#issuecomment-330375350, do + // not support anything except operating system. 
+ if platform.Architecture != "" { + return fmt.Errorf("invalid platform architecture %q", platform.Architecture) + } + if platform.OS != "" { + if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) { + return fmt.Errorf("invalid platform os %q", platform.OS) + } + } + if len(platform.OSFeatures) != 0 { + return fmt.Errorf("invalid platform osfeatures %q", platform.OSFeatures) + } + if platform.OSVersion != "" { + return fmt.Errorf("invalid platform osversion %q", platform.OSVersion) + } + if platform.Variant != "" { + return fmt.Errorf("invalid platform variant %q", platform.Variant) + } + return nil +} + +// ParsePlatform parses a platform string in the format os[/arch[/variant] +// into an OCI image-spec platform structure. +// TODO This is a temporary function - can be replaced by parsing from +// https://github.com/containerd/containerd/pull/1403/files at a later date. +// @jhowardmsft +func ParsePlatform(in string) *specs.Platform { + p := &specs.Platform{} + elements := strings.SplitN(strings.ToLower(in), "/", 3) + if len(elements) == 3 { + p.Variant = elements[2] + } + if len(elements) >= 2 { + p.Architecture = elements[1] + } + if len(elements) >= 1 { + p.OS = elements[0] + } + return p +} + +// IsOSSupported determines if an operating system is supported by the host +func IsOSSupported(os string) bool { + if runtime.GOOS == os { + return true + } + if LCOWSupported() && os == "linux" { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lcow_unix.go similarity index 65% rename from vendor/github.com/docker/docker/pkg/system/lcow_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/lcow_unix.go index 26397fb8..e3e8248d 100644 --- a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lcow_unix.go @@ -1,6 +1,6 @@ // +build !windows -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" // LCOWSupported returns true if Linux containers on Windows are supported. func LCOWSupported() bool { diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lcow_windows.go similarity index 63% rename from vendor/github.com/docker/docker/pkg/system/lcow_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/lcow_windows.go index f0139df8..b6bbfde4 100644 --- a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lcow_windows.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" // LCOWSupported returns true if Linux containers on Windows are supported. 
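ParsePlatform and ValidatePlatform above are small enough to exercise directly. A sketch, assuming the vendored import path and a Linux host; note that, per the comment above, ValidatePlatform deliberately rejects anything beyond a bare OS:

package main

import (
	"fmt"

	"github.com/ory/dockertest/v3/docker/pkg/system"
)

func main() {
	p := system.ParsePlatform("linux/arm64/v8")
	fmt.Println(p.OS, p.Architecture, p.Variant) // linux arm64 v8

	// Only a bare OS passes validation; architecture and variant are rejected.
	fmt.Println(system.ValidatePlatform(p)) // invalid platform architecture "arm64"
	fmt.Println(system.ValidatePlatform(system.ParsePlatform("linux"))) // <nil> on a Linux host
}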
func LCOWSupported() bool { diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lstat_unix.go similarity index 71% rename from vendor/github.com/docker/docker/pkg/system/lstat_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/lstat_unix.go index de5a1c0f..70fefd34 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lstat_unix.go @@ -1,9 +1,8 @@ // +build !windows -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( - "os" "syscall" ) @@ -14,7 +13,7 @@ import ( func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { - return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} + return nil, err } return fromStatT(s) } diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lstat_windows.go similarity index 78% rename from vendor/github.com/docker/docker/pkg/system/lstat_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/lstat_windows.go index 359c791d..fe72ba13 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/lstat_windows.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import "os" diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo.go similarity index 83% rename from vendor/github.com/docker/docker/pkg/system/meminfo.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo.go index 6667eb84..ad2f4fb1 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" // MemInfo contains memory statistics of the host system. 
type MemInfo struct { diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo_linux.go similarity index 94% rename from vendor/github.com/docker/docker/pkg/system/meminfo_linux.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo_linux.go index d79e8b07..3ebd773a 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo_linux.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "bufio" diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo_unsupported.go similarity index 70% rename from vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo_unsupported.go index 56f44942..f140577d 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo_unsupported.go @@ -1,6 +1,6 @@ // +build !linux,!windows -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" // ReadMemInfo is not supported on platforms other than linux and windows. func ReadMemInfo() (*MemInfo, error) { diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo_windows.go similarity index 93% rename from vendor/github.com/docker/docker/pkg/system/meminfo_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo_windows.go index 6ed93f2f..849875f9 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/meminfo_windows.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "unsafe" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/mknod.go similarity index 91% rename from vendor/github.com/docker/docker/pkg/system/mknod.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/mknod.go index b132482e..cc020a0d 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/mknod.go @@ -1,6 +1,6 @@ // +build !windows -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "golang.org/x/sys/unix" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/mknod_windows.go similarity index 77% rename from vendor/github.com/docker/docker/pkg/system/mknod_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/mknod_windows.go index ec89d7a1..0a694970 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/mknod_windows.go @@ -1,4 +1,4 @@ 
-package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" // Mknod is not implemented on Windows. func Mknod(path string, mode uint32, dev int) error { diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/path.go similarity index 96% rename from vendor/github.com/docker/docker/pkg/system/path.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/path.go index a3d957af..4a4c53d1 100644 --- a/vendor/github.com/docker/docker/pkg/system/path.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/path.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/process_unix.go similarity index 84% rename from vendor/github.com/docker/docker/pkg/system/process_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/process_unix.go index 0195a891..7bbbe320 100644 --- a/vendor/github.com/docker/docker/pkg/system/process_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/process_unix.go @@ -1,6 +1,6 @@ // +build linux freebsd darwin -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/process_windows.go similarity index 80% rename from vendor/github.com/docker/docker/pkg/system/process_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/process_windows.go index 4e70c97b..84054848 100644 --- a/vendor/github.com/docker/docker/pkg/system/process_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/process_windows.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import "os" diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/rm.go similarity index 93% rename from vendor/github.com/docker/docker/pkg/system/rm.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/rm.go index b3109918..36841b6e 100644 --- a/vendor/github.com/docker/docker/pkg/system/rm.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/rm.go @@ -1,11 +1,11 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "os" "syscall" "time" - "github.com/docker/docker/pkg/mount" + "github.com/ory/dockertest/v3/docker/pkg/mount" "github.com/pkg/errors" ) @@ -34,7 +34,7 @@ func EnsureRemoveAll(dir string) error { for { err := os.RemoveAll(dir) if err == nil { - return nil + return err } pe, ok := err.(*os.PathError) diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_darwin.go similarity index 78% rename from vendor/github.com/docker/docker/pkg/system/stat_darwin.go rename to 
vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_darwin.go index c1c0ee9f..9c1c3bec 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_darwin.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_freebsd.go similarity index 78% rename from vendor/github.com/docker/docker/pkg/system/stat_freebsd.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_freebsd.go index c1c0ee9f..9c1c3bec 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_freebsd.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_linux.go similarity index 86% rename from vendor/github.com/docker/docker/pkg/system/stat_linux.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_linux.go index 98c9eb18..0fffa55e 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_linux.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_openbsd.go similarity index 78% rename from vendor/github.com/docker/docker/pkg/system/stat_openbsd.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_openbsd.go index 756b92d1..3909fe55 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_openbsd.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_solaris.go similarity index 78% rename from vendor/github.com/docker/docker/pkg/system/stat_solaris.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_solaris.go index 756b92d1..3909fe55 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_solaris.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_unix.go similarity index 90% rename from vendor/github.com/docker/docker/pkg/system/stat_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_unix.go 
index 86bb6dd5..b835b55c 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_unix.go @@ -1,9 +1,8 @@ // +build !windows -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( - "os" "syscall" ) @@ -60,7 +59,7 @@ func (s StatT) IsDir() bool { func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { - return nil, &os.PathError{Op: "Stat", Path: path, Err: err} + return nil, err } return fromStatT(s) } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_windows.go similarity index 92% rename from vendor/github.com/docker/docker/pkg/system/stat_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_windows.go index b2456cb8..159fe493 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/stat_windows.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "os" diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/syscall_unix.go similarity index 85% rename from vendor/github.com/docker/docker/pkg/system/syscall_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/syscall_unix.go index 919a412a..e9734d97 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/syscall_unix.go @@ -1,6 +1,6 @@ // +build linux freebsd -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import "golang.org/x/sys/unix" diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/syscall_windows.go similarity index 57% rename from vendor/github.com/docker/docker/pkg/system/syscall_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/syscall_windows.go index 4ae92fa6..8f5dce90 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/syscall_windows.go @@ -1,63 +1,17 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "fmt" - "syscall" "unsafe" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) -const ( - OWNER_SECURITY_INFORMATION = 0x00000001 - GROUP_SECURITY_INFORMATION = 0x00000002 - DACL_SECURITY_INFORMATION = 0x00000004 - SACL_SECURITY_INFORMATION = 0x00000008 - LABEL_SECURITY_INFORMATION = 0x00000010 - ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 - SCOPE_SECURITY_INFORMATION = 0x00000040 - PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 - ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 - BACKUP_SECURITY_INFORMATION = 0x00010000 - PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 - PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 - UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 - UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 
-) - -const ( - SE_UNKNOWN_OBJECT_TYPE = iota - SE_FILE_OBJECT - SE_SERVICE - SE_PRINTER - SE_REGISTRY_KEY - SE_LMSHARE - SE_KERNEL_OBJECT - SE_WINDOW_OBJECT - SE_DS_OBJECT - SE_DS_OBJECT_ALL - SE_PROVIDER_DEFINED_OBJECT - SE_WMIGUID_OBJECT - SE_REGISTRY_WOW64_32KEY -) - -const ( - SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" -) - -const ( - ContainerAdministratorSidString = "S-1-5-93-2-1" - ContainerUserSidString = "S-1-5-93-2-2" -) - var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") - procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") - procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") ) // OSVersion is a wrapper for Windows version information @@ -100,6 +54,7 @@ func GetOSVersion() OSVersion { return osv } +// ToString returns a textual representation of the version. func (osv OSVersion) ToString() string { return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) } @@ -171,23 +126,3 @@ func HasWin32KSupport() bool { // APIs. return ntuserApiset.Load() == nil } - -func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) - if r0 != 0 { - result = syscall.Errno(r0) - } - return -} - -func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) - if r1 == 0 { - if e1 != 0 { - result = syscall.Errno(e1) - } else { - result = syscall.EINVAL - } - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/umask.go similarity index 76% rename from vendor/github.com/docker/docker/pkg/system/umask.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/umask.go index 9912a2ba..0c0c183f 100644 --- a/vendor/github.com/docker/docker/pkg/system/umask.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/umask.go @@ -1,6 +1,6 @@ // +build !windows -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "golang.org/x/sys/unix" diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/umask_windows.go similarity index 71% rename from vendor/github.com/docker/docker/pkg/system/umask_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/umask_windows.go index fc62388c..001af707 100644 --- 
a/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/umask_windows.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" // Umask is not supported on the windows platform. func Umask(newmask int) (oldmask int, err error) { diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/utimes_freebsd.go similarity index 88% rename from vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/utimes_freebsd.go index ed1b9fad..bffe852b 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/utimes_freebsd.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/utimes_linux.go similarity index 89% rename from vendor/github.com/docker/docker/pkg/system/utimes_linux.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/utimes_linux.go index 0afe8545..841b9c88 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/utimes_linux.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/utimes_unsupported.go similarity index 72% rename from vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/utimes_unsupported.go index 095e072e..3b68e06a 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/utimes_unsupported.go @@ -1,6 +1,6 @@ // +build !linux,!freebsd -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/xattrs_linux.go similarity index 91% rename from vendor/github.com/docker/docker/pkg/system/xattrs_linux.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/xattrs_linux.go index 66d4895b..1fa01f44 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/xattrs_linux.go @@ -1,4 +1,4 @@ -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" import "golang.org/x/sys/unix" diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/xattrs_unsupported.go similarity index 82% rename from 
vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go rename to vendor/github.com/ory/dockertest/v3/docker/pkg/system/xattrs_unsupported.go index d780a90c..2f949c01 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/system/xattrs_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package system // import "github.com/docker/docker/pkg/system" +package system // import "github.com/ory/dockertest/v3/docker/pkg/system" // Lgetxattr is not supported on platforms other than linux. func Lgetxattr(path string, attr string) ([]byte, error) { diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/ascii.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/ascii.go new file mode 100644 index 00000000..aedd93e6 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/ascii.go @@ -0,0 +1,66 @@ +package term // import "github.com/ory/dockertest/v3/docker/pkg/term" + +import ( + "fmt" + "strings" +) + +// ASCII list the possible supported ASCII key sequence +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. +func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, key[0]) + } + } + return codes, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/proxy.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/proxy.go new file mode 100644 index 00000000..46978144 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/proxy.go @@ -0,0 +1,74 @@ +package term // import "github.com/ory/dockertest/v3/docker/pkg/term" + +import ( + "io" +) + +// EscapeError is special error which returned by a TTY proxy reader's Read() +// method in case its detach escape sequence is read. +type EscapeError struct{} + +func (EscapeError) Error() string { + return "read escape sequence" +} + +// escapeProxy is used only for attaches with a TTY. It is used to proxy +// stdin keypresses from the underlying reader and look for the passed in +// escape key sequence to signal a detach. +type escapeProxy struct { + escapeKeys []byte + escapeKeyPos int + r io.Reader +} + +// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader +// and detects when the specified escape keys are read, in which case the Read +// method will return an error of type EscapeError. 
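ToBytes above maps each comma-separated name through the ASCII table, so control sequences can be written in a readable form. For example, with the ctrl-p,ctrl-q pair conventionally used by Docker as its detach sequence:

package main

import (
	"fmt"

	"github.com/ory/dockertest/v3/docker/pkg/term"
)

func main() {
	// "ctrl-p" sits at index 16 of the ASCII table above, "ctrl-q" at 17.
	keys, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", keys) // 10 11
}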
+func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { + return &escapeProxy{ + escapeKeys: escapeKeys, + r: r, + } +} + +func (r *escapeProxy) Read(buf []byte) (int, error) { + nr, err := r.r.Read(buf) + + preserve := func() { + // this preserves the original key presses in the passed in buffer + nr += r.escapeKeyPos + preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf...) + r.escapeKeyPos = 0 + copy(buf[0:nr], preserve) + } + + if nr != 1 || err != nil { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, err + } + + if buf[0] != r.escapeKeys[r.escapeKeyPos] { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, nil + } + + if r.escapeKeyPos == len(r.escapeKeys)-1 { + return 0, EscapeError{} + } + + // Looks like we've got an escape key, but we need to match again on the next + // read. + // Store the current escape key we found so we can look for the next one on + // the next read. + // Since this is an escape key, make sure we don't let the caller read it + // If later on we find that this is not the escape sequence, we'll add the + // keys back + r.escapeKeyPos++ + return nr - r.escapeKeyPos, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/tc.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/tc.go new file mode 100644 index 00000000..e3205380 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/tc.go @@ -0,0 +1,20 @@ +// +build !windows + +package term // import "github.com/ory/dockertest/v3/docker/pkg/term" + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/term.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/term.go new file mode 100644 index 00000000..65db9db5 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/term.go @@ -0,0 +1,124 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term // import "github.com/ory/dockertest/v3/docker/pkg/term" + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + + "golang.org/x/sys/unix" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. 
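Combining ToBytes with NewEscapeProxy gives the usual detach-detection loop: read the proxied stream and treat an EscapeError as a detach request rather than a failure. A sketch against a made-up input, assuming the vendored import path:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ory/dockertest/v3/docker/pkg/term"
)

func main() {
	keys, _ := term.ToBytes("ctrl-p,ctrl-q")
	// Stand-in for a TTY: some typing followed by the detach sequence.
	input := append([]byte("ls\n"), keys...)
	r := term.NewEscapeProxy(bytes.NewReader(input), keys)

	buf := make([]byte, 1)
	for {
		_, err := r.Read(buf)
		if _, ok := err.(term.EscapeError); ok {
			fmt.Println("detach requested")
			return
		}
		if err == io.EOF {
			return
		}
		if err != nil {
			panic(err)
		}
	}
}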
+func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= unix.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + go func() { + for range sigchan { + // quit cleanly and the new terminal item is on a new line + fmt.Println() + signal.Stop(sigchan) + close(sigchan) + RestoreTerminal(fd, state) + os.Exit(1) + } + }() +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/term_windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/term_windows.go new file mode 100644 index 00000000..8be1a64e --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/term_windows.go @@ -0,0 +1,228 @@ +package term // import "github.com/ory/dockertest/v3/docker/pkg/term" + +import ( + "io" + "os" + "os/signal" + "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE + + "github.com/Azure/go-ansiterm/winterm" + "github.com/ory/dockertest/v3/docker/pkg/term/windows" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. +type Winsize struct { + Height uint16 + Width uint16 +} + +// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console +var vtInputSupported bool + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + // Turn on VT handling on all std handles, if possible. 
This might + // fail, in which case we will fall back to terminal emulation. + var emulateStdin, emulateStdout, emulateStderr bool + fd := os.Stdin.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { + emulateStdin = true + } else { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + winterm.SetConsoleMode(fd, mode) + } + + fd = os.Stdout.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + emulateStdout = true + } else { + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + } + } + + fd = os.Stderr.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + emulateStderr = true + } else { + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + } + } + + if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { + // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. + emulateStdin = true + emulateStdout = false + emulateStderr = false + } + + // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and + // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as + // go-ansiterm hasn't switch to x/sys/windows. + // TODO: switch back to x/sys/windows once go-ansiterm has switched + if emulateStdin { + stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE) + } else { + stdErr = os.Stderr + } + + return +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windowsconsole.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return windowsconsole.IsConsole(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return winterm.SetConsoleMode(fd, state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. 
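The GetConsoleMode/SetConsoleMode dance in StdStreams above follows a probe-then-set pattern: attempt to set the VT flag to discover whether the console supports it, and fall back to ANSI emulation if not. A reduced sketch of just the output side, using the same winterm calls and flags as above (this is not the full StdStreams logic):

// +build windows

package main

import (
	"os"

	"github.com/Azure/go-ansiterm/winterm"
)

func main() {
	fd := os.Stdout.Fd()
	mode, err := winterm.GetConsoleMode(fd)
	if err != nil {
		return // not a console; nothing to do
	}
	// Probe: setting an unsupported bit fails, signalling that ANSI
	// emulation is needed instead of native VT processing.
	if err := winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
		winterm.SetConsoleMode(fd, mode) // restore the original mode
		return
	}
	winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
}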
+func SaveState(fd uintptr) (*State, error) { + mode, e := winterm.GetConsoleMode(fd) + if e != nil { + return nil, e + } + + return &State{mode: mode}, nil +} + +// DisableEcho disables echo for the terminal connected to the given file descriptor. +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + err := winterm.SetConsoleMode(fd, mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this + // version of Windows. + winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. 
+func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + err = winterm.SetConsoleMode(fd, mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/termios_bsd.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/termios_bsd.go new file mode 100644 index 00000000..e1ca09dd --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/termios_bsd.go @@ -0,0 +1,42 @@ +// +build darwin freebsd openbsd + +package term // import "github.com/ory/dockertest/v3/docker/pkg/term" + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TIOCGETA + setTermios = unix.TIOCSETA +) + +// Termios is the Unix API for terminal I/O. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 + newState.Cc[unix.VMIN] = 1 + newState.Cc[unix.VTIME] = 0 + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/termios_linux.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/termios_linux.go new file mode 100644 index 00000000..ffa7a682 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/termios_linux.go @@ -0,0 +1,39 @@ +package term // import "github.com/ory/dockertest/v3/docker/pkg/term" + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TCGETS + setTermios = unix.TCSETS +) + +// Termios is the Unix API for terminal I/O. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
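+// +// The flag changes below mirror cfmakeraw(3): input translation, echo, +// canonical (line) mode and signal generation are switched off, the character +// size is forced to 8 bits, and reads return as soon as a single byte is +// available (VMIN=1, VTIME=0).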
+func MakeRaw(fd uintptr) (*State, error) { + termios, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { + return nil, err + } + + var oldState State + oldState.termios = Termios(*termios) + + termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + termios.Oflag &^= unix.OPOST + termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + termios.Cflag &^= (unix.CSIZE | unix.PARENB) + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil { + return nil, err + } + return &oldState, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/ansi_reader.go new file mode 100644 index 00000000..24cb8d83 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/ansi_reader.go @@ -0,0 +1,263 @@ +// +build windows + +package windowsconsole // import "github.com/ory/dockertest/v3/docker/pkg/term/windows" + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. +func NewAnsiReader(nFile int) io.ReadCloser { + initLogger() + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. 
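+// +// The flow is: drain any bytes cached by a previous call; otherwise block for +// console input records, translate key-down events into ANSI sequences, and +// cache whatever does not fit into p for the next call.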
+func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + logger.Debugf("Reading previously cached bytes") + + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar.fd, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + logger.Debug("No input events detected") + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + logger.Debug("No key bytes returned from the translator") + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + logger.Debugf("Read p[%d]: % x", copiedLength, p) + logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. 
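+ // Note: unsafe.Sizeof does not evaluate its operand, so the expression below + // yields the size of winterm.INPUT_RECORD without ever dereferencing the + // converted pointer.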
+ recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } else if countRecords == 0 { + countRecords = 1 + } + logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[uint16]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var keyMapPrefix = map[uint16]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. +func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { + var buffer bytes.Buffer + for _, event := range events { + if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { + buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) + } + } + + return buffer.Bytes() +} + +// keyToString maps the given input event record to the corresponding string. +func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { + if keyEvent.UnicodeChar == 0 { + return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) + } + + _, alt, control := getControlKeys(keyEvent.ControlKeyState) + if control { + // TODO(azlinux): Implement following control sequences + // -D Signals the end of input from the keyboard; also exits current shell. + // -H Deletes the first character to the left of the cursor. Also called the ERASE key. + // -Q Restarts printing after it has been stopped with -s. + // -S Suspends printing on the screen (does not stop the program). + // -U Deletes all characters on the current line. Also called the KILL key. + // -E Quits current command and creates a core + + } + + // +Key generates ESC N Key + if !control && alt { + return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + } + + return string(keyEvent.UnicodeChar) +} + +// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. 
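+// +// For example, a bare up-arrow maps through arrowKeyMapPrefix to +// escapeSequence + "A", and a held Ctrl inserts the ansiterm.KEY_CONTROL_PARAM_5 +// modifier between the prefix and the final letter.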
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { + shift, alt, control := getControlKeys(controlState) + modifier := getControlKeysModifier(shift, alt, control) + + if format, ok := arrowKeyMapPrefix[key]; ok { + return fmt.Sprintf(format, escapeSequence, modifier) + } + + if format, ok := keyMapPrefix[key]; ok { + return fmt.Sprintf(format, modifier) + } + + return "" +} + +// getControlKeys extracts the shift, alt, and ctrl key states. +func getControlKeys(controlState uint32) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. +func getControlKeysModifier(shift, alt, control bool) string { + if shift && alt && control { + return ansiterm.KEY_CONTROL_PARAM_8 + } + if alt && control { + return ansiterm.KEY_CONTROL_PARAM_7 + } + if shift && control { + return ansiterm.KEY_CONTROL_PARAM_6 + } + if control { + return ansiterm.KEY_CONTROL_PARAM_5 + } + if shift && alt { + return ansiterm.KEY_CONTROL_PARAM_4 + } + if alt { + return ansiterm.KEY_CONTROL_PARAM_3 + } + if shift { + return ansiterm.KEY_CONTROL_PARAM_2 + } + return "" +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/ansi_writer.go new file mode 100644 index 00000000..c12a7e5c --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/ansi_writer.go @@ -0,0 +1,64 @@ +// +build windows + +package windowsconsole // import "github.com/ory/dockertest/v3/docker/pkg/term/windows" + +import ( + "io" + "os" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. +type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. +func NewAnsiWriter(nFile int) io.Writer { + initLogger() + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) + + aw := &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. 
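+// +// Write does not copy bytes to the console directly: it feeds p to the ANSI +// parser, which interprets escape sequences and performs the corresponding +// Windows Console API calls through the win event handler.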
+func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + logger.Infof("Write: % x", p) + logger.Infof("Write: %s", string(p)) + return aw.parser.Parse(p) +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/console.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/console.go new file mode 100644 index 00000000..d9fea499 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/console.go @@ -0,0 +1,35 @@ +// +build windows + +package windowsconsole // import "github.com/ory/dockertest/v3/docker/pkg/term/windows" + +import ( + "os" + + "github.com/Azure/go-ansiterm/winterm" +) + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = IsConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. +func IsConsole(fd uintptr) bool { + _, e := winterm.GetConsoleMode(fd) + return e == nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/windows.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/windows.go new file mode 100644 index 00000000..8fce0ba8 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/windows/windows.go @@ -0,0 +1,33 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windowsconsole // import "github.com/ory/dockertest/v3/docker/pkg/term/windows" + +import ( + "io/ioutil" + "os" + "sync" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/sirupsen/logrus" +) + +var logger *logrus.Logger +var initOnce sync.Once + +func initLogger() { + initOnce.Do(func() { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiReaderWriter.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + }) +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/pkg/term/winsize.go b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/winsize.go new file mode 100644 index 00000000..fb8ea7f5 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/pkg/term/winsize.go @@ -0,0 +1,20 @@ +// +build !windows + +package term // import "github.com/ory/dockertest/v3/docker/pkg/term" + +import ( + "golang.org/x/sys/unix" +) + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. 
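+// +// A round-trip sketch (hypothetical caller): +// +// if ws, err := term.GetWinsize(os.Stdout.Fd()); err == nil { +// _ = term.SetWinsize(os.Stdout.Fd(), ws) // re-apply the current size +// }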
+func SetWinsize(fd uintptr, ws *Winsize) error { + uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/plugin.go b/vendor/github.com/ory/dockertest/v3/docker/plugin.go new file mode 100644 index 00000000..a28ff3d1 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/plugin.go @@ -0,0 +1,418 @@ +// Copyright 2018 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" +) + +// PluginPrivilege represents a privilege for a plugin. +type PluginPrivilege struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"` + Value []string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"` +} + +// InstallPluginOptions specify parameters to the InstallPlugins function. +// +// See https://goo.gl/C4t7Tz for more details. +type InstallPluginOptions struct { + Remote string + Name string + Plugins []PluginPrivilege `qs:"-"` + + Auth AuthConfiguration + + Context context.Context +} + +// InstallPlugins installs a plugin or returns an error in case of failure. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) InstallPlugins(opts InstallPluginOptions) error { + path := "/plugins/pull?" + queryString(opts) + resp, err := c.do("POST", path, doOptions{ + data: opts.Plugins, + context: opts.Context, + }) + if err != nil { + return err + } + defer resp.Body.Close() + return nil +} + +// PluginSettings stores plugin settings. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginSettings struct { + Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"` + Args []string `json:"Args,omitempty" yaml:"Args,omitempty" toml:"Args,omitempty"` + Devices []string `json:"Devices,omitempty" yaml:"Devices,omitempty" toml:"Devices,omitempty"` +} + +// PluginInterface stores plugin interface. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginInterface struct { + Types []string `json:"Types,omitempty" yaml:"Types,omitempty" toml:"Types,omitempty"` + Socket string `json:"Socket,omitempty" yaml:"Socket,omitempty" toml:"Socket,omitempty"` +} + +// PluginNetwork stores plugin network type. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginNetwork struct { + Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"` +} + +// PluginLinux stores plugin linux setting. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginLinux struct { + Capabilities []string `json:"Capabilities,omitempty" yaml:"Capabilities,omitempty" toml:"Capabilities,omitempty"` + AllowAllDevices bool `json:"AllowAllDevices,omitempty" yaml:"AllowAllDevices,omitempty" toml:"AllowAllDevices,omitempty"` + Devices []PluginLinuxDevices `json:"Devices,omitempty" yaml:"Devices,omitempty" toml:"Devices,omitempty"` +} + +// PluginLinuxDevices stores plugin linux device setting. +// +// See https://goo.gl/C4t7Tz for more details.
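+// +// Note: the struct tags below serialize the Description field under the +// "Documentation" key, so that is the name to expect on the wire.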
+type PluginLinuxDevices struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Description string `json:"Documentation,omitempty" yaml:"Documentation,omitempty" toml:"Documentation,omitempty"` + Settable []string `json:"Settable,omitempty" yaml:"Settable,omitempty" toml:"Settable,omitempty"` + Path string `json:"Path,omitempty" yaml:"Path,omitempty" toml:"Path,omitempty"` +} + +// PluginEnv stores plugin environment. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginEnv struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"` + Settable []string `json:"Settable,omitempty" yaml:"Settable,omitempty" toml:"Settable,omitempty"` + Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"` +} + +// PluginArgs stores plugin arguments. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginArgs struct { + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"` + Settable []string `json:"Settable,omitempty" yaml:"Settable,omitempty" toml:"Settable,omitempty"` + Value []string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"` +} + +// PluginUser stores plugin user. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginUser struct { + UID int32 `json:"UID,omitempty" yaml:"UID,omitempty" toml:"UID,omitempty"` + GID int32 `json:"GID,omitempty" yaml:"GID,omitempty" toml:"GID,omitempty"` +} + +// PluginConfig stores plugin config. +// +// See https://goo.gl/C4t7Tz for more details. +type PluginConfig struct { + Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"` + Documentation string + Interface PluginInterface `json:"Interface,omitempty" yaml:"Interface,omitempty" toml:"Interface,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty" yaml:"Entrypoint,omitempty" toml:"Entrypoint,omitempty"` + WorkDir string `json:"WorkDir,omitempty" yaml:"WorkDir,omitempty" toml:"WorkDir,omitempty"` + User PluginUser `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"` + Network PluginNetwork `json:"Network,omitempty" yaml:"Network,omitempty" toml:"Network,omitempty"` + Linux PluginLinux `json:"Linux,omitempty" yaml:"Linux,omitempty" toml:"Linux,omitempty"` + PropagatedMount string `json:"PropagatedMount,omitempty" yaml:"PropagatedMount,omitempty" toml:"PropagatedMount,omitempty"` + Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"` + Env []PluginEnv `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"` + Args PluginArgs `json:"Args,omitempty" yaml:"Args,omitempty" toml:"Args,omitempty"` +} + +// PluginDetail specify results from the ListPlugins function. +// +// See https://goo.gl/C4t7Tz for more details. 
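+// +// A listing sketch (hypothetical caller; client is an already-configured +// *Client): +// +// plugins, err := client.ListPlugins(context.Background()) +// if err == nil { +// for _, p := range plugins { +// fmt.Printf("%s (active: %v)\n", p.Name, p.Active) +// } +// }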
+type PluginDetail struct { + ID string `json:"Id,omitempty" yaml:"Id,omitempty" toml:"Id,omitempty"` + Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"` + Tag string `json:"Tag,omitempty" yaml:"Tag,omitempty" toml:"Tag,omitempty"` + Active bool `json:"Active,omitempty" yaml:"Active,omitempty" toml:"Active,omitempty"` + Settings PluginSettings `json:"Settings,omitempty" yaml:"Settings,omitempty" toml:"Settings,omitempty"` + Config PluginConfig `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"` +} + +// ListPlugins returns pluginDetails or an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) ListPlugins(ctx context.Context) ([]PluginDetail, error) { + resp, err := c.do("GET", "/plugins", doOptions{ + context: ctx, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + pluginDetails := make([]PluginDetail, 0) + if err := json.NewDecoder(resp.Body).Decode(&pluginDetails); err != nil { + return nil, err + } + return pluginDetails, nil +} + +// ListFilteredPluginsOptions specify parameters to the ListFilteredPlugins function. +// +// See https://goo.gl/C4t7Tz for more details. +type ListFilteredPluginsOptions struct { + Filters map[string][]string + Context context.Context +} + +// ListFilteredPlugins returns pluginDetails or an error. +// +// See https://goo.gl/rmdmWg for more details. +func (c *Client) ListFilteredPlugins(opts ListFilteredPluginsOptions) ([]PluginDetail, error) { + path := "/plugins/json?" + queryString(opts) + resp, err := c.do("GET", path, doOptions{ + context: opts.Context, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + pluginDetails := make([]PluginDetail, 0) + if err := json.NewDecoder(resp.Body).Decode(&pluginDetails); err != nil { + return nil, err + } + return pluginDetails, nil +} + +// GetPluginPrivileges returns pluginPrivileges or an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) GetPluginPrivileges(name string, ctx context.Context) ([]PluginPrivilege, error) { + resp, err := c.do("GET", "/plugins/privileges?remote="+name, doOptions{ + context: ctx, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var pluginPrivileges []PluginPrivilege + if err := json.NewDecoder(resp.Body).Decode(&pluginPrivileges); err != nil { + return nil, err + } + return pluginPrivileges, nil +} + +// InspectPlugins returns a pluginDetail or an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) InspectPlugins(name string, ctx context.Context) (*PluginDetail, error) { + resp, err := c.do("GET", "/plugins/"+name+"/json", doOptions{ + context: ctx, + }) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchPlugin{ID: name} + } + return nil, err + } + defer resp.Body.Close() + var pluginDetail PluginDetail + if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil { + return nil, err + } + return &pluginDetail, nil +} + +// RemovePluginOptions specify parameters to the RemovePlugin function. +// +// See https://goo.gl/C4t7Tz for more details. +type RemovePluginOptions struct { + // The Name of the plugin. + Name string `qs:"-"` + + Force bool `qs:"force"` + Context context.Context +} + +// RemovePlugin returns a PluginDetail or an error. +// +// See https://goo.gl/C4t7Tz for more details.
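+// +// A removal sketch (hypothetical caller; the plugin reference is made up): +// +// detail, err := client.RemovePlugin(RemovePluginOptions{ +// Name: "example-plugin:latest", +// Force: true, +// })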
+func (c *Client) RemovePlugin(opts RemovePluginOptions) (*PluginDetail, error) { + path := "/plugins/" + opts.Name + "?" + queryString(opts) + resp, err := c.do("DELETE", path, doOptions{context: opts.Context}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, &NoSuchPlugin{ID: opts.Name} + } + return nil, err + } + defer resp.Body.Close() + var pluginDetail PluginDetail + if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil { + return nil, err + } + return &pluginDetail, nil +} + +// EnablePluginOptions specify parameters to the EnablePlugin function. +// +// See https://goo.gl/C4t7Tz for more details. +type EnablePluginOptions struct { + // The Name of the plugin. + Name string `qs:"-"` + Timeout int64 `qs:"timeout"` + + Context context.Context +} + +// EnablePlugin enables the plugin named in opts, or returns an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) EnablePlugin(opts EnablePluginOptions) error { + path := "/plugins/" + opts.Name + "/enable?" + queryString(opts) + resp, err := c.do("POST", path, doOptions{context: opts.Context}) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisablePluginOptions specify parameters to the DisablePlugin function. +// +// See https://goo.gl/C4t7Tz for more details. +type DisablePluginOptions struct { + // The Name of the plugin. + Name string `qs:"-"` + + Context context.Context +} + +// DisablePlugin disables the plugin named in opts, or returns an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) DisablePlugin(opts DisablePluginOptions) error { + path := "/plugins/" + opts.Name + "/disable" + resp, err := c.do("POST", path, doOptions{context: opts.Context}) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CreatePluginOptions specify parameters to the CreatePlugin function. +// +// See https://goo.gl/C4t7Tz for more details. +type CreatePluginOptions struct { + // The Name of the plugin. + Name string `qs:"name"` + // Path to tar containing plugin + Path string `qs:"-"` + + Context context.Context +} + +// CreatePlugin creates the plugin described by opts, or returns an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) CreatePlugin(opts CreatePluginOptions) (string, error) { + path := "/plugins/create?" + queryString(opts) + resp, err := c.do("POST", path, doOptions{ + data: opts.Path, + context: opts.Context}) + if err != nil { + return "", err + } + defer resp.Body.Close() + containerNameBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(containerNameBytes), nil +} + +// PushPluginOptions specify parameters to the PushPlugin function. +// +// See https://goo.gl/C4t7Tz for more details. +type PushPluginOptions struct { + // The Name of the plugin. + Name string + + Context context.Context +} + +// PushPlugin pushes the plugin named in opts, or returns an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) PushPlugin(opts PushPluginOptions) error { + path := "/plugins/" + opts.Name + "/push" + resp, err := c.do("POST", path, doOptions{context: opts.Context}) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ConfigurePluginOptions specify parameters to the ConfigurePlugin function. +// +// See https://goo.gl/C4t7Tz for more details.
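+// +// Envs entries are expected in KEY=value form, e.g. []string{"DEBUG=1"}, +// matching the settable environment variables declared by the plugin.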
+type ConfigurePluginOptions struct { + // The Name of the plugin. + Name string `qs:"name"` + Envs []string + + Context context.Context +} + +// ConfigurePlugin configures the plugin named in opts, or returns an error. +// +// See https://goo.gl/C4t7Tz for more details. +func (c *Client) ConfigurePlugin(opts ConfigurePluginOptions) error { + path := "/plugins/" + opts.Name + "/set" + resp, err := c.do("POST", path, doOptions{ + data: opts.Envs, + context: opts.Context, + }) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchPlugin{ID: opts.Name} + } + return err + } + resp.Body.Close() + return nil +} + +// NoSuchPlugin is the error returned when a given plugin does not exist. +type NoSuchPlugin struct { + ID string + Err error +} + +func (err *NoSuchPlugin) Error() string { + if err.Err != nil { + return err.Err.Error() + } + return "No such plugin: " + err.ID } diff --git a/vendor/github.com/ory/dockertest/v3/docker/signal.go b/vendor/github.com/ory/dockertest/v3/docker/signal.go new file mode 100644 index 00000000..16aa0038 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/signal.go @@ -0,0 +1,49 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +// Signal represents a signal that can be sent to the container on +// KillContainer call. +type Signal int + +// These values represent all signals available on Linux, where containers will +// be running. +const ( + SIGABRT = Signal(0x6) + SIGALRM = Signal(0xe) + SIGBUS = Signal(0x7) + SIGCHLD = Signal(0x11) + SIGCLD = Signal(0x11) + SIGCONT = Signal(0x12) + SIGFPE = Signal(0x8) + SIGHUP = Signal(0x1) + SIGILL = Signal(0x4) + SIGINT = Signal(0x2) + SIGIO = Signal(0x1d) + SIGIOT = Signal(0x6) + SIGKILL = Signal(0x9) + SIGPIPE = Signal(0xd) + SIGPOLL = Signal(0x1d) + SIGPROF = Signal(0x1b) + SIGPWR = Signal(0x1e) + SIGQUIT = Signal(0x3) + SIGSEGV = Signal(0xb) + SIGSTKFLT = Signal(0x10) + SIGSTOP = Signal(0x13) + SIGSYS = Signal(0x1f) + SIGTERM = Signal(0xf) + SIGTRAP = Signal(0x5) + SIGTSTP = Signal(0x14) + SIGTTIN = Signal(0x15) + SIGTTOU = Signal(0x16) + SIGUNUSED = Signal(0x1f) + SIGURG = Signal(0x17) + SIGUSR1 = Signal(0xa) + SIGUSR2 = Signal(0xc) + SIGVTALRM = Signal(0x1a) + SIGWINCH = Signal(0x1c) + SIGXCPU = Signal(0x18) + SIGXFSZ = Signal(0x19) +) diff --git a/vendor/github.com/ory/dockertest/v3/docker/tar.go b/vendor/github.com/ory/dockertest/v3/docker/tar.go new file mode 100644 index 00000000..cbb24cdd --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/tar.go @@ -0,0 +1,122 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package docker + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + + "github.com/ory/dockertest/v3/docker/pkg/archive" + "github.com/ory/dockertest/v3/docker/pkg/fileutils" +) + +func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) { + srcPath, err := filepath.Abs(srcPath) + if err != nil { + return nil, err + } + + excludes, err := parseDockerignore(srcPath) + if err != nil { + return nil, err + } + + includes := []string{"."} + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The daemon will remove them for us, if needed, after it + // parses the Dockerfile. + // + // https://github.com/docker/docker/issues/8330 + // + forceIncludeFiles := []string{".dockerignore", dockerfilePath} + + for _, includeFile := range forceIncludeFiles { + if includeFile == "" { + continue + } + keepThem, err := fileutils.Matches(includeFile, excludes) + if err != nil { + return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err) + } + if keepThem { + includes = append(includes, includeFile) + } + } + + if err := validateContextDirectory(srcPath, excludes); err != nil { + return nil, err + } + tarOpts := &archive.TarOptions{ + ExcludePatterns: excludes, + IncludeFiles: includes, + Compression: archive.Uncompressed, + NoLchown: true, + } + return archive.TarWithOptions(srcPath, tarOpts) +} + +// validateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read. +// Symlinks which point to non-existing files don't trigger an error +func validateContextDirectory(srcPath string, excludes []string) error { + return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { + // skip this directory/file if it's not in the path; it won't get added to the context + if relFilePath, relErr := filepath.Rel(srcPath, filePath); relErr != nil { + return relErr + } else if skip, matchErr := fileutils.Matches(relFilePath, excludes); matchErr != nil { + return matchErr + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return nil + } + return err + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hang on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return fmt.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +func parseDockerignore(root string) ([]string, error) { + var excludes []string + ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err) + } + excludes = strings.Split(string(ignore), "\n") + + return excludes, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/docker/tls.go b/vendor/github.com/ory/dockertest/v3/docker/tls.go new file mode 100644 index 00000000..bb5790b5 --- /dev/null +++
b/vendor/github.com/ory/dockertest/v3/docker/tls.go @@ -0,0 +1,118 @@ +// Copyright 2014 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// The content is borrowed from Docker's own source code to provide a simple +// tls based dialer + +package docker + +import ( + "crypto/tls" + "errors" + "net" + "strings" + "time" +) + +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if cwc, ok := c.rawConn.(interface { + CloseWrite() error + }); ok { + return cwc.CloseWrite() + } + return nil +} + +func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { + // We want the Timeout and Deadline values from dialer to cover the + // whole process: TCP connection and TLS handshake. This means that we + // also need to start our own timers now. + timeout := dialer.Timeout + + if !dialer.Deadline.IsZero() { + deadlineTimeout := dialer.Deadline.Sub(time.Now()) + if timeout == 0 || deadlineTimeout < timeout { + timeout = deadlineTimeout + } + } + + var errChannel chan error + + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- errors.New("") + }) + } + + rawConn, err := dialer.Dial(network, addr) + if err != nil { + return nil, err + } + + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if config.ServerName == "" { + // Make a copy to avoid polluting argument or default. + config = copyTLSConfig(config) + config.ServerName = hostname + } + + conn := tls.Client(rawConn, config) + + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + + err = <-errChannel + } + + if err != nil { + rawConn.Close() + return nil, err + } + + // This is Docker difference with standard's crypto/tls package: returned a + // wrapper which holds both the TLS and raw connections. 
+ return &tlsClientCon{conn, rawConn}, nil +} + +// this exists to silent an error message in go vet +func copyTLSConfig(cfg *tls.Config) *tls.Config { + return &tls.Config{ + Certificates: cfg.Certificates, + CipherSuites: cfg.CipherSuites, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + ClientSessionCache: cfg.ClientSessionCache, + CurvePreferences: cfg.CurvePreferences, + InsecureSkipVerify: cfg.InsecureSkipVerify, + MaxVersion: cfg.MaxVersion, + MinVersion: cfg.MinVersion, + NameToCertificate: cfg.NameToCertificate, + NextProtos: cfg.NextProtos, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + Rand: cfg.Rand, + RootCAs: cfg.RootCAs, + ServerName: cfg.ServerName, + SessionTicketKey: cfg.SessionTicketKey, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + } +} diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/ory/dockertest/v3/docker/types/auth.go similarity index 91% rename from vendor/github.com/docker/docker/api/types/auth.go rename to vendor/github.com/ory/dockertest/v3/docker/types/auth.go index ddf15bb1..bd3ed27d 100644 --- a/vendor/github.com/docker/docker/api/types/auth.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/auth.go @@ -1,4 +1,4 @@ -package types // import "github.com/docker/docker/api/types" +package types // import "github.com/ory/dockertest/v3/docker/types" // AuthConfig contains authorization information for connecting to a Registry type AuthConfig struct { diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/ory/dockertest/v3/docker/types/blkiodev/blkio.go similarity index 84% rename from vendor/github.com/docker/docker/api/types/blkiodev/blkio.go rename to vendor/github.com/ory/dockertest/v3/docker/types/blkiodev/blkio.go index bf3463b9..0702b835 100644 --- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/blkiodev/blkio.go @@ -1,4 +1,4 @@ -package blkiodev // import "github.com/docker/docker/api/types/blkiodev" +package blkiodev // import "github.com/ory/dockertest/v3/docker/types/blkiodev" import "fmt" diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/ory/dockertest/v3/docker/types/client.go similarity index 91% rename from vendor/github.com/docker/docker/api/types/client.go rename to vendor/github.com/ory/dockertest/v3/docker/types/client.go index 4b9f5028..6a44b4f3 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/client.go @@ -1,12 +1,12 @@ -package types // import "github.com/docker/docker/api/types" +package types // import "github.com/ory/dockertest/v3/docker/types" import ( "bufio" "io" "net" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" + "github.com/ory/dockertest/v3/docker/types/container" + "github.com/ory/dockertest/v3/docker/types/filters" units "github.com/docker/go-units" ) @@ -181,33 +181,8 @@ type ImageBuildOptions struct { Target string SessionID string Platform string - // Version specifies the version of the unerlying builder to use - Version BuilderVersion - // BuildID is an optional identifier that can be passed together with the - // build request. The same identifier can be used to gracefully cancel the - // build with the cancel request. 
- BuildID string - // Outputs defines configurations for exporting build results. Only supported - // in BuildKit mode - Outputs []ImageBuildOutput } -// ImageBuildOutput defines configuration for exporting a build result -type ImageBuildOutput struct { - Type string - Attrs map[string]string -} - -// BuilderVersion sets the version of underlying builder to use -type BuilderVersion string - -const ( - // BuilderV1 is the first generation builder in docker daemon - BuilderV1 BuilderVersion = "1" - // BuilderBuildKit is builder based on moby/buildkit project - BuilderBuildKit = "2" -) - // ImageBuildResponse holds information // returned by a server after building // an image. diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/ory/dockertest/v3/docker/types/configs.go similarity index 81% rename from vendor/github.com/docker/docker/api/types/configs.go rename to vendor/github.com/ory/dockertest/v3/docker/types/configs.go index 178e911a..f125c8bd 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/configs.go @@ -1,8 +1,8 @@ -package types // import "github.com/docker/docker/api/types" +package types // import "github.com/ory/dockertest/v3/docker/types" import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" + "github.com/ory/dockertest/v3/docker/types/container" + "github.com/ory/dockertest/v3/docker/types/network" ) // configs holds structs used for internal communication between the @@ -55,10 +55,3 @@ type PluginEnableConfig struct { type PluginDisableConfig struct { ForceDisable bool } - -// NetworkListConfig stores the options available for listing networks -type NetworkListConfig struct { - // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here - Detailed bool - Verbose bool -} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/ory/dockertest/v3/docker/types/container/config.go similarity index 95% rename from vendor/github.com/docker/docker/api/types/container/config.go rename to vendor/github.com/ory/dockertest/v3/docker/types/container/config.go index f767195b..e72510a8 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/container/config.go @@ -1,9 +1,9 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // import "github.com/ory/dockertest/v3/docker/types/container" import ( "time" - "github.com/docker/docker/api/types/strslice" + "github.com/ory/dockertest/v3/docker/types/strslice" "github.com/docker/go-connections/nat" ) @@ -54,7 +54,7 @@ type Config struct { Env []string // List of environment variable to set in the container Cmd strslice.StrSlice // Command to run when starting the container Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) Volumes map[string]struct{} // List of volumes (mounts) used for the container WorkingDir string // Current directory (PWD) in the command will be launched diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/ory/dockertest/v3/docker/types/container/container_changes.go similarity index 88% rename from vendor/github.com/docker/docker/api/types/container/container_changes.go rename to vendor/github.com/ory/dockertest/v3/docker/types/container/container_changes.go index 222d1410..c909d6ca 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/container/container_changes.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/ory/dockertest/v3/docker/types/container/container_create.go similarity index 89% rename from vendor/github.com/docker/docker/api/types/container/container_create.go rename to vendor/github.com/ory/dockertest/v3/docker/types/container/container_create.go index 1ec9c372..49efa0f2 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/container/container_create.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/ory/dockertest/v3/docker/types/container/container_top.go similarity index 89% rename from vendor/github.com/docker/docker/api/types/container/container_top.go rename to vendor/github.com/ory/dockertest/v3/docker/types/container/container_top.go index f8a60668..ba41edcf 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/container/container_top.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/ory/dockertest/v3/docker/types/container/container_update.go similarity index 86% rename from vendor/github.com/docker/docker/api/types/container/container_update.go rename to vendor/github.com/ory/dockertest/v3/docker/types/container/container_update.go index 33addedf..7630ae54 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/container/container_update.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go 
b/vendor/github.com/ory/dockertest/v3/docker/types/container/container_wait.go similarity index 91% rename from vendor/github.com/docker/docker/api/types/container/container_wait.go rename to vendor/github.com/ory/dockertest/v3/docker/types/container/container_wait.go index 94b6a20e..9e3910a6 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/container/container_wait.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/ory/dockertest/v3/docker/types/container/host_config.go similarity index 89% rename from vendor/github.com/docker/docker/api/types/container/host_config.go rename to vendor/github.com/ory/dockertest/v3/docker/types/container/host_config.go index c7101077..84dbc3b9 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/container/host_config.go @@ -1,11 +1,11 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // import "github.com/ory/dockertest/v3/docker/types/container" import ( "strings" - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/strslice" + "github.com/ory/dockertest/v3/docker/types/blkiodev" + "github.com/ory/dockertest/v3/docker/types/mount" + "github.com/ory/dockertest/v3/docker/types/strslice" "github.com/docker/go-connections/nat" "github.com/docker/go-units" ) @@ -244,16 +244,6 @@ func (n PidMode) Container() string { return "" } -// DeviceRequest represents a request for devices from a device driver. -// Used by GPU device drivers. -type DeviceRequest struct { - Driver string // Name of device driver - Count int // Number of devices to request (-1 = All) - DeviceIDs []string // List of device IDs as recognizable by the device driver - Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") - Options map[string]string // Options to pass onto the device driver -} - // DeviceMapping represents the device mapping between the host and the container. type DeviceMapping struct { PathOnHost string @@ -337,15 +327,13 @@ type Resources struct { CpusetMems string // CpusetMems 0-2, 0,1 Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup - DeviceRequests []DeviceRequest // List of device requests for device drivers DiskQuota int64 // Disk limit (in bytes) KernelMemory int64 // Kernel memory limit (in bytes) - KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap MemorySwappiness *int64 // Tuning container memory swappiness behaviour OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. 
+ PidsLimit int64 // Setting pids limit for a container Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows @@ -381,10 +369,9 @@ type HostConfig struct { // Applicable to UNIX platforms CapAdd strslice.StrSlice // List of kernel capabilities to add to the container CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set) - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for ExtraHosts []string // List of extra hosts GroupAdd []string // List of additional groups that the container process will run as IpcMode IpcMode // IPC namespace to use for the container @@ -414,12 +401,6 @@ type HostConfig struct { // Mounts specs used by the container Mounts []mount.Mount `json:",omitempty"` - // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) - MaskedPaths []string - - // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) - ReadonlyPaths []string - // Run a custom init inside the container, if null, use the daemon's configured settings Init *bool `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/ory/dockertest/v3/docker/types/container/hostconfig_unix.go similarity index 92% rename from vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go rename to vendor/github.com/ory/dockertest/v3/docker/types/container/hostconfig_unix.go index cf6fdf44..22864689 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/container/hostconfig_unix.go @@ -1,6 +1,6 @@ // +build !windows -package container // import "github.com/docker/docker/api/types/container" +package container // import "github.com/ory/dockertest/v3/docker/types/container" // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/ory/dockertest/v3/docker/types/container/hostconfig_windows.go similarity index 92% rename from vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go rename to vendor/github.com/ory/dockertest/v3/docker/types/container/hostconfig_windows.go index 99f803a5..1aa7732b 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/container/hostconfig_windows.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // import "github.com/ory/dockertest/v3/docker/types/container" // IsBridge indicates whether container uses the bridge network stack // in windows it is given the name NAT diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go 
b/vendor/github.com/ory/dockertest/v3/docker/types/container/waitcondition.go similarity index 90% rename from vendor/github.com/docker/docker/api/types/container/waitcondition.go rename to vendor/github.com/ory/dockertest/v3/docker/types/container/waitcondition.go index cd8311f9..9c99e345 100644 --- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/container/waitcondition.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // import "github.com/ory/dockertest/v3/docker/types/container" // WaitCondition is a type used to specify a container state for which // to wait. diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/ory/dockertest/v3/docker/types/error_response.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/error_response.go rename to vendor/github.com/ory/dockertest/v3/docker/types/error_response.go diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/ory/dockertest/v3/docker/types/filters/parse.go similarity index 84% rename from vendor/github.com/docker/docker/api/types/filters/parse.go rename to vendor/github.com/ory/dockertest/v3/docker/types/filters/parse.go index 1f75403f..293c93d8 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/filters/parse.go @@ -1,14 +1,15 @@ /*Package filters provides tools for encoding a mapping of keys to a set of multiple values. */ -package filters // import "github.com/docker/docker/api/types/filters" +package filters // import "github.com/ory/dockertest/v3/docker/types/filters" import ( "encoding/json" + "errors" "regexp" "strings" - "github.com/docker/docker/api/types/versions" + "github.com/ory/dockertest/v3/docker/types/versions" ) // Args stores a mapping of keys to a set of multiple values. @@ -36,6 +37,41 @@ func NewArgs(initialArgs ...KeyValuePair) Args { return args } +// ParseFlag parses a key=value string and adds it to an Args. 
+// +// Deprecated: Use Args.Add() +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + + filters.Add(name, value) + + return filters, nil +} + +// ErrBadFormat is an error returned when a filter is not in the form key=value +// +// Deprecated: this error will be removed in a future version +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam encodes the Args as args JSON encoded string +// +// Deprecated: use ToJSON +func ToParam(a Args) (string, error) { + return ToJSON(a) +} + // MarshalJSON returns a JSON byte representation of the Args func (args Args) MarshalJSON() ([]byte, error) { if len(args.fields) == 0 { @@ -71,6 +107,13 @@ func ToParamWithVersion(version string, a Args) (string, error) { return ToJSON(a) } +// FromParam decodes a JSON encoded string into Args +// +// Deprecated: use FromJSON +func FromParam(p string) (Args, error) { + return FromJSON(p) +} + // FromJSON decodes a JSON encoded string into Args func FromJSON(p string) (Args, error) { args := NewArgs() @@ -232,6 +275,14 @@ func (args Args) FuzzyMatch(key, source string) bool { return false } +// Include returns true if the key exists in the mapping +// +// Deprecated: use Contains +func (args Args) Include(field string) bool { + _, ok := args.fields[field] + return ok +} + // Contains returns true if the key exists in the mapping func (args Args) Contains(field string) bool { _, ok := args.fields[field] @@ -272,22 +323,6 @@ func (args Args) WalkValues(field string, op func(value string) error) error { return nil } -// Clone returns a copy of args. 
-func (args Args) Clone() (newArgs Args) { - newArgs.fields = make(map[string]map[string]bool, len(args.fields)) - for k, m := range args.fields { - var mm map[string]bool - if m != nil { - mm = make(map[string]bool, len(m)) - for kk, v := range m { - mm[kk] = v - } - } - newArgs.fields[k] = mm - } - return newArgs -} - func deprecatedArgs(d map[string][]string) map[string]map[string]bool { m := map[string]map[string]bool{} for k, v := range d { diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/ory/dockertest/v3/docker/types/graph_driver_data.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/graph_driver_data.go rename to vendor/github.com/ory/dockertest/v3/docker/types/graph_driver_data.go diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/ory/dockertest/v3/docker/types/id_response.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/id_response.go rename to vendor/github.com/ory/dockertest/v3/docker/types/id_response.go diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/ory/dockertest/v3/docker/types/image_delete_response_item.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/image_delete_response_item.go rename to vendor/github.com/ory/dockertest/v3/docker/types/image_delete_response_item.go diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/ory/dockertest/v3/docker/types/image_summary.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/image_summary.go rename to vendor/github.com/ory/dockertest/v3/docker/types/image_summary.go diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/ory/dockertest/v3/docker/types/mount/mount.go similarity index 96% rename from vendor/github.com/docker/docker/api/types/mount/mount.go rename to vendor/github.com/ory/dockertest/v3/docker/types/mount/mount.go index ab4446b3..450377e2 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/mount/mount.go @@ -1,4 +1,4 @@ -package mount // import "github.com/docker/docker/api/types/mount" +package mount // import "github.com/ory/dockertest/v3/docker/types/mount" import ( "os" @@ -79,8 +79,7 @@ const ( // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` + Propagation Propagation `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. 
diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/ory/dockertest/v3/docker/types/network/network.go similarity index 84% rename from vendor/github.com/docker/docker/api/types/network/network.go rename to vendor/github.com/ory/dockertest/v3/docker/types/network/network.go index 71e97338..154acdff 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/network/network.go @@ -1,8 +1,4 @@ -package network // import "github.com/docker/docker/api/types/network" -import ( - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/errdefs" -) +package network // import "github.com/ory/dockertest/v3/docker/types/network" // Address represents an IP address type Address struct { @@ -110,18 +106,3 @@ type NetworkingConfig struct { type ConfigReference struct { Network string } - -var acceptedFilters = map[string]bool{ - "dangling": true, - "driver": true, - "id": true, - "label": true, - "name": true, - "scope": true, - "type": true, -} - -// ValidateFilters validates the list of filter args with the available filters. -func ValidateFilters(filter filters.Args) error { - return errdefs.InvalidParameter(filter.Validate(acceptedFilters)) -} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/ory/dockertest/v3/docker/types/plugin.go similarity index 97% rename from vendor/github.com/docker/docker/api/types/plugin.go rename to vendor/github.com/ory/dockertest/v3/docker/types/plugin.go index abae48b9..cab333e0 100644 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/plugin.go @@ -121,9 +121,6 @@ type PluginConfigArgs struct { // swagger:model PluginConfigInterface type PluginConfigInterface struct { - // Protocol to use for clients connecting to the plugin. 
- ProtocolScheme string `json:"ProtocolScheme,omitempty"` - // socket // Required: true Socket string `json:"Socket"` diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/ory/dockertest/v3/docker/types/plugin_device.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/plugin_device.go rename to vendor/github.com/ory/dockertest/v3/docker/types/plugin_device.go diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/ory/dockertest/v3/docker/types/plugin_env.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/plugin_env.go rename to vendor/github.com/ory/dockertest/v3/docker/types/plugin_env.go diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/ory/dockertest/v3/docker/types/plugin_interface_type.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/plugin_interface_type.go rename to vendor/github.com/ory/dockertest/v3/docker/types/plugin_interface_type.go diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/ory/dockertest/v3/docker/types/plugin_mount.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/plugin_mount.go rename to vendor/github.com/ory/dockertest/v3/docker/types/plugin_mount.go diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/ory/dockertest/v3/docker/types/plugin_responses.go similarity index 96% rename from vendor/github.com/docker/docker/api/types/plugin_responses.go rename to vendor/github.com/ory/dockertest/v3/docker/types/plugin_responses.go index 60d1fb5a..94237b68 100644 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/plugin_responses.go @@ -1,4 +1,4 @@ -package types // import "github.com/docker/docker/api/types" +package types // import "github.com/ory/dockertest/v3/docker/types" import ( "encoding/json" diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/ory/dockertest/v3/docker/types/port.go similarity index 89% rename from vendor/github.com/docker/docker/api/types/port.go rename to vendor/github.com/ory/dockertest/v3/docker/types/port.go index d9123474..ad52d46d 100644 --- a/vendor/github.com/docker/docker/api/types/port.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/port.go @@ -7,7 +7,7 @@ package types // swagger:model Port type Port struct { - // Host IP address that the container's port is mapped to + // IP IP string `json:"IP,omitempty"` // Port on the container diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/ory/dockertest/v3/docker/types/registry/authenticate.go similarity index 88% rename from vendor/github.com/docker/docker/api/types/registry/authenticate.go rename to vendor/github.com/ory/dockertest/v3/docker/types/registry/authenticate.go index f0a2113e..6ac4872e 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/registry/authenticate.go @@ -1,4 +1,4 @@ -package registry // import "github.com/docker/docker/api/types/registry" +package registry // import "github.com/ory/dockertest/v3/docker/types/registry" // 
---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/ory/dockertest/v3/docker/types/registry/registry.go similarity index 97% rename from vendor/github.com/docker/docker/api/types/registry/registry.go rename to vendor/github.com/ory/dockertest/v3/docker/types/registry/registry.go index 8789ad3b..12dae112 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/registry/registry.go @@ -1,4 +1,4 @@ -package registry // import "github.com/docker/docker/api/types/registry" +package registry // import "github.com/ory/dockertest/v3/docker/types/registry" import ( "encoding/json" diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/ory/dockertest/v3/docker/types/seccomp.go similarity index 93% rename from vendor/github.com/docker/docker/api/types/seccomp.go rename to vendor/github.com/ory/dockertest/v3/docker/types/seccomp.go index 2259c6be..f602d6f6 100644 --- a/vendor/github.com/docker/docker/api/types/seccomp.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/seccomp.go @@ -1,4 +1,4 @@ -package types // import "github.com/docker/docker/api/types" +package types // import "github.com/ory/dockertest/v3/docker/types" // Seccomp represents the config for a seccomp profile for syscall restriction. type Seccomp struct { @@ -77,9 +77,8 @@ type Arg struct { // Filter is used to conditionally apply Seccomp rules type Filter struct { - Caps []string `json:"caps,omitempty"` - Arches []string `json:"arches,omitempty"` - MinKernel string `json:"minKernel,omitempty"` + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` } // Syscall is used to match a group of syscalls in Seccomp diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/ory/dockertest/v3/docker/types/service_update_response.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/service_update_response.go rename to vendor/github.com/ory/dockertest/v3/docker/types/service_update_response.go diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/ory/dockertest/v3/docker/types/stats.go similarity index 96% rename from vendor/github.com/docker/docker/api/types/stats.go rename to vendor/github.com/ory/dockertest/v3/docker/types/stats.go index 20daebed..c8325667 100644 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/stats.go @@ -1,6 +1,6 @@ // Package types is used for API stability in the types and response to the // consumers of the API stats endpoint. -package types // import "github.com/docker/docker/api/types" +package types // import "github.com/ory/dockertest/v3/docker/types" import "time" @@ -120,7 +120,7 @@ type NetworkStats struct { RxBytes uint64 `json:"rx_bytes"` // Packets received. Windows and Linux. RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. Note that we don't `omitempty` this + // Received errors. Not used on Windows. Note that we dont `omitempty` this // field as it is expected in the >=v1.21 API stats structure. RxErrors uint64 `json:"rx_errors"` // Incoming packets dropped. Windows and Linux. 
@@ -129,7 +129,7 @@ type NetworkStats struct { TxBytes uint64 `json:"tx_bytes"` // Packets sent. Windows and Linux. TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we don't `omitempty` this + // Sent errors. Not used on Windows. Note that we dont `omitempty` this // field as it is expected in the >=v1.21 API stats structure. TxErrors uint64 `json:"tx_errors"` // Outgoing packets dropped. Windows and Linux. diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/ory/dockertest/v3/docker/types/strslice/strslice.go similarity index 90% rename from vendor/github.com/docker/docker/api/types/strslice/strslice.go rename to vendor/github.com/ory/dockertest/v3/docker/types/strslice/strslice.go index 82921ceb..81ffb019 100644 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/strslice/strslice.go @@ -1,4 +1,4 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" +package strslice // import "github.com/ory/dockertest/v3/docker/types/strslice" import "encoding/json" diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/ory/dockertest/v3/docker/types/types.go similarity index 94% rename from vendor/github.com/docker/docker/api/types/types.go rename to vendor/github.com/ory/dockertest/v3/docker/types/types.go index a39ffcb7..d3383cd1 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/types.go @@ -1,4 +1,4 @@ -package types // import "github.com/docker/docker/api/types" +package types // import "github.com/ory/dockertest/v3/docker/types" import ( "errors" @@ -8,12 +8,11 @@ import ( "strings" "time" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" + "github.com/ory/dockertest/v3/docker/types/container" + "github.com/ory/dockertest/v3/docker/types/filters" + "github.com/ory/dockertest/v3/docker/types/mount" + "github.com/ory/dockertest/v3/docker/types/network" + "github.com/ory/dockertest/v3/docker/types/registry" "github.com/docker/go-connections/nat" ) @@ -102,10 +101,9 @@ type ContainerStats struct { // Ping contains response of Engine API: // GET "/_ping" type Ping struct { - APIVersion string - OSType string - Experimental bool - BuilderVersion BuilderVersion + APIVersion string + OSType string + Experimental bool } // ComponentVersion describes the version information for a specific component. 
@@ -158,12 +156,10 @@ type Info struct { MemoryLimit bool SwapLimit bool KernelMemory bool - KernelMemoryTCP bool CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` CPUShares bool CPUSet bool - PidsLimit bool IPv4Forwarding bool BridgeNfIptables bool BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` @@ -183,7 +179,7 @@ type Info struct { RegistryConfig *registry.ServiceConfig NCPU int MemTotal int64 - GenericResources []swarm.GenericResource + //GenericResources []swarm.GenericResource DockerRootDir string HTTPProxy string `json:"HttpProxy"` HTTPSProxy string `json:"HttpsProxy"` @@ -196,7 +192,7 @@ type Info struct { ClusterAdvertise string Runtimes map[string]Runtime DefaultRuntime string - Swarm swarm.Info + //Swarm swarm.Info // LiveRestoreEnabled determines whether containers should be kept // running when the daemon is shutdown or upon daemon start if // running containers are detected @@ -207,8 +203,6 @@ type Info struct { RuncCommit Commit InitCommit Commit SecurityOptions []string - ProductLicense string `json:",omitempty"` - Warnings []string } // KeyValue holds a key/value pair @@ -517,8 +511,7 @@ type DiskUsage struct { Images []*ImageSummary Containers []*Container Volumes []*Volume - BuildCache []*BuildCache - BuilderSize int64 // deprecated + BuilderSize int64 } // ContainersPruneReport contains the response for Engine API: @@ -545,7 +538,6 @@ type ImagesPruneReport struct { // BuildCachePruneReport contains the response for Engine API: // POST "/build/prune" type BuildCachePruneReport struct { - CachesDeleted []string SpaceReclaimed uint64 } @@ -592,24 +584,3 @@ type PushResult struct { type BuildResult struct { ID string } - -// BuildCache contains information about a build cache record -type BuildCache struct { - ID string - Parent string - Type string - Description string - InUse bool - Shared bool - Size int64 - CreatedAt time.Time - LastUsedAt *time.Time - UsageCount int -} - -// BuildCachePruneOptions hold parameters to prune the build cache -type BuildCachePruneOptions struct { - All bool - KeepStorage int64 - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/ory/dockertest/v3/docker/types/versions/README.md similarity index 100% rename from vendor/github.com/docker/docker/api/types/versions/README.md rename to vendor/github.com/ory/dockertest/v3/docker/types/versions/README.md diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/ory/dockertest/v3/docker/types/versions/compare.go similarity index 94% rename from vendor/github.com/docker/docker/api/types/versions/compare.go rename to vendor/github.com/ory/dockertest/v3/docker/types/versions/compare.go index 8ccb0aa9..0e1b17ca 100644 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/vendor/github.com/ory/dockertest/v3/docker/types/versions/compare.go @@ -1,4 +1,4 @@ -package versions // import "github.com/docker/docker/api/types/versions" +package versions // import "github.com/ory/dockertest/v3/docker/types/versions" import ( "strconv" diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/ory/dockertest/v3/docker/types/volume.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/volume.go rename to vendor/github.com/ory/dockertest/v3/docker/types/volume.go diff --git a/vendor/github.com/ory/dockertest/v3/docker/volume.go 
b/vendor/github.com/ory/dockertest/v3/docker/volume.go new file mode 100644 index 00000000..021a262b --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/docker/volume.go @@ -0,0 +1,190 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "context" + "encoding/json" + "errors" + "net/http" +) + +var ( + // ErrNoSuchVolume is the error returned when the volume does not exist. + ErrNoSuchVolume = errors.New("no such volume") + + // ErrVolumeInUse is the error returned when the volume requested to be removed is still in use. + ErrVolumeInUse = errors.New("volume in use and cannot be removed") +) + +// Volume represents a volume. +// +// See https://goo.gl/3wgTsd for more details. +type Volume struct { + Name string `json:"Name" yaml:"Name" toml:"Name"` + Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"` + Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty" toml:"Mountpoint,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"` + Options map[string]string `json:"Options,omitempty" yaml:"Options,omitempty" toml:"Options,omitempty"` +} + +// ListVolumesOptions specify parameters to the ListVolumes function. +// +// See https://goo.gl/3wgTsd for more details. +type ListVolumesOptions struct { + Filters map[string][]string + Context context.Context +} + +// ListVolumes returns a list of available volumes in the server. +// +// See https://goo.gl/3wgTsd for more details. +func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) { + resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{ + context: opts.Context, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + m := make(map[string]interface{}) + if err = json.NewDecoder(resp.Body).Decode(&m); err != nil { + return nil, err + } + var volumes []Volume + volumesJSON, ok := m["Volumes"] + if !ok { + return volumes, nil + } + data, err := json.Marshal(volumesJSON) + if err != nil { + return nil, err + } + if err := json.Unmarshal(data, &volumes); err != nil { + return nil, err + } + return volumes, nil +} + +// CreateVolumeOptions specify parameters to the CreateVolume function. +// +// See https://goo.gl/qEhmEC for more details. +type CreateVolumeOptions struct { + Name string + Driver string + DriverOpts map[string]string + Context context.Context `json:"-"` + Labels map[string]string +} + +// CreateVolume creates a volume on the server. +// +// See https://goo.gl/qEhmEC for more details. +func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { + resp, err := c.do("POST", "/volumes/create", doOptions{ + data: opts, + context: opts.Context, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var volume Volume + if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil { + return nil, err + } + return &volume, nil +} + +// InspectVolume returns a volume by its name. +// +// See https://goo.gl/GMjsMc for more details. 
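+//
+// An illustrative call only — the volume name "mydata" is a placeholder,
+// not something this package defines:
+//
+//    volume, err := client.InspectVolume("mydata")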
+func (c *Client) InspectVolume(name string) (*Volume, error) { + resp, err := c.do("GET", "/volumes/"+name, doOptions{}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return nil, ErrNoSuchVolume + } + return nil, err + } + defer resp.Body.Close() + var volume Volume + if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil { + return nil, err + } + return &volume, nil +} + +// RemoveVolume removes a volume by its name. +// +// Deprecated: Use RemoveVolumeWithOptions instead. +func (c *Client) RemoveVolume(name string) error { + return c.RemoveVolumeWithOptions(RemoveVolumeOptions{Name: name}) +} + +// RemoveVolumeOptions specify parameters to the RemoveVolumeWithOptions +// function. +// +// See https://goo.gl/nvd6qj for more details. +type RemoveVolumeOptions struct { + Context context.Context + Name string `qs:"-"` + Force bool +} + +// RemoveVolumeWithOptions removes a volume by its name and takes extra +// parameters. +// +// See https://goo.gl/nvd6qj for more details. +func (c *Client) RemoveVolumeWithOptions(opts RemoveVolumeOptions) error { + path := "/volumes/" + opts.Name + resp, err := c.do("DELETE", path+"?"+queryString(opts), doOptions{context: opts.Context}) + if err != nil { + if e, ok := err.(*Error); ok { + if e.Status == http.StatusNotFound { + return ErrNoSuchVolume + } + if e.Status == http.StatusConflict { + return ErrVolumeInUse + } + } + return err + } + defer resp.Body.Close() + return nil +} + +// PruneVolumesOptions specify parameters to the PruneVolumes function. +// +// See https://goo.gl/f9XDem for more details. +type PruneVolumesOptions struct { + Filters map[string][]string + Context context.Context +} + +// PruneVolumesResults specify results from the PruneVolumes function. +// +// See https://goo.gl/f9XDem for more details. +type PruneVolumesResults struct { + VolumesDeleted []string + SpaceReclaimed int64 +} + +// PruneVolumes deletes volumes which are unused. +// +// See https://goo.gl/f9XDem for more details. +func (c *Client) PruneVolumes(opts PruneVolumesOptions) (*PruneVolumesResults, error) { + path := "/volumes/prune?" + queryString(opts) + resp, err := c.do("POST", path, doOptions{context: opts.Context}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var results PruneVolumesResults + if err := json.NewDecoder(resp.Body).Decode(&results); err != nil { + return nil, err + } + return &results, nil +} diff --git a/vendor/github.com/ory/dockertest/v3/dockertest.go b/vendor/github.com/ory/dockertest/v3/dockertest.go new file mode 100644 index 00000000..66636bc7 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/dockertest.go @@ -0,0 +1,599 @@ +package dockertest + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/cenkalti/backoff/v3" + dc "github.com/ory/dockertest/v3/docker" + options "github.com/ory/dockertest/v3/docker/opts" + "github.com/pkg/errors" +) + +var ( + ErrNotInContainer = errors.New("not running in container") +) + +// Pool represents a connection to the docker API and is used to create and remove docker images. +type Pool struct { + Client *dc.Client + MaxWait time.Duration +} + +// Network represents a docker network. +type Network struct { + pool *Pool + Network *dc.Network +} + +// Close removes network by calling pool.RemoveNetwork. +func (n *Network) Close() error { + return n.pool.RemoveNetwork(n) +} + +// Resource represents a docker container. 
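+//
+// A minimal sketch of typical use once a container is up ("5432/tcp" is a
+// placeholder port id, not part of this API):
+//
+//    addr := resource.GetHostPort("5432/tcp") // e.g. "localhost:32768"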
+type Resource struct {
+	pool      *Pool
+	Container *dc.Container
+}
+
+// GetPort returns a resource's published port. You can use it to connect to the service via localhost, e.g. tcp://localhost:1231/
+func (r *Resource) GetPort(id string) string {
+	if r.Container == nil || r.Container.NetworkSettings == nil {
+		return ""
+	}
+
+	m, ok := r.Container.NetworkSettings.Ports[dc.Port(id)]
+	if !ok || len(m) == 0 {
+		return ""
+	}
+
+	return m[0].HostPort
+}
+
+// GetBoundIP returns a resource's published IP address.
+func (r *Resource) GetBoundIP(id string) string {
+	if r.Container == nil || r.Container.NetworkSettings == nil {
+		return ""
+	}
+
+	m, ok := r.Container.NetworkSettings.Ports[dc.Port(id)]
+	if !ok || len(m) == 0 {
+		return ""
+	}
+
+	return m[0].HostIP
+}
+
+// GetHostPort returns a resource's published port with an address.
+func (r *Resource) GetHostPort(portID string) string {
+	if r.Container == nil || r.Container.NetworkSettings == nil {
+		return ""
+	}
+
+	m, ok := r.Container.NetworkSettings.Ports[dc.Port(portID)]
+	if !ok || len(m) == 0 {
+		return ""
+	}
+
+	ip := m[0].HostIP
+	if ip == "0.0.0.0" {
+		ip = "localhost"
+	}
+	return net.JoinHostPort(ip, m[0].HostPort)
+}
+
+type ExecOptions struct {
+	// Command environment, optional.
+	Env []string
+
+	// StdIn will be attached as command stdin if provided.
+	StdIn io.Reader
+
+	// StdOut will be attached as command stdout if provided.
+	StdOut io.Writer
+
+	// StdErr will be attached as command stderr if provided.
+	StdErr io.Writer
+
+	// Allocate TTY for command or not.
+	TTY bool
+}
+
+// Exec executes command within container.
+func (r *Resource) Exec(cmd []string, opts ExecOptions) (exitCode int, err error) {
+	exec, err := r.pool.Client.CreateExec(dc.CreateExecOptions{
+		Container:    r.Container.ID,
+		Cmd:          cmd,
+		Env:          opts.Env,
+		AttachStderr: opts.StdErr != nil,
+		AttachStdout: opts.StdOut != nil,
+		AttachStdin:  opts.StdIn != nil,
+		Tty:          opts.TTY,
+	})
+	if err != nil {
+		return -1, errors.Wrap(err, "Create exec failed")
+	}
+
+	err = r.pool.Client.StartExec(exec.ID, dc.StartExecOptions{
+		InputStream:  opts.StdIn,
+		OutputStream: opts.StdOut,
+		ErrorStream:  opts.StdErr,
+		Tty:          opts.TTY,
+	})
+	if err != nil {
+		return -1, errors.Wrap(err, "Start exec failed")
+	}
+
+	inspectExec, err := r.pool.Client.InspectExec(exec.ID)
+	if err != nil {
+		return -1, errors.Wrap(err, "Inspect exec failed")
+	}
+
+	return inspectExec.ExitCode, nil
+}
+
+// GetIPInNetwork returns container IP address in network.
+func (r *Resource) GetIPInNetwork(network *Network) string {
+	if r.Container == nil || r.Container.NetworkSettings == nil {
+		return ""
+	}
+
+	netCfg, ok := r.Container.NetworkSettings.Networks[network.Network.Name]
+	if !ok {
+		return ""
+	}
+
+	return netCfg.IPAddress
+}
+
+// ConnectToNetwork connects container to network.
+func (r *Resource) ConnectToNetwork(network *Network) error {
+	err := r.pool.Client.ConnectNetwork(
+		network.Network.ID,
+		dc.NetworkConnectionOptions{Container: r.Container.ID},
+	)
+	if err != nil {
+		return errors.Wrap(err, "Failed to connect container to network")
+	}
+
+	// refresh internal representation
+	r.Container, err = r.pool.Client.InspectContainer(r.Container.ID)
+	if err != nil {
+		return errors.Wrap(err, "Failed to refresh container information")
+	}
+
+	network.Network, err = r.pool.Client.NetworkInfo(network.Network.ID)
+	if err != nil {
+		return errors.Wrap(err, "Failed to refresh network information")
+	}
+
+	return nil
+}
+
+// DisconnectFromNetwork disconnects container from network.
+func (r *Resource) DisconnectFromNetwork(network *Network) error {
+	err := r.pool.Client.DisconnectNetwork(
+		network.Network.ID,
+		dc.NetworkConnectionOptions{Container: r.Container.ID},
+	)
+	if err != nil {
+		return errors.Wrap(err, "Failed to disconnect container from network")
+	}
+
+	// refresh internal representation
+	r.Container, err = r.pool.Client.InspectContainer(r.Container.ID)
+	if err != nil {
+		return errors.Wrap(err, "Failed to refresh container information")
+	}
+
+	network.Network, err = r.pool.Client.NetworkInfo(network.Network.ID)
+	if err != nil {
+		return errors.Wrap(err, "Failed to refresh network information")
+	}
+
+	return nil
+}
+
+// Close removes a container and linked volumes from docker by calling pool.Purge.
+func (r *Resource) Close() error {
+	return r.pool.Purge(r)
+}
+
+// Expire sets a resource's associated container to terminate after a period has passed
+func (r *Resource) Expire(seconds uint) error {
+	go func() {
+		if err := r.pool.Client.StopContainer(r.Container.ID, seconds); err != nil {
+			// Error handling?
+		}
+	}()
+	return nil
+}
+
+// NewTLSPool creates a new pool given an endpoint and the certificate path. This is required for endpoints that
+// require TLS communication.
+func NewTLSPool(endpoint, certpath string) (*Pool, error) {
+	ca := fmt.Sprintf("%s/ca.pem", certpath)
+	cert := fmt.Sprintf("%s/cert.pem", certpath)
+	key := fmt.Sprintf("%s/key.pem", certpath)
+
+	client, err := dc.NewTLSClient(endpoint, cert, key, ca)
+	if err != nil {
+		return nil, errors.Wrap(err, "")
+	}
+
+	return &Pool{
+		Client: client,
+	}, nil
+}
+
+// NewPool creates a new pool. You can pass an empty string to use the default, which is taken from the environment
+// variables DOCKER_HOST and DOCKER_URL, or from docker-machine if the environment variable DOCKER_MACHINE_NAME is set,
+// or, if neither is defined, a sensible default for the operating system you are on.
+// TLS pools are automatically configured if the DOCKER_CERT_PATH environment variable exists.
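+//
+// An illustrative usage sketch — "postgres", the "11" tag, and the ping
+// callback are placeholders, not defaults of this package; error handling
+// is elided for brevity:
+//
+//    pool, err := NewPool("")
+//    resource, err := pool.Run("postgres", "11", nil)
+//    err = pool.Retry(func() error { return db.Ping() })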
+func NewPool(endpoint string) (*Pool, error) { + if endpoint == "" { + if os.Getenv("DOCKER_MACHINE_NAME") != "" { + client, err := dc.NewClientFromEnv() + if err != nil { + return nil, errors.Wrap(err, "failed to create client from environment") + } + + return &Pool{Client: client}, nil + } + if os.Getenv("DOCKER_HOST") != "" { + endpoint = os.Getenv("DOCKER_HOST") + } else if os.Getenv("DOCKER_URL") != "" { + endpoint = os.Getenv("DOCKER_URL") + } else if runtime.GOOS == "windows" { + endpoint = "http://localhost:2375" + } else { + endpoint = "unix:///var/run/docker.sock" + } + } + + if os.Getenv("DOCKER_CERT_PATH") != "" && shouldPreferTLS(endpoint) { + return NewTLSPool(endpoint, os.Getenv("DOCKER_CERT_PATH")) + } + + client, err := dc.NewClient(endpoint) + if err != nil { + return nil, errors.Wrap(err, "") + } + + return &Pool{ + Client: client, + }, nil +} + +func shouldPreferTLS(endpoint string) bool { + return !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "unix://") +} + +// RunOptions is used to pass in optional parameters when running a container. +type RunOptions struct { + Hostname string + Name string + Repository string + Tag string + Env []string + Entrypoint []string + Cmd []string + Mounts []string + Links []string + ExposedPorts []string + ExtraHosts []string + CapAdd []string + SecurityOpt []string + DNS []string + WorkingDir string + NetworkID string + Networks []*Network // optional networks to join + Labels map[string]string + Auth dc.AuthConfiguration + PortBindings map[dc.Port][]dc.PortBinding + Privileged bool +} + +// BuildOptions is used to pass in optional parameters when building a container +type BuildOptions struct { + Dockerfile string + ContextDir string +} + +// BuildAndRunWithBuildOptions builds and starts a docker container. +// Optional modifier functions can be passed in order to change the hostconfig values not covered in RunOptions +func (d *Pool) BuildAndRunWithBuildOptions(buildOpts *BuildOptions, runOpts *RunOptions, hcOpts ...func(*dc.HostConfig)) (*Resource, error) { + err := d.Client.BuildImage(dc.BuildImageOptions{ + Name: runOpts.Name, + Dockerfile: buildOpts.Dockerfile, + OutputStream: ioutil.Discard, + ContextDir: buildOpts.ContextDir, + }) + + if err != nil { + return nil, errors.Wrap(err, "") + } + + runOpts.Repository = runOpts.Name + + return d.RunWithOptions(runOpts, hcOpts...) +} + +// BuildAndRunWithOptions builds and starts a docker container. +// Optional modifier functions can be passed in order to change the hostconfig values not covered in RunOptions +func (d *Pool) BuildAndRunWithOptions(dockerfilePath string, opts *RunOptions, hcOpts ...func(*dc.HostConfig)) (*Resource, error) { + // Set the Dockerfile folder as build context + dir, file := filepath.Split(dockerfilePath) + buildOpts := BuildOptions{Dockerfile: file, ContextDir: dir} + return d.BuildAndRunWithBuildOptions(&buildOpts, opts, hcOpts...) +} + +// BuildAndRun builds and starts a docker container +func (d *Pool) BuildAndRun(name, dockerfilePath string, env []string) (*Resource, error) { + return d.BuildAndRunWithOptions(dockerfilePath, &RunOptions{Name: name, Env: env}) +} + +// RunWithOptions starts a docker container. 
+// Optional modifier functions can be passed in order to change the hostconfig values not covered in RunOptions +// +// pool.RunWithOptions(&RunOptions{Repository: "mongo", Cmd: []string{"mongod", "--smallfiles"}}) +// pool.RunWithOptions(&RunOptions{Repository: "mongo", Cmd: []string{"mongod", "--smallfiles"}}, func(hostConfig *dc.HostConfig) { +// hostConfig.ShmSize = shmemsize +// }) +func (d *Pool) RunWithOptions(opts *RunOptions, hcOpts ...func(*dc.HostConfig)) (*Resource, error) { + repository := opts.Repository + tag := opts.Tag + env := opts.Env + cmd := opts.Cmd + ep := opts.Entrypoint + wd := opts.WorkingDir + var exp map[dc.Port]struct{} + + if len(opts.ExposedPorts) > 0 { + exp = map[dc.Port]struct{}{} + for _, p := range opts.ExposedPorts { + exp[dc.Port(p)] = struct{}{} + } + } + + mounts := []dc.Mount{} + + for _, m := range opts.Mounts { + s, d, err := options.MountParser(m) + if err != nil { + return nil, err + } + mounts = append(mounts, dc.Mount{ + Source: s, + Destination: d, + RW: true, + }) + } + + if tag == "" { + tag = "latest" + } + + networkingConfig := dc.NetworkingConfig{ + EndpointsConfig: map[string]*dc.EndpointConfig{}, + } + if opts.NetworkID != "" { + networkingConfig.EndpointsConfig[opts.NetworkID] = &dc.EndpointConfig{} + } + for _, network := range opts.Networks { + networkingConfig.EndpointsConfig[network.Network.ID] = &dc.EndpointConfig{} + } + + _, err := d.Client.InspectImage(fmt.Sprintf("%s:%s", repository, tag)) + if err != nil { + if err := d.Client.PullImage(dc.PullImageOptions{ + Repository: repository, + Tag: tag, + }, opts.Auth); err != nil { + return nil, errors.Wrap(err, "") + } + } + + hostConfig := dc.HostConfig{ + PublishAllPorts: true, + Binds: opts.Mounts, + Links: opts.Links, + PortBindings: opts.PortBindings, + ExtraHosts: opts.ExtraHosts, + CapAdd: opts.CapAdd, + SecurityOpt: opts.SecurityOpt, + Privileged: opts.Privileged, + DNS: opts.DNS, + } + + for _, hostConfigOption := range hcOpts { + hostConfigOption(&hostConfig) + } + + c, err := d.Client.CreateContainer(dc.CreateContainerOptions{ + Name: opts.Name, + Config: &dc.Config{ + Hostname: opts.Hostname, + Image: fmt.Sprintf("%s:%s", repository, tag), + Env: env, + Entrypoint: ep, + Cmd: cmd, + Mounts: mounts, + ExposedPorts: exp, + WorkingDir: wd, + Labels: opts.Labels, + StopSignal: "SIGWINCH", // to support timeouts + }, + HostConfig: &hostConfig, + NetworkingConfig: &networkingConfig, + }) + if err != nil { + return nil, errors.Wrap(err, "") + } + + if err := d.Client.StartContainer(c.ID, nil); err != nil { + return nil, errors.Wrap(err, "") + } + + c, err = d.Client.InspectContainer(c.ID) + if err != nil { + return nil, errors.Wrap(err, "") + } + + for _, network := range opts.Networks { + network.Network, err = d.Client.NetworkInfo(network.Network.ID) + if err != nil { + return nil, errors.Wrap(err, "") + } + } + + return &Resource{ + pool: d, + Container: c, + }, nil +} + +// Run starts a docker container. 
+//
+// pool.Run("mysql", "5.3", []string{"FOO=BAR", "BAR=BAZ"})
+func (d *Pool) Run(repository, tag string, env []string) (*Resource, error) {
+	return d.RunWithOptions(&RunOptions{Repository: repository, Tag: tag, Env: env})
+}
+
+// ContainerByName finds a container with the given name and returns it if present
+func (d *Pool) ContainerByName(containerName string) (*Resource, bool) {
+	containers, err := d.Client.ListContainers(dc.ListContainersOptions{
+		All: true,
+		Filters: map[string][]string{
+			"name": {containerName},
+		},
+	})
+
+	if err != nil {
+		return nil, false
+	}
+
+	if len(containers) == 0 {
+		return nil, false
+	}
+
+	c, err := d.Client.InspectContainer(containers[0].ID)
+	if err != nil {
+		return nil, false
+	}
+
+	return &Resource{
+		pool:      d,
+		Container: c,
+	}, true
+}
+
+// RemoveContainerByName finds a container with the given name and removes it if present
+func (d *Pool) RemoveContainerByName(containerName string) error {
+	containers, err := d.Client.ListContainers(dc.ListContainersOptions{
+		All: true,
+		Filters: map[string][]string{
+			"name": []string{containerName},
+		},
+	})
+	if err != nil {
+		return errors.Wrapf(err, "Error while listing containers with name %s", containerName)
+	}
+
+	if len(containers) == 0 {
+		return nil
+	}
+
+	err = d.Client.RemoveContainer(dc.RemoveContainerOptions{
+		ID:            containers[0].ID,
+		Force:         true,
+		RemoveVolumes: true,
+	})
+	if err != nil {
+		return errors.Wrapf(err, "Error while removing container with name %s", containerName)
+	}
+
+	return nil
+}
+
+// Purge removes a container and linked volumes from docker.
+func (d *Pool) Purge(r *Resource) error {
+	if err := d.Client.RemoveContainer(dc.RemoveContainerOptions{ID: r.Container.ID, Force: true, RemoveVolumes: true}); err != nil {
+		return errors.Wrap(err, "")
+	}
+
+	return nil
+}
+
+// Retry is an exponential backoff retry helper. You can use it to wait for e.g. mysql to boot up.
+func (d *Pool) Retry(op func() error) error {
+	if d.MaxWait == 0 {
+		d.MaxWait = time.Minute
+	}
+	bo := backoff.NewExponentialBackOff()
+	bo.MaxInterval = time.Second * 5
+	bo.MaxElapsedTime = d.MaxWait
+	return backoff.Retry(op, bo)
+}
+
+// CurrentContainer returns the current container descriptor if this function is called within a running container.
+// It returns ErrNotInContainer if this function is not running in a container.
+func (d *Pool) CurrentContainer() (*Resource, error) {
+	// docker daemon puts short container id into hostname
+	hostname, err := os.Hostname()
+	if err != nil {
+		return nil, errors.Wrap(err, "Get hostname failed")
+	}
+
+	container, err := d.Client.InspectContainer(hostname)
+	switch err.(type) {
+	case nil:
+		return &Resource{
+			pool:      d,
+			Container: container,
+		}, nil
+	case *dc.NoSuchContainer:
+		return nil, ErrNotInContainer
+	default:
+		return nil, errors.Wrap(err, "")
+	}
+}
+
+// CreateNetwork creates a docker network. It's useful for linking multiple containers.
+func (d *Pool) CreateNetwork(name string, opts ...func(config *dc.CreateNetworkOptions)) (*Network, error) {
+	var cfg dc.CreateNetworkOptions
+	cfg.Name = name
+	for _, opt := range opts {
+		opt(&cfg)
+	}
+
+	network, err := d.Client.CreateNetwork(cfg)
+	if err != nil {
+		return nil, errors.Wrap(err, "")
+	}
+
+	return &Network{
+		pool:    d,
+		Network: network,
+	}, nil
+}
+
+// RemoveNetwork disconnects containers and removes the provided network.
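+//
+// Illustrative pairing with CreateNetwork above ("test-net" is a placeholder
+// name):
+//
+//    network, _ := pool.CreateNetwork("test-net")
+//    defer pool.RemoveNetwork(network)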
+func (d *Pool) RemoveNetwork(network *Network) error { + for container := range network.Network.Containers { + _ = d.Client.DisconnectNetwork( + network.Network.ID, + dc.NetworkConnectionOptions{Container: container, Force: true}, + ) + } + + return d.Client.RemoveNetwork(network.Network.ID) +} diff --git a/vendor/github.com/ory/dockertest/v3/go.mod b/vendor/github.com/ory/dockertest/v3/go.mod new file mode 100644 index 00000000..5e73fd51 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/go.mod @@ -0,0 +1,25 @@ +module github.com/ory/dockertest/v3 + +go 1.13 + +require ( + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 + github.com/Microsoft/go-winio v0.4.14 + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 + github.com/cenkalti/backoff/v3 v3.0.0 + github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 + github.com/docker/go-connections v0.4.0 + github.com/docker/go-units v0.4.0 + github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect + github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2 + github.com/opencontainers/go-digest v1.0.0-rc1 // indirect + github.com/opencontainers/image-spec v1.0.1 + github.com/opencontainers/runc v1.0.0-rc9 + github.com/pkg/errors v0.8.1 + github.com/sirupsen/logrus v1.4.2 + github.com/stretchr/testify v1.4.0 + golang.org/x/net v0.0.0-20191003171128-d98b1b443823 + golang.org/x/sys v0.0.0-20200121082415-34d275377bf9 + gopkg.in/yaml.v2 v2.2.7 // indirect + gotest.tools/v3 v3.0.2 +) diff --git a/vendor/github.com/ory/dockertest/v3/go.sum b/vendor/github.com/ory/dockertest/v3/go.sum new file mode 100644 index 00000000..58391999 --- /dev/null +++ b/vendor/github.com/ory/dockertest/v3/go.sum @@ -0,0 +1,66 @@ +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod 
h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2 h1:hRGSmZu7j271trc9sneMrpOW7GN5ngLm8YUZIPzf394= +github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20191003171128-d98b1b443823 h1:Ypyv6BNJh07T1pUSrehkLemqPKXhus2MkfktJ91kRh4= +golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200121082415-34d275377bf9 h1:N19i1HjUnR7TF7rMt8O4p3dLvqvmYyzB6ifMFmrbY50= +golang.org/x/sys v0.0.0-20200121082415-34d275377bf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git a/vendor/github.com/posener/complete/.gitignore b/vendor/github.com/posener/complete/.gitignore deleted file mode 100644 index 293955f9..00000000 --- a/vendor/github.com/posener/complete/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.idea -coverage.txt -gocomplete/gocomplete -example/self/self diff --git a/vendor/github.com/posener/complete/.travis.yml b/vendor/github.com/posener/complete/.travis.yml deleted file mode 100644 index 2fae9454..00000000 --- a/vendor/github.com/posener/complete/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -sudo: false -go: - - 1.11 - - 1.10.x - - 1.9 - - 1.8 - -before_install: - - go get -u -t ./... - -script: - - GO111MODULE=on ./test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt deleted file mode 100644 index 16249b4a..00000000 --- a/vendor/github.com/posener/complete/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License - -Copyright (c) 2017 Eyal Posener - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go deleted file mode 100644 index 17ab2c6d..00000000 --- a/vendor/github.com/posener/complete/args.go +++ /dev/null @@ -1,111 +0,0 @@ -package complete - -import ( - "os" - "path/filepath" - "strings" - "unicode" -) - -// Args describes command line arguments -type Args struct { - // All lists of all arguments in command line (not including the command itself) - All []string - // Completed lists of all completed arguments in command line, - // If the last one is still being typed - no space after it, - // it won't appear in this list of arguments. - Completed []string - // Last argument in command line, the one being typed, if the last - // character in the command line is a space, this argument will be empty, - // otherwise this would be the last word. - Last string - // LastCompleted is the last argument that was fully typed. - // If the last character in the command line is space, this would be the - // last word, otherwise, it would be the word before that. - LastCompleted string -} - -// Directory gives the directory of the current written -// last argument if it represents a file name being written. -// in case that it is not, we fall back to the current directory. -func (a Args) Directory() string { - if info, err := os.Stat(a.Last); err == nil && info.IsDir() { - return fixPathForm(a.Last, a.Last) - } - dir := filepath.Dir(a.Last) - if info, err := os.Stat(dir); err != nil || !info.IsDir() { - return "./" - } - return fixPathForm(a.Last, dir) -} - -func newArgs(line string) Args { - var ( - all []string - completed []string - ) - parts := splitFields(line) - if len(parts) > 0 { - all = parts[1:] - completed = removeLast(parts[1:]) - } - return Args{ - All: all, - Completed: completed, - Last: last(parts), - LastCompleted: last(completed), - } -} - -// splitFields returns a list of fields from the given command line. -// If the last character is space, it appends an empty field in the end -// indicating that the field before it was completed. -// If the last field is of the form "a=b", it splits it to two fields: "a", "b", -// So it can be completed. -func splitFields(line string) []string { - parts := strings.Fields(line) - - // Add empty field if the last field was completed. - if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) { - parts = append(parts, "") - } - - // Treat the last field if it is of the form "a=b" - parts = splitLastEqual(parts) - return parts -} - -func splitLastEqual(line []string) []string { - if len(line) == 0 { - return line - } - parts := strings.Split(line[len(line)-1], "=") - return append(line[:len(line)-1], parts...) 
-} - -func (a Args) from(i int) Args { - if i > len(a.All) { - i = len(a.All) - } - a.All = a.All[i:] - - if i > len(a.Completed) { - i = len(a.Completed) - } - a.Completed = a.Completed[i:] - return a -} - -func removeLast(a []string) []string { - if len(a) > 0 { - return a[:len(a)-1] - } - return a -} - -func last(args []string) string { - if len(args) == 0 { - return "" - } - return args[len(args)-1] -} diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go deleted file mode 100644 index b99fe529..00000000 --- a/vendor/github.com/posener/complete/cmd/cmd.go +++ /dev/null @@ -1,128 +0,0 @@ -// Package cmd is used for command line options for the complete tool -package cmd - -import ( - "errors" - "flag" - "fmt" - "os" - "strings" - - "github.com/posener/complete/cmd/install" ) - -// CLI for command line -type CLI struct { - Name string - InstallName string - UninstallName string - - install bool - uninstall bool - yes bool -} - -const ( - defaultInstallName = "install" - defaultUninstallName = "uninstall" -) - -// Run is used when running complete in command line mode. -// This is used when complete is not completing words, but is being asked -// to install or uninstall itself. -func (f *CLI) Run() bool { - err := f.validate() - if err != nil { - os.Stderr.WriteString(err.Error() + "\n") - os.Exit(1) - } - - switch { - case f.install: - f.prompt() - err = install.Install(f.Name) - case f.uninstall: - f.prompt() - err = install.Uninstall(f.Name) - default: - // none of the action flags matched, - // returning false should make the real program execute - return false - } - - if err != nil { - fmt.Printf("%s failed! %s\n", f.action(), err) - os.Exit(3) - } - fmt.Println("Done!") - return true -} - -// prompt asks the user for approval, -// and exits if approval was not given -func (f *CLI) prompt() { - defer fmt.Println(f.action() + "ing...") - if f.yes { - return - } - fmt.Printf("%s completion for %s? ", f.action(), f.Name) - var answer string - fmt.Scanln(&answer) - - switch strings.ToLower(answer) { - case "y", "yes": - return - default: - fmt.Println("Cancelling...") - os.Exit(1) - } -} - -// AddFlags adds the CLI flags to the flag set. -// If flags is nil, the default command line flags will be taken. -// Pass non-empty strings as installName and uninstallName to override the default -// flag names. -func (f *CLI) AddFlags(flags *flag.FlagSet) { - if flags == nil { - flags = flag.CommandLine - } - - if f.InstallName == "" { - f.InstallName = defaultInstallName - } - if f.UninstallName == "" { - f.UninstallName = defaultUninstallName - } - - if flags.Lookup(f.InstallName) == nil { - flags.BoolVar(&f.install, f.InstallName, false, - fmt.Sprintf("Install completion for %s command", f.Name)) - } - if flags.Lookup(f.UninstallName) == nil { - flags.BoolVar(&f.uninstall, f.UninstallName, false, - fmt.Sprintf("Uninstall completion for %s command", f.Name)) - } - if flags.Lookup("y") == nil { - flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes' when installing completion") - } -} - -// validate the CLI -func (f *CLI) validate() error { - if f.install && f.uninstall { - return errors.New("Install and uninstall are mutually exclusive") - } - return nil -} - -// action name according to the CLI values.
-func (f *CLI) action() string { - switch { - case f.install: - return "Install" - case f.uninstall: - return "Uninstall" - default: - return "unknown" - } -} diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go deleted file mode 100644 index a287f998..00000000 --- a/vendor/github.com/posener/complete/cmd/install/bash.go +++ /dev/null @@ -1,32 +0,0 @@ -package install - -import "fmt" - -// (un)install in bash -// basically adds/removes from .bashrc: -// -// complete -C </path/to/complete/command> <command>
-type bash struct { - rc string -} - -func (b bash) Install(cmd, bin string) error { - completeCmd := b.cmd(cmd, bin) - if lineInFile(b.rc, completeCmd) { - return fmt.Errorf("already installed in %s", b.rc) - } - return appendToFile(b.rc, completeCmd) -} - -func (b bash) Uninstall(cmd, bin string) error { - completeCmd := b.cmd(cmd, bin) - if !lineInFile(b.rc, completeCmd) { - return fmt.Errorf("not installed in %s", b.rc) - } - - return removeFromFile(b.rc, completeCmd) -} - -func (bash) cmd(cmd, bin string) string { - return fmt.Sprintf("complete -C %s %s", bin, cmd) -} diff --git a/vendor/github.com/posener/complete/cmd/install/fish.go b/vendor/github.com/posener/complete/cmd/install/fish.go deleted file mode 100644 index 6467196b..00000000 --- a/vendor/github.com/posener/complete/cmd/install/fish.go +++ /dev/null @@ -1,56 +0,0 @@ -package install - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "text/template" -) - -// (un)install in fish - -type fish struct { - configDir string -} - -func (f fish) Install(cmd, bin string) error { - completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) - completeCmd, err := f.cmd(cmd, bin) - if err != nil { - return err - } - if _, err := os.Stat(completionFile); err == nil { - return fmt.Errorf("already installed at %s", completionFile) - } - - return createFile(completionFile, completeCmd) -} - -func (f fish) Uninstall(cmd, bin string) error { - completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) - if _, err := os.Stat(completionFile); err != nil { - return fmt.Errorf("not installed in %s", f.configDir) - } - - return os.Remove(completionFile) -} - -func (f fish) cmd(cmd, bin string) (string, error) { - var buf bytes.Buffer - params := struct{ Cmd, Bin string }{cmd, bin} - tmpl := template.Must(template.New("cmd").Parse(` -function __complete_{{.Cmd}} - set -lx COMP_LINE (string join ' ' (commandline -o)) - test (commandline -ct) = "" - and set COMP_LINE "$COMP_LINE " - {{.Bin}} -end -complete -c {{.Cmd}} -a "(__complete_{{.Cmd}})" -`)) - err := tmpl.Execute(&buf, params) - if err != nil { - return "", err - } - return buf.String(), nil -} diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go deleted file mode 100644 index dfa1963b..00000000 --- a/vendor/github.com/posener/complete/cmd/install/install.go +++ /dev/null @@ -1,119 +0,0 @@ -package install - -import ( - "errors" - "os" - "os/user" - "path/filepath" - - "github.com/hashicorp/go-multierror" -) - -type installer interface { - Install(cmd, bin string) error - Uninstall(cmd, bin string) error -} - -// Install installs the complete command given: -// cmd: the command name -func Install(cmd string) error { - is := installers() - if len(is) == 0 { - return errors.New("Did not find any shells to install") - } - bin, err := getBinaryPath() - if err != nil { - return err - } - - for _, i := range is { - errI := i.Install(cmd, bin) - if errI != nil { - err = multierror.Append(err, errI) - } - } - - return err -} - -// Uninstall uninstalls the complete command given: -// cmd: the command name -func Uninstall(cmd string) error { - is := installers() - if len(is) == 0 { - return errors.New("Did not find any shells to uninstall") - } - bin, err := getBinaryPath() - if err != nil { - return err - } - - for _, i := range is { - errI := i.Uninstall(cmd, bin) - if errI != nil { - err = multierror.Append(err, errI) - } - } - -
return err -} - -func installers() (i []installer) { - for _, rc := range [...]string{".bashrc", ".bash_profile", ".bash_login", ".profile"} { - if f := rcFile(rc); f != "" { - i = append(i, bash{f}) - break - } - } - if f := rcFile(".zshrc"); f != "" { - i = append(i, zsh{f}) - } - if d := fishConfigDir(); d != "" { - i = append(i, fish{d}) - } - return -} - -func fishConfigDir() string { - configDir := filepath.Join(getConfigHomePath(), "fish") - if configDir == "" { - return "" - } - if info, err := os.Stat(configDir); err != nil || !info.IsDir() { - return "" - } - return configDir -} - -func getConfigHomePath() string { - u, err := user.Current() - if err != nil { - return "" - } - - configHome := os.Getenv("XDG_CONFIG_HOME") - if configHome == "" { - return filepath.Join(u.HomeDir, ".config") - } - return configHome -} - -func getBinaryPath() (string, error) { - bin, err := os.Executable() - if err != nil { - return "", err - } - return filepath.Abs(bin) -} - -func rcFile(name string) string { - u, err := user.Current() - if err != nil { - return "" - } - path := filepath.Join(u.HomeDir, name) - if _, err := os.Stat(path); err != nil { - return "" - } - return path -} diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go deleted file mode 100644 index d34ac8ca..00000000 --- a/vendor/github.com/posener/complete/cmd/install/utils.go +++ /dev/null @@ -1,140 +0,0 @@ -package install - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" -) - -func lineInFile(name string, lookFor string) bool { - f, err := os.Open(name) - if err != nil { - return false - } - defer f.Close() - r := bufio.NewReader(f) - prefix := []byte{} - for { - line, isPrefix, err := r.ReadLine() - if err == io.EOF { - return false - } - if err != nil { - return false - } - if isPrefix { - prefix = append(prefix, line...) - continue - } - line = append(prefix, line...) - if string(line) == lookFor { - return true - } - prefix = prefix[:0] - } -} - -func createFile(name string, content string) error { - // make sure file directory exists - if err := os.MkdirAll(filepath.Dir(name), 0775); err != nil { - return err - } - - // create the file - f, err := os.Create(name) - if err != nil { - return err - } - defer f.Close() - - // write file content - _, err = f.WriteString(fmt.Sprintf("%s\n", content)) - return err -} - -func appendToFile(name string, content string) error { - f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0) - if err != nil { - return err - } - defer f.Close() - _, err = f.WriteString(fmt.Sprintf("\n%s\n", content)) - return err -} - -func removeFromFile(name string, content string) error { - backup := name + ".bck" - err := copyFile(name, backup) - if err != nil { - return err - } - temp, err := removeContentToTempFile(name, content) - if err != nil { - return err - } - - err = copyFile(temp, name) - if err != nil { - return err - } - - return os.Remove(backup) -} - -func removeContentToTempFile(name, content string) (string, error) { - rf, err := os.Open(name) - if err != nil { - return "", err - } - defer rf.Close() - wf, err := ioutil.TempFile("/tmp", "complete-") - if err != nil { - return "", err - } - defer wf.Close() - - r := bufio.NewReader(rf) - prefix := []byte{} - for { - line, isPrefix, err := r.ReadLine() - if err == io.EOF { - break - } - if err != nil { - return "", err - } - if isPrefix { - prefix = append(prefix, line...) - continue - } - line = append(prefix, line...) 
- str := string(line) - if str == content { - continue - } - _, err = wf.WriteString(str + "\n") - if err != nil { - return "", err - } - prefix = prefix[:0] - } - return wf.Name(), nil -} - -func copyFile(src string, dst string) error { - in, err := os.Open(src) - if err != nil { - return err - } - defer in.Close() - out, err := os.Create(dst) - if err != nil { - return err - } - defer out.Close() - _, err = io.Copy(out, in) - return err -} diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go deleted file mode 100644 index a625f53c..00000000 --- a/vendor/github.com/posener/complete/cmd/install/zsh.go +++ /dev/null @@ -1,39 +0,0 @@ -package install - -import "fmt" - -// (un)install in zsh -// basically adds/removes from .zshrc: -// -// autoload -U +X bashcompinit && bashcompinit -// complete -C </path/to/complete/command> <command>
-type zsh struct { - rc string -} - -func (z zsh) Install(cmd, bin string) error { - completeCmd := z.cmd(cmd, bin) - if lineInFile(z.rc, completeCmd) { - return fmt.Errorf("already installed in %s", z.rc) - } - - bashCompInit := "autoload -U +X bashcompinit && bashcompinit" - if !lineInFile(z.rc, bashCompInit) { - completeCmd = bashCompInit + "\n" + completeCmd - } - - return appendToFile(z.rc, completeCmd) -} - -func (z zsh) Uninstall(cmd, bin string) error { - completeCmd := z.cmd(cmd, bin) - if !lineInFile(z.rc, completeCmd) { - return fmt.Errorf("not installed in %s", z.rc) - } - - return removeFromFile(z.rc, completeCmd) -} - -func (zsh) cmd(cmd, bin string) string { - return fmt.Sprintf("complete -o nospace -C %s %s", bin, cmd) -} diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go deleted file mode 100644 index 82d37d52..00000000 --- a/vendor/github.com/posener/complete/command.go +++ /dev/null @@ -1,111 +0,0 @@ -package complete - -// Command represents a command line. -// It holds the data that enables auto completion of the command line. -// Command can also be a sub command. -type Command struct { - // Sub is a map of sub commands of the current command. - // The key refers to the sub command name, and the value is its - // Command descriptive struct. - Sub Commands - - // Flags is a map of flags that the command accepts. - // The key is the flag name, and the value is its predictions. - Flags Flags - - // GlobalFlags is a map of flags that the command accepts. - // Global flags can also appear after a sub command. - GlobalFlags Flags - - // Args are extra arguments that the command accepts, those that are - // given without any flag before them. - Args Predictor -} - -// Predict returns all possible predictions for args according to the command struct -func (c *Command) Predict(a Args) []string { - options, _ := c.predict(a) - return options -} - -// Commands is the type of the Sub member, it maps a command name to a command struct -type Commands map[string]Command - -// Predict completion of sub command names according to command line arguments -func (c Commands) Predict(a Args) (prediction []string) { - for sub := range c { - prediction = append(prediction, sub) - } - return -} - -// Flags is the type of the Flags member, it maps a flag name to the flag predictions. -type Flags map[string]Predictor - -// Predict completion of flag names according to command line arguments -func (f Flags) Predict(a Args) (prediction []string) { - for flag := range f { - // If the flag starts with a hyphen, we avoid emitting the prediction - // unless the last typed arg contains a hyphen as well. - flagHyphenStart := len(flag) != 0 && flag[0] == '-' - lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-' - if flagHyphenStart && !lastHyphenStart { - continue - } - prediction = append(prediction, flag) - } - return -} - -// predict options -// only is set to true if no more options are allowed to be returned, -// those are in cases of a special flag that has specific completion arguments, -// and other flags or sub commands can't come after it. -func (c *Command) predict(a Args) (options []string, only bool) { - - // search sub commands for predictions first - subCommandFound := false - for i, arg := range a.Completed { - if cmd, ok := c.Sub[arg]; ok { - subCommandFound = true - - // recursive call for sub command - options, only = cmd.predict(a.from(i)) - if only { - return - } - - // We matched so stop searching.
Continuing to search can accidentally - match a subcommand with the current set of commands, see issue #46. - break - } - } - - // if the last completed word is a global flag that we need to complete - if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil { - Log("Predicting according to global flag %s", a.LastCompleted) - return predictor.Predict(a), true - } - - options = append(options, c.GlobalFlags.Predict(a)...) - - // if a sub command was entered, we won't add the parent command - // completions and we return here. - if subCommandFound { - return - } - - // if the last completed word is a command flag that we need to complete - if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil { - Log("Predicting according to flag %s", a.LastCompleted) - return predictor.Predict(a), true - } - - options = append(options, c.Sub.Predict(a)...) - options = append(options, c.Flags.Predict(a)...) - if c.Args != nil { - options = append(options, c.Args.Predict(a)...) - } - - return -} diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go deleted file mode 100644 index 725c4deb..00000000 --- a/vendor/github.com/posener/complete/complete.go +++ /dev/null @@ -1,109 +0,0 @@ -// Package complete provides a tool for writing bash completion in Go. -// -// Writing bash completion scripts is hard work. This package provides an easy way -// to create bash completion scripts for any command, and also an easy way to install/uninstall -// the completion of the command. -package complete - -import ( - "flag" - "fmt" - "io" - "os" - "strconv" - - "github.com/posener/complete/cmd" - "github.com/posener/complete/match" -) - -const ( - envLine = "COMP_LINE" - envPoint = "COMP_POINT" - envDebug = "COMP_DEBUG" -) - -// Complete structs define completion for a command with CLI options -type Complete struct { - Command Command - cmd.CLI - Out io.Writer -} - -// New creates a new complete command. -// name is the name of the command we want to auto complete. -// IMPORTANT: it must be the same name - if the auto complete -// completes the 'go' command, name must be equal to "go". -// command is the struct of the command completion. -func New(name string, command Command) *Complete { - return &Complete{ - Command: command, - CLI: cmd.CLI{Name: name}, - Out: os.Stdout, - } -} - -// Run runs the completion and adds installation flags beforehand. -// The flags are added to the main flag CommandLine variable. -func (c *Complete) Run() bool { - c.AddFlags(nil) - flag.Parse() - return c.Complete() -} - -// Complete a command from the completion line in the environment variable, -// and print out the complete options. -// Returns success if the completion ran or if the cli matched -// any of the given flags, false otherwise. -// For installation: it assumes that flags were added and parsed before -// it was called.
-func (c *Complete) Complete() bool { - line, point, ok := getEnv() - if !ok { - // make sure flags parsed, - // in case they were not added in the main program - return c.CLI.Run() - } - - if point >= 0 && point < len(line) { - line = line[:point] - } - - Log("Completing phrase: %s", line) - a := newArgs(line) - Log("Completing last field: %s", a.Last) - options := c.Command.Predict(a) - Log("Options: %s", options) - - // filter only options that match the last argument - matches := []string{} - for _, option := range options { - if match.Prefix(option, a.Last) { - matches = append(matches, option) - } - } - Log("Matches: %s", matches) - c.output(matches) - return true -} - -func getEnv() (line string, point int, ok bool) { - line = os.Getenv(envLine) - if line == "" { - return - } - point, err := strconv.Atoi(os.Getenv(envPoint)) - if err != nil { - // If failed parsing point for some reason, set it to point - // on the end of the line. - Log("Failed parsing point %s: %v", os.Getenv(envPoint), err) - point = len(line) - } - return line, point, true -} - -func (c *Complete) output(options []string) { - // stdout of program defines the complete options - for _, option := range options { - fmt.Fprintln(c.Out, option) - } -} diff --git a/vendor/github.com/posener/complete/go.mod b/vendor/github.com/posener/complete/go.mod deleted file mode 100644 index fef0c440..00000000 --- a/vendor/github.com/posener/complete/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/posener/complete - -require github.com/hashicorp/go-multierror v1.0.0 diff --git a/vendor/github.com/posener/complete/go.sum b/vendor/github.com/posener/complete/go.sum deleted file mode 100644 index d2f13301..00000000 --- a/vendor/github.com/posener/complete/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go deleted file mode 100644 index c3029556..00000000 --- a/vendor/github.com/posener/complete/log.go +++ /dev/null @@ -1,22 +0,0 @@ -package complete - -import ( - "io/ioutil" - "log" - "os" -) - -// Log is used for debugging purposes -// since complete is running on tab completion, it is nice to -// have logs to the stderr (when writing your own completer) -// to write logs, set the COMP_DEBUG environment variable and -// use complete.Log in the complete program -var Log = getLogger() - -func getLogger() func(format string, args ...interface{}) { - var logfile = ioutil.Discard - if os.Getenv(envDebug) != "" { - logfile = os.Stderr - } - return log.New(logfile, "complete ", log.Flags()).Printf -} diff --git a/vendor/github.com/posener/complete/match/file.go b/vendor/github.com/posener/complete/match/file.go deleted file mode 100644 index 051171e8..00000000 --- a/vendor/github.com/posener/complete/match/file.go +++ /dev/null @@ -1,19 +0,0 @@ -package match - -import "strings" - -// File returns true if prefix can match the file -func File(file, prefix string) bool { - // special case for current directory completion - if file == "./" && (prefix == "." 
|| prefix == "") { - return true - } - if prefix == "." && strings.HasPrefix(file, ".") { - return true - } - - file = strings.TrimPrefix(file, "./") - prefix = strings.TrimPrefix(prefix, "./") - - return strings.HasPrefix(file, prefix) -} diff --git a/vendor/github.com/posener/complete/match/match.go b/vendor/github.com/posener/complete/match/match.go deleted file mode 100644 index 812fcac9..00000000 --- a/vendor/github.com/posener/complete/match/match.go +++ /dev/null @@ -1,6 +0,0 @@ -package match - -// Match matches two strings -// it is used for comparing a term to the last typed -// word, the prefix, and see if it is a possible auto complete option. -type Match func(term, prefix string) bool diff --git a/vendor/github.com/posener/complete/match/prefix.go b/vendor/github.com/posener/complete/match/prefix.go deleted file mode 100644 index 9a01ba63..00000000 --- a/vendor/github.com/posener/complete/match/prefix.go +++ /dev/null @@ -1,9 +0,0 @@ -package match - -import "strings" - -// Prefix is a simple Matcher, if the word is it's prefix, there is a match -// Match returns true if a has the prefix as prefix -func Prefix(long, prefix string) bool { - return strings.HasPrefix(long, prefix) -} diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go deleted file mode 100644 index 82070632..00000000 --- a/vendor/github.com/posener/complete/predict.go +++ /dev/null @@ -1,41 +0,0 @@ -package complete - -// Predictor implements a predict method, in which given -// command line arguments returns a list of options it predicts. -type Predictor interface { - Predict(Args) []string -} - -// PredictOr unions two predicate functions, so that the result predicate -// returns the union of their predication -func PredictOr(predictors ...Predictor) Predictor { - return PredictFunc(func(a Args) (prediction []string) { - for _, p := range predictors { - if p == nil { - continue - } - prediction = append(prediction, p.Predict(a)...) - } - return - }) -} - -// PredictFunc determines what terms can follow a command or a flag -// It is used for auto completion, given last - the last word in the already -// in the command line, what words can complete it. -type PredictFunc func(Args) []string - -// Predict invokes the predict function and implements the Predictor interface -func (p PredictFunc) Predict(a Args) []string { - if p == nil { - return nil - } - return p(a) -} - -// PredictNothing does not expect anything after. -var PredictNothing Predictor - -// PredictAnything expects something, but nothing particular, such as a number -// or arbitrary name. -var PredictAnything = PredictFunc(func(Args) []string { return nil }) diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go deleted file mode 100644 index c8adf7e8..00000000 --- a/vendor/github.com/posener/complete/predict_files.go +++ /dev/null @@ -1,108 +0,0 @@ -package complete - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/posener/complete/match" -) - -// PredictDirs will search for directories in the given started to be typed -// path, if no path was started to be typed, it will complete to directories -// in the current working directory. 
-func PredictDirs(pattern string) Predictor { - return files(pattern, false) -} - -// PredictFiles will search for files matching the given pattern in the started to -// be typed path, if no path was started to be typed, it will complete to files that -// match the pattern in the current working directory. -// To match any file, use "*" as pattern. To match go files use "*.go", and so on. -func PredictFiles(pattern string) Predictor { - return files(pattern, true) -} - -func files(pattern string, allowFiles bool) PredictFunc { - - // search for files according to arguments, - // if only one directory has matched the result, search recursively into - // this directory to give more results. - return func(a Args) (prediction []string) { - prediction = predictFiles(a, pattern, allowFiles) - - // if the number of prediction is not 1, we either have many results or - // have no results, so we return it. - if len(prediction) != 1 { - return - } - - // only try deeper, if the one item is a directory - if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() { - return - } - - a.Last = prediction[0] - return predictFiles(a, pattern, allowFiles) - } -} - -func predictFiles(a Args, pattern string, allowFiles bool) []string { - if strings.HasSuffix(a.Last, "/..") { - return nil - } - - dir := a.Directory() - files := listFiles(dir, pattern, allowFiles) - - // add dir if match - files = append(files, dir) - - return PredictFilesSet(files).Predict(a) -} - -// PredictFilesSet predict according to file rules to a given set of file names -func PredictFilesSet(files []string) PredictFunc { - return func(a Args) (prediction []string) { - // add all matching files to prediction - for _, f := range files { - f = fixPathForm(a.Last, f) - - // test matching of file to the argument - if match.File(f, a.Last) { - prediction = append(prediction, f) - } - } - return - } -} - -func listFiles(dir, pattern string, allowFiles bool) []string { - // set of all file names - m := map[string]bool{} - - // list files - if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil { - for _, f := range files { - if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles { - m[f] = true - } - } - } - - // list directories - if dirs, err := ioutil.ReadDir(dir); err == nil { - for _, d := range dirs { - if d.IsDir() { - m[filepath.Join(dir, d.Name())] = true - } - } - } - - list := make([]string, 0, len(m)) - for k := range m { - list = append(list, k) - } - return list -} diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go deleted file mode 100644 index fa4a34ae..00000000 --- a/vendor/github.com/posener/complete/predict_set.go +++ /dev/null @@ -1,12 +0,0 @@ -package complete - -// PredictSet expects specific set of terms, given in the options argument. -func PredictSet(options ...string) Predictor { - return predictSet(options) -} - -type predictSet []string - -func (p predictSet) Predict(a Args) []string { - return p -} diff --git a/vendor/github.com/posener/complete/readme.md b/vendor/github.com/posener/complete/readme.md deleted file mode 100644 index 6d757ef8..00000000 --- a/vendor/github.com/posener/complete/readme.md +++ /dev/null @@ -1,118 +0,0 @@ -# complete - -A tool for bash writing bash completion in go, and bash completion for the go command line. 
- -[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete) -[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete) -[![golangci](https://golangci.com/badges/github.com/posener/complete.svg)](https://golangci.com/r/github.com/posener/complete) -[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete) -[![Go Report Card](https://goreportcard.com/badge/github.com/posener/complete)](https://goreportcard.com/report/github.com/posener/complete) - -Writing bash completion scripts is hard work. This package provides an easy way -to create bash completion scripts for any command, and also an easy way to install/uninstall -the completion of the command. - -## go command bash completion - -In [gocomplete](./gocomplete) there is an example for bash completion for the `go` command line. - -This is an example that uses the `complete` package on the `go` command - the `complete` package -can also be used to implement any completions, see [Usage](#usage). - -### Install - -1. Type in your shell: -``` -go get -u github.com/posener/complete/gocomplete -gocomplete -install -``` - -2. Restart your shell - -Uninstall by `gocomplete -uninstall` - -### Features - -- Complete `go` command, including sub commands and all flags. -- Complete package names or `.go` files when necessary. -- Complete test names after `-run` flag. - -## complete package - -Supported shells: - -- [x] bash -- [x] zsh -- [x] fish - -### Usage - -Assuming you have a program called `run` and you want to have bash completion -for it, meaning, if you type `run` then space, then press the `Tab` key, -the shell will suggest relevant complete options. - -In that case, we will create a program called `runcomplete`, a Go program -with a `func main()`, that will provide the completion for the `run` -program. Once `runcomplete` is built into a binary, we can run -`runcomplete -install`, which will add all the bash completion -options for `run` to our shell. - -So here it is: - -```go -import "github.com/posener/complete" - -func main() { - - // create a Command object, that represents the command we want - // to complete. - run := complete.Command{ - - // Sub defines a list of sub commands of the program, - // this is recursive, since every command is of type command also. - Sub: complete.Commands{ - - // add a build sub command - "build": complete.Command { - - // define flags of the build sub command - Flags: complete.Flags{ - // build sub command has a flag '-cpus', which - // expects a number of cpus after it. In that case, - // anything could complete this flag. - "-cpus": complete.PredictAnything, - }, - }, - }, - - // define flags of the 'run' main command - Flags: complete.Flags{ - // a flag -o, which expects a file ending with .out after - // it, the tab completion will auto complete for files matching - // the given pattern. - "-o": complete.PredictFiles("*.out"), - }, - - // define global flags of the 'run' main command - // those will show up also when a sub command was entered in the - // command line - GlobalFlags: complete.Flags{ - - // a flag '-h' which does not expect anything after it - "-h": complete.PredictNothing, - }, - } - - // run the command completion, as part of the main() function. - // this triggers the autocompletion when needed.
- // name must be exactly the same as the binary that we want to complete. - complete.New("run", run).Run() -} -``` - -### Self completing program - -If the program that we want to complete is written in Go, we -can make it self-completing. - -Here is an [example](./example/self/main.go) diff --git a/vendor/github.com/posener/complete/test.sh b/vendor/github.com/posener/complete/test.sh deleted file mode 100644 index 56bfcf15..00000000 --- a/vendor/github.com/posener/complete/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list ./... | grep -v vendor); do - go test -v -race -coverprofile=profile.out -covermode=atomic $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done \ No newline at end of file diff --git a/vendor/github.com/posener/complete/utils.go b/vendor/github.com/posener/complete/utils.go deleted file mode 100644 index 58b8b792..00000000 --- a/vendor/github.com/posener/complete/utils.go +++ /dev/null @@ -1,46 +0,0 @@ -package complete - -import ( - "os" - "path/filepath" - "strings" -) - -// fixPathForm changes a file name to a relative name -func fixPathForm(last string, file string) string { - // get working directory for relative name - workDir, err := os.Getwd() - if err != nil { - return file - } - - abs, err := filepath.Abs(file) - if err != nil { - return file - } - - // if last is absolute, return path as absolute - if filepath.IsAbs(last) { - return fixDirPath(abs) - } - - rel, err := filepath.Rel(workDir, abs) - if err != nil { - return file - } - - // fix ./ prefix of path - if rel != "." && strings.HasPrefix(last, ".") { - rel = "./" + rel - } - - return fixDirPath(rel) -} - -func fixDirPath(path string) string { - info, err := os.Stat(path) - if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") { - path += "/" - } - return path -} diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml deleted file mode 100644 index 0637db72..00000000 --- a/vendor/github.com/spf13/afero/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go - -go: - - 1.9 - - "1.10" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build - - go test -race -v ./... - diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt deleted file mode 100644 index 298f0e26..00000000 --- a/vendor/github.com/spf13/afero/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity.
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md deleted file mode 100644 index 0c9b04b5..00000000 --- a/vendor/github.com/spf13/afero/README.md +++ /dev/null @@ -1,452 +0,0 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) - -A FileSystem Abstraction System for Go - -[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -# Overview - -Afero is a filesystem framework providing a simple, uniform and universal API -for interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. - -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power -and benefit of the os and ioutil packages.
- -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. - -It is suitable for use in any situation where you would consider using the OS -package as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability. - - -## Afero Features - -* A single consistent API for accessing a variety of filesystems -* Interoperation between a variety of file system types -* A set of interfaces to encourage and enforce interoperability between backends -* An atomic cross platform memory backed file system -* Support for compositional (union) file systems by combining multiple file systems acting as one -* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) -* A set of utility functions ported from io, ioutil & hugo to be afero aware - - -# Using Afero - -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define your own file system. -* Wrap the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero - -Next include Afero in your application. -```go -import "github.com/spf13/afero" -``` - -## Step 2: Declare a backend - -First define a package variable and set it to a pointer to a filesystem. -```go -var AppFs = afero.NewMemMapFs() - -or - -var AppFs = afero.NewOsFs() -``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. - -## Step 3: Use it like you would the OS package - -Throughout your application use any function and method like you normally -would. - -So if my application before had: -```go -os.Open("/tmp/foo") -``` -We would replace it with: -```go -AppFs.Open("/tmp/foo") -``` - -`AppFs` being the variable we defined above. - - -## List of all available functions - -File System Methods Available: -```go -Chmod(name string, mode os.FileMode) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error -``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. - -## Using Afero's utility functions - -Afero provides a set of functions to make it easier to use the underlying file systems.
-These functions have been primarily ported from io & ioutil with some developed for Hugo. - -The afero utilities support all afero compatible backends. - -The list of utilities includes: - -```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) - -They are available under two different approaches. You can either call -them directly where the first parameter of each function will be the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. - -### Calling utilities directly - -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs, "", "ioutil-test") - -``` - -### Calling via Afero - -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` - -## Using Afero for Testing - -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and can be easily -reproducible regardless of OS. You could create files to your heart’s content -and the file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. - -* Much faster than performing I/O operations on disk -* Avoid security issues and permissions -* Far more control. 'rm -rf /' with confidence -* Test setup is far easier to do -* No test cleanup needed - -One way to accomplish this is to define a variable as mentioned above. -In your application this will be set to afero.NewOsFs(); during testing you -can set it to afero.NewMemMapFs(). - -It wouldn't be uncommon to have each test initialize a blank slate memory -backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere -appropriate in my application code. This approach ensures that tests are order -independent, with no test relying on the state left by an earlier test. - -Then in my tests I would initialize a new MemMapFs for each test: -```go -func TestExist(t *testing.T) { - appFS := afero.NewMemMapFs() - // create test files and directories - appFS.MkdirAll("src/a", 0755) - afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) - afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - name := "src/c" - _, err := appFS.Stat(name) - if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) - } -} -``` - -# Available Backends - -## Operating System Native - -### OsFs - -The first is simply a wrapper around the native OS calls. This makes it -very easy to use as all of the calls are the same as the existing OS -calls. It also makes it trivial to have your code use the OS during -operation and a mock filesystem during testing or as needed.
- -```go -appfs := afero.NewOsFs() -appfs.MkdirAll("src/a", 0755) -``` - -## Memory Backed Storage - -### MemMapFs - -Afero also provides a fully atomic memory backed filesystem perfect for use in -mocking and to speed up unnecessary disk io when persistence isn’t -necessary. It is fully concurrent and will work within goroutines -safely. - -```go -mm := afero.NewMemMapFs() -mm.MkdirAll("src/a", 0755) -``` - -#### InMemoryFile - -As part of MemMapFs, Afero also provides an atomic, fully concurrent memory -backed file implementation. This can be used in other memory backed file -systems with ease. Plans are to add a radix tree memory stored file -system using InMemoryFile. - -## Network Interfaces - -### SftpFs - -Afero has experimental support for secure file transfer protocol (sftp), which can -be used to perform file operations over an encrypted channel. - -## Filtering Backends - -### BasePathFs - -The BasePathFs restricts all operations to a given path within an Fs. -File names given to the operations on this Fs will be prepended with -the base path before calling the source Fs. - -```go -bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") -``` - -### ReadOnlyFs - -A thin wrapper around the source Fs providing a read only view. - -```go -fs := afero.NewReadOnlyFs(afero.NewOsFs()) -_, err := fs.Create("/file.txt") -// err = syscall.EPERM -``` - -### RegexpFs - -A filtered view on file names; any file NOT matching -the passed regexp will be treated as non-existing. -Files not matching the regexp provided will not be created. -Directories are not filtered. - -```go -fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) -_, err := fs.Create("/file.html") -// err = syscall.ENOENT -``` - -### HttpFs - -Afero provides an http compatible backend which can wrap any of the existing -backends. - -The Http package requires a slightly specific version of Open which -returns an http.File type. - -Afero provides an httpFs file system which satisfies this requirement. -Any Afero FileSystem can be used as an httpFs. - -```go -httpFs := afero.NewHttpFs(<ExistingFS>) -fileserver := http.FileServer(httpFs.Dir(<PATH>)) -http.Handle("/", fileserver) -``` - -## Composite Backends - -Afero provides the ability to have two filesystems (or more) act as a single -file system. - -### CacheOnReadFs - -The CacheOnReadFs will lazily make copies of any accessed files from the base -layer into the overlay. Subsequent reads will be pulled from the overlay -directly, provided the request is within the cache duration of when it was -created in the overlay. - -If the base filesystem is writeable, any changes to files will be -done first to the base, then to the overlay layer. Write calls to open file -handles like `Write()` or `Truncate()` go to the overlay first. - -To write files to the overlay only, you can use the overlay Fs directly (not -via the union Fs). - -Files are cached in the layer for the given time.Duration; a cache duration of 0 -means "forever", meaning the file will never be re-requested from the base. - -A read-only base will make the overlay also read-only but still copy files -from the base to the overlay when they're not present (or outdated) in the -caching layer. - -```go -base := afero.NewOsFs() -layer := afero.NewMemMapFs() -ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) -``` - -### CopyOnWriteFs() - -The CopyOnWriteFs is a read only base file system with a potentially -writeable layer on top.
- -Read operations will first look in the overlay and if not found there, will -serve the file from the base. - -Changes to the file system will only be made in the overlay. - -Any attempt to modify a file found only in the base will copy the file to the -overlay layer before modification (including opening a file with a writable -handle). - -Removing and Renaming files present only in the base layer is not currently -permitted. If a file is present in the base layer and the overlay, only the -overlay will be removed/renamed. - -```go - base := afero.NewOsFs() - roBase := afero.NewReadOnlyFs(base) - ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) - - fh, _ = ufs.Create("/home/test/file2.txt") - fh.WriteString("This is a test") - fh.Close() -``` - -In this example all write operations will only occur in memory (MemMapFs) -leaving the base filesystem (OsFs) untouched. - - -## Desired/possible backends - -The following is a short list of possible backends we hope someone will -implement: - -* SSH -* ZIP -* TAR -* S3 - -# About the project - -## What's in the name - -Afero comes from the latin roots Ad-Facere. - -**"Ad"** is a prefix meaning "to". - -**"Facere"** is a form of the root "faciō" making "make or do". - -The literal meaning of afero is "to make" or "to do" which seems very fitting -for a library that allows one to make files and directories and do things with them. - -The English word that shares the same roots as Afero is "affair". Affair shares -the same concept but as a noun it means "something that is made or done" or "an -object of a particular type". - -It's also nice that unlike some of my other libraries (hugo, cobra, viper) it -Googles very well. - -## Release Notes - -* **0.10.0** 2015.12.10 - * Full compatibility with Windows - * Introduction of afero utilities - * Test suite rewritten to work cross platform - * Normalize paths for MemMapFs - * Adding Sync to the file interface - * **Breaking Change** Walk and ReadDir have changed parameter order - * Moving types used by MemMapFs to a subpackage - * General bugfixes and improvements -* **0.9.0** 2015.11.05 - * New Walk function similar to filepath.Walk - * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC - * MemMapFs.Remove now really deletes the file - * InMemoryFile.Readdir and Readdirnames work correctly - * InMemoryFile functions lock it for concurrent access - * Test suite improvements -* **0.8.0** 2014.10.28 - * First public version - * Interfaces feel ready for people to build using - * Interfaces satisfy all known uses - * MemMapFs passes the majority of the OS test suite - * OsFs passes the majority of the OS test suite - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13) -* [jaqx0r](https://github.com/jaqx0r) -* [mbertschler](https://github.com/mbertschler) -* [xor-gate](https://github.com/xor-gate) - -## License - -Afero is released under the Apache 2.0 license. 
See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go deleted file mode 100644 index f5b5e127..00000000 --- a/vendor/github.com/spf13/afero/afero.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package afero provides types and methods for interacting with the filesystem, -// as an abstraction layer. - -// Afero also provides a few implementations that are mostly interoperable. One that -// uses the operating system filesystem, one that uses memory to store files -// (cross platform) and an interface that should be implemented if you want to -// provide your own filesystem. - -package afero - -import ( - "errors" - "io" - "os" - "time" -) - -type Afero struct { - Fs -} - -// File represents a file in the filesystem. -type File interface { - io.Closer - io.Reader - io.ReaderAt - io.Seeker - io.Writer - io.WriterAt - - Name() string - Readdir(count int) ([]os.FileInfo, error) - Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) - Sync() error - Truncate(size int64) error - WriteString(s string) (ret int, err error) -} - -// Fs is the filesystem interface. -// -// Any simulated or real filesystem should implement this interface. -type Fs interface { - // Create creates a file in the filesystem, returning the file and an - // error, if any happens. - Create(name string) (File, error) - - // Mkdir creates a directory in the filesystem, return an error if any - // happens. - Mkdir(name string, perm os.FileMode) error - - // MkdirAll creates a directory path and all parents that does not exist - // yet. - MkdirAll(path string, perm os.FileMode) error - - // Open opens a file, returning it or an error, if any happens. - Open(name string) (File, error) - - // OpenFile opens a file using the given flags and the given mode. - OpenFile(name string, flag int, perm os.FileMode) (File, error) - - // Remove removes a file identified by name, returning an error, if any - // happens. - Remove(name string) error - - // RemoveAll removes a directory path and any children it contains. It - // does not fail if the path does not exist (return nil). - RemoveAll(path string) error - - // Rename renames a file. - Rename(oldname, newname string) error - - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(name string) (os.FileInfo, error) - - // The name of this FileSystem - Name() string - - //Chmod changes the mode of the named file to mode. 
- Chmod(name string, mode os.FileMode) error - - //Chtimes changes the access and modification times of the named file - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml deleted file mode 100644 index a633ad50..00000000 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: '{build}' -clone_folder: C:\gopath\src\github.com\spf13\afero -environment: - GOPATH: C:\gopath -build_script: -- cmd: >- - go version - - go env - - go get -v github.com/spf13/afero/... - - go build github.com/spf13/afero -test_script: -- cmd: go test -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go deleted file mode 100644 index 616ff8ff..00000000 --- a/vendor/github.com/spf13/afero/basepath.go +++ /dev/null @@ -1,180 +0,0 @@ -package afero - -import ( - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -var _ Lstater = (*BasePathFs)(nil) - -// The BasePathFs restricts all operations to a given path within an Fs. -// The given file name to the operations on this Fs will be prepended with -// the base path before calling the base Fs. -// Any file name (after filepath.Clean()) outside this base path will be -// treated as non existing file. -// -// Note that it does not clean the error messages on return, so you may -// reveal the real path on errors. -type BasePathFs struct { - source Fs - path string -} - -type BasePathFile struct { - File - path string -} - -func (f *BasePathFile) Name() string { - sourcename := f.File.Name() - return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) -} - -func NewBasePathFs(source Fs, path string) Fs { - return &BasePathFs{source: source, path: path} -} - -// on a file outside the base path it returns the given file name and an error, -// else the given file with the base path prepended -func (b *BasePathFs) RealPath(name string) (path string, err error) { - if err := validateBasePathName(name); err != nil { - return name, err - } - - bpath := filepath.Clean(b.path) - path = filepath.Clean(filepath.Join(bpath, name)) - if !strings.HasPrefix(path, bpath) { - return name, os.ErrNotExist - } - - return path, nil -} - -func validateBasePathName(name string) error { - if runtime.GOOS != "windows" { - // Not much to do here; - // the virtual file paths all look absolute on *nix. - return nil - } - - // On Windows a common mistake would be to provide an absolute OS path - // We could strip out the base part, but that would not be very portable. 
- if filepath.IsAbs(name) { - return os.ErrNotExist - } - - return nil -} - -func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chtimes", Path: name, Err: err} - } - return b.source.Chtimes(name, atime, mtime) -} - -func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chmod", Path: name, Err: err} - } - return b.source.Chmod(name, mode) -} - -func (b *BasePathFs) Name() string { - return "BasePathFs" -} - -func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "stat", Path: name, Err: err} - } - return b.source.Stat(name) -} - -func (b *BasePathFs) Rename(oldname, newname string) (err error) { - if oldname, err = b.RealPath(oldname); err != nil { - return &os.PathError{Op: "rename", Path: oldname, Err: err} - } - if newname, err = b.RealPath(newname); err != nil { - return &os.PathError{Op: "rename", Path: newname, Err: err} - } - return b.source.Rename(oldname, newname) -} - -func (b *BasePathFs) RemoveAll(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove_all", Path: name, Err: err} - } - return b.source.RemoveAll(name) -} - -func (b *BasePathFs) Remove(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - return b.source.Remove(name) -} - -func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "openfile", Path: name, Err: err} - } - sourcef, err := b.source.OpenFile(name, flag, mode) - if err != nil { - return nil, err - } - return &BasePathFile{sourcef, b.path}, nil -} - -func (b *BasePathFs) Open(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} - } - sourcef, err := b.source.Open(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.Mkdir(name, mode) -} - -func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.MkdirAll(name, mode) -} - -func (b *BasePathFs) Create(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "create", Path: name, Err: err} - } - sourcef, err := b.source.Create(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - name, err := b.RealPath(name) - if err != nil { - return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} - } - if lstater, ok := b.source.(Lstater); ok { - return lstater.LstatIfPossible(name) - } - fi, err := b.source.Stat(name) - return fi, false, err -} - -// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go 
b/vendor/github.com/spf13/afero/cacheOnReadFs.go deleted file mode 100644 index 29a26c67..00000000 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ /dev/null @@ -1,290 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -// If the cache duration is 0, cache time will be unlimited, i.e. once -// a file is in the layer, the base will never be read again for this file. -// -// For cache times greater than 0, the modification time of a file is -// checked. Note that a lot of file system implementations only allow a -// resolution of a second for timestamps... or as the godoc for os.Chtimes() -// states: "The underlying filesystem may truncate or round the values to a -// less precise time unit." -// -// This caching union will forward all write calls also to the base file -// system first. To prevent writing to the base Fs, wrap it in a read-only -// filter - Note: this will also make the overlay read-only, for writing files -// in the overlay, use the overlay Fs directly, not via the union Fs. -type CacheOnReadFs struct { - base Fs - layer Fs - cacheTime time.Duration -} - -func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { - return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} -} - -type cacheState int - -const ( - // not present in the overlay, unknown if it exists in the base: - cacheMiss cacheState = iota - // present in the overlay and in base, base file is newer: - cacheStale - // present in the overlay - with cache time == 0 it may exist in the base, - // with cacheTime > 0 it exists in the base and is same age or newer in the - // overlay - cacheHit - // happens if someone writes directly to the overlay without - // going through this union - cacheLocal -) - -func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { - var lfi, bfi os.FileInfo - lfi, err = u.layer.Stat(name) - if err == nil { - if u.cacheTime == 0 { - return cacheHit, lfi, nil - } - if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { - bfi, err = u.base.Stat(name) - if err != nil { - return cacheLocal, lfi, nil - } - if bfi.ModTime().After(lfi.ModTime()) { - return cacheStale, bfi, nil - } - } - return cacheHit, lfi, nil - } - - if err == syscall.ENOENT || os.IsNotExist(err) { - return cacheMiss, nil, nil - } - - return cacheMiss, nil, err -} - -func (u *CacheOnReadFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chtimes(name, atime, mtime) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chtimes(name, atime, mtime) - } - if err != nil { - return err - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chmod(name, mode) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chmod(name, mode) - } - if err != nil { - return err - } - return u.layer.Chmod(name, mode) -} - -func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheMiss: - return 
u.base.Stat(name) - default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo - return fi, nil - } -} - -func (u *CacheOnReadFs) Rename(oldname, newname string) error { - st, _, err := u.cacheStatus(oldname) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Rename(oldname, newname) - case cacheStale, cacheMiss: - if err := u.copyToLayer(oldname); err != nil { - return err - } - err = u.base.Rename(oldname, newname) - } - if err != nil { - return err - } - return u.layer.Rename(oldname, newname) -} - -func (u *CacheOnReadFs) Remove(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.Remove(name) - } - if err != nil { - return err - } - return u.layer.Remove(name) -} - -func (u *CacheOnReadFs) RemoveAll(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.RemoveAll(name) - } - if err != nil { - return err - } - return u.layer.RemoveAll(name) -} - -func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - st, _, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheLocal, cacheHit: - default: - if err := u.copyToLayer(name); err != nil { - return nil, err - } - } - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - bfi, err := u.base.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - lfi, err := u.layer.OpenFile(name, flag, perm) - if err != nil { - bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? - return nil, err - } - return &UnionFile{Base: bfi, Layer: lfi}, nil - } - return u.layer.OpenFile(name, flag, perm) -} - -func (u *CacheOnReadFs) Open(name string) (File, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - - switch st { - case cacheLocal: - return u.layer.Open(name) - - case cacheMiss: - bfi, err := u.base.Stat(name) - if err != nil { - return nil, err - } - if bfi.IsDir() { - return u.base.Open(name) - } - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - - case cacheStale: - if !fi.IsDir() { - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - } - case cacheHit: - if !fi.IsDir() { - return u.layer.Open(name) - } - } - // the dirs from cacheHit, cacheStale fall down here: - bfile, _ := u.base.Open(name) - lfile, err := u.layer.Open(name) - if err != nil && bfile == nil { - return nil, err - } - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { - err := u.base.Mkdir(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) // yes, MkdirAll... 
we cannot assume it exists in the cache -} - -func (u *CacheOnReadFs) Name() string { - return "CacheOnReadFs" -} - -func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { - err := u.base.MkdirAll(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CacheOnReadFs) Create(name string) (File, error) { - bfh, err := u.base.Create(name) - if err != nil { - return nil, err - } - lfh, err := u.layer.Create(name) - if err != nil { - // oops, see comment about OS_TRUNC above, should we remove? then we have to - // remember if the file did not exist before - bfh.Close() - return nil, err - } - return &UnionFile{Base: bfh, Layer: lfh}, nil -} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go deleted file mode 100644 index 5728243d..00000000 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build darwin openbsd freebsd netbsd dragonfly - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go deleted file mode 100644 index 968fc278..00000000 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go deleted file mode 100644 index e8108a85..00000000 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ /dev/null @@ -1,293 +0,0 @@ -package afero - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" -) - -var _ Lstater = (*CopyOnWriteFs)(nil) - -// The CopyOnWriteFs is a union filesystem: a read only base file system with -// a possibly writeable layer on top. Changes to the file system will only -// be made in the overlay: Changing an existing file in the base layer which -// is not present in the overlay will copy the file to the overlay ("changing" -// includes also calls to e.g. Chtimes() and Chmod()). 
-// -// Reading directories is currently only supported via Open(), not OpenFile(). -type CopyOnWriteFs struct { - base Fs - layer Fs -} - -func NewCopyOnWriteFs(base Fs, layer Fs) Fs { - return &CopyOnWriteFs{base: base, layer: layer} -} - -// Returns true if the file is not in the overlay -func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { - if _, err := u.layer.Stat(name); err == nil { - return false, nil - } - _, err := u.base.Stat(name) - if err != nil { - if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { - return false, nil - } - } - if err == syscall.ENOENT { - return false, nil - } - } - return true, err -} - -func (u *CopyOnWriteFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chmod(name, mode) -} - -func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { - fi, err := u.layer.Stat(name) - if err != nil { - isNotExist := u.isNotExist(err) - if isNotExist { - return u.base.Stat(name) - } - return nil, err - } - return fi, nil -} - -func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - llayer, ok1 := u.layer.(Lstater) - lbase, ok2 := u.base.(Lstater) - - if ok1 { - fi, b, err := llayer.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - - if !u.isNotExist(err) { - return nil, b, err - } - } - - if ok2 { - fi, b, err := lbase.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - if !u.isNotExist(err) { - return nil, b, err - } - } - - fi, err := u.Stat(name) - - return fi, false, err -} - -func (u *CopyOnWriteFs) isNotExist(err error) bool { - if e, ok := err.(*os.PathError); ok { - err = e.Err - } - if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { - return true - } - return false -} - -// Renaming files present only in the base layer is not permitted -func (u *CopyOnWriteFs) Rename(oldname, newname string) error { - b, err := u.isBaseFile(oldname) - if err != nil { - return err - } - if b { - return syscall.EPERM - } - return u.layer.Rename(oldname, newname) -} - -// Removing files present only in the base layer is not permitted. If -// a file is present in the base layer and the overlay, only the overlay -// will be removed. 
-func (u *CopyOnWriteFs) Remove(name string) error { - err := u.layer.Remove(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) RemoveAll(name string) error { - err := u.layer.RemoveAll(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - if b { - if err = u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - dir := filepath.Dir(name) - isaDir, err := IsDir(u.base, dir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if isaDir { - if err = u.layer.MkdirAll(dir, 0777); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - isaDir, err = IsDir(u.layer, dir) - if err != nil { - return nil, err - } - if isaDir { - return u.layer.OpenFile(name, flag, perm) - } - - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? - } - if b { - return u.base.OpenFile(name, flag, perm) - } - return u.layer.OpenFile(name, flag, perm) -} - -// This function handles the 9 different possibilities caused -// by the union which are the intersection of the following... -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory -func (u *CopyOnWriteFs) Open(name string) (File, error) { - // Since the overlay overrides the base we check that first - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - // If overlay doesn't exist, return the base (base state irrelevant) - if b { - return u.base.Open(name) - } - - // If overlay is a file, return it (base state irrelevant) - dir, err := IsDir(u.layer, name) - if err != nil { - return nil, err - } - if !dir { - return u.layer.Open(name) - } - - // Overlay is a directory, base state now matters. - // Base state has 3 states to check but 2 outcomes: - // A. It's a file or non-readable in the base (return just the overlay) - // B. It's an accessible directory in the base (return a UnionFile) - - // If base is file or nonreadable, return overlay - dir, err = IsDir(u.base, name) - if !dir || err != nil { - return u.layer.Open(name) - } - - // Both base & layer are directories - // Return union file (if opens are without error) - bfile, bErr := u.base.Open(name) - lfile, lErr := u.layer.Open(name) - - // If either have errors at this point something is very wrong. 
Return nil and the errors - if bErr != nil || lErr != nil { - return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) - } - - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return ErrFileExists - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Name() string { - return "CopyOnWriteFs" -} - -func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - // This is in line with how os.MkdirAll behaves. - return nil - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) -} diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod deleted file mode 100644 index 08685509..00000000 --- a/vendor/github.com/spf13/afero/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/spf13/afero - -require golang.org/x/text v0.3.0 diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum deleted file mode 100644 index 6bad37b2..00000000 --- a/vendor/github.com/spf13/afero/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go deleted file mode 100644 index c4219368..00000000 --- a/vendor/github.com/spf13/afero/httpFs.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" -) - -type httpDir struct { - basePath string - fs HttpFs -} - -func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d.basePath) - if dir == "" { - dir = "." 
- } - - f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -type HttpFs struct { - source Fs -} - -func NewHttpFs(source Fs) *HttpFs { - return &HttpFs{source: source} -} - -func (h HttpFs) Dir(s string) *httpDir { - return &httpDir{basePath: s, fs: h} -} - -func (h HttpFs) Name() string { return "h HttpFs" } - -func (h HttpFs) Create(name string) (File, error) { - return h.source.Create(name) -} - -func (h HttpFs) Chmod(name string, mode os.FileMode) error { - return h.source.Chmod(name, mode) -} - -func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return h.source.Chtimes(name, atime, mtime) -} - -func (h HttpFs) Mkdir(name string, perm os.FileMode) error { - return h.source.Mkdir(name, perm) -} - -func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { - return h.source.MkdirAll(path, perm) -} - -func (h HttpFs) Open(name string) (http.File, error) { - f, err := h.source.Open(name) - if err == nil { - if httpfile, ok := f.(http.File); ok { - return httpfile, nil - } - } - return nil, err -} - -func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return h.source.OpenFile(name, flag, perm) -} - -func (h HttpFs) Remove(name string) error { - return h.source.Remove(name) -} - -func (h HttpFs) RemoveAll(path string) error { - return h.source.RemoveAll(path) -} - -func (h HttpFs) Rename(oldname, newname string) error { - return h.source.Rename(oldname, newname) -} - -func (h HttpFs) Stat(name string) (os.FileInfo, error) { - return h.source.Stat(name) -} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go deleted file mode 100644 index 5c3a3d8f..00000000 --- a/vendor/github.com/spf13/afero/ioutil.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "sync" - "time" -) - -// byName implements sort.Interface. -type byName []os.FileInfo - -func (f byName) Len() int { return len(f) } -func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// ReadDir reads the directory named by dirname and returns -// a list of sorted directory entries. -func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { - return ReadDir(a.Fs, dirname) -} - -func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - list, err := f.Readdir(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Sort(byName(list)) - return list, nil -} - -// ReadFile reads the file named by filename and returns the contents. -// A successful call returns err == nil, not err == EOF. 
Because ReadFile -// reads the whole file, it does not treat an EOF from Read as an error -// to be reported. -func (a Afero) ReadFile(filename string) ([]byte, error) { - return ReadFile(a.Fs, filename) -} - -func ReadFile(fs Fs, filename string) ([]byte, error) { - f, err := fs.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - // It's a good but not certain bet that FileInfo will tell us exactly how much to - // read, so let's try it but be prepared for the answer to be wrong. - var n int64 - - if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { - n = size - } - } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. -func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. - // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -func ReadAll(r io.Reader) ([]byte, error) { - return readAll(r, bytes.MinRead) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { - return WriteFile(a.Fs, filename, data, perm) -} - -func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *File. 
-// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func (a Afero) TempFile(dir, prefix string) (f File, err error) { - return TempFile(a.Fs, dir, prefix) -} - -func TempFile(fs Fs, dir, prefix string) (f File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func (a Afero) TempDir(dir, prefix string) (name string, err error) { - return TempDir(a.Fs, dir, prefix) -} -func TempDir(fs Fs, dir, prefix string) (name string, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextSuffix()) - err = fs.Mkdir(try, 0700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - if err == nil { - name = try - } - break - } - return -} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go deleted file mode 100644 index 89c1bfc0..00000000 --- a/vendor/github.com/spf13/afero/lstater.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" -) - -// Lstater is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. -// Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. -type Lstater interface { - LstatIfPossible(name string) (os.FileInfo, bool, error) -} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go deleted file mode 100644 index c18a87fb..00000000 --- a/vendor/github.com/spf13/afero/match.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2009 The Go Authors. All rights reserved. 
- -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "path/filepath" - "sort" - "strings" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// This was adapted from (http://golang.org/pkg/path/filepath) and uses several -// built-ins from that package. -func Glob(fs Fs, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - // Lstat not supported by a ll filesystems. - if _, err = lstatIfPossible(fs, pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - switch dir { - case "": - dir = "." - case string(filepath.Separator): - // nothing - default: - dir = dir[0 : len(dir)-1] // chop off trailing separator - } - - if !hasMeta(dir) { - return glob(fs, dir, file, nil) - } - - var m []string - m, err = Glob(fs, dir) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - if !fi.IsDir() { - return - } - d, err := fs.Open(dir) - if err != nil { - return - } - defer d.Close() - - names, _ := d.Readdirnames(-1) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.IndexAny(path, "*?[") >= 0 -} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go deleted file mode 100644 index e104013f..00000000 --- a/vendor/github.com/spf13/afero/mem/dir.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -type Dir interface { - Len() int - Names() []string - Files() []*FileData - Add(*FileData) - Remove(*FileData) -} - -func RemoveFromMemDir(dir *FileData, f *FileData) { - dir.memDir.Remove(f) -} - -func AddToMemDir(dir *FileData, f *FileData) { - dir.memDir.Add(f) -} - -func InitializeDir(d *FileData) { - if d.memDir == nil { - d.dir = true - d.memDir = &DirMap{} - } -} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go deleted file mode 100644 index 03a57ee5..00000000 --- a/vendor/github.com/spf13/afero/mem/dirmap.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -import "sort" - -type DirMap map[string]*FileData - -func (m DirMap) Len() int { return len(m) } -func (m DirMap) Add(f *FileData) { m[f.name] = f } -func (m DirMap) Remove(f *FileData) { delete(m, f.name) } -func (m DirMap) Files() (files []*FileData) { - for _, f := range m { - files = append(files, f) - } - sort.Sort(filesSorter(files)) - return files -} - -// implement sort.Interface for []*FileData -type filesSorter []*FileData - -func (s filesSorter) Len() int { return len(s) } -func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } - -func (m DirMap) Names() (names []string) { - for x := range m { - names = append(names, x) - } - return names -} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go deleted file mode 100644 index 7af2fb56..00000000 --- a/vendor/github.com/spf13/afero/mem/file.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright © 2015 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mem - -import ( - "bytes" - "errors" - "io" - "os" - "path/filepath" - "sync" - "sync/atomic" -) - -import "time" - -const FilePathSeparator = string(filepath.Separator) - -type File struct { - // atomic requires 64-bit alignment for struct field access - at int64 - readDirCount int64 - closed bool - readOnly bool - fileData *FileData -} - -func NewFileHandle(data *FileData) *File { - return &File{fileData: data} -} - -func NewReadOnlyFileHandle(data *FileData) *File { - return &File{fileData: data, readOnly: true} -} - -func (f File) Data() *FileData { - return f.fileData -} - -type FileData struct { - sync.Mutex - name string - data []byte - memDir Dir - dir bool - mode os.FileMode - modtime time.Time -} - -func (d *FileData) Name() string { - d.Lock() - defer d.Unlock() - return d.name -} - -func CreateFile(name string) *FileData { - return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} -} - -func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} -} - -func ChangeFileName(f *FileData, newname string) { - f.Lock() - f.name = newname - f.Unlock() -} - -func SetMode(f *FileData, mode os.FileMode) { - f.Lock() - f.mode = mode - f.Unlock() -} - -func SetModTime(f *FileData, mtime time.Time) { - f.Lock() - setModTime(f, mtime) - f.Unlock() -} - -func setModTime(f *FileData, mtime time.Time) { - f.modtime = mtime -} - -func GetFileInfo(f *FileData) *FileInfo { - return &FileInfo{f} -} - -func (f *File) Open() error { - atomic.StoreInt64(&f.at, 0) - atomic.StoreInt64(&f.readDirCount, 0) - f.fileData.Lock() - f.closed = false - f.fileData.Unlock() - return nil -} - -func (f *File) Close() error { - f.fileData.Lock() - f.closed = true - if !f.readOnly { - setModTime(f.fileData, time.Now()) - } - f.fileData.Unlock() - return nil -} - -func (f *File) Name() string { - return f.fileData.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return &FileInfo{f.fileData}, nil -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} - } - var outLength int64 - - f.fileData.Lock() - files := f.fileData.memDir.Files()[f.readDirCount:] - if count > 0 { - if len(files) < count { - outLength = int64(len(files)) - } else { - outLength = int64(count) - } - if len(files) == 0 { - err = io.EOF - } - } else { - outLength = int64(len(files)) - } - f.readDirCount += outLength - f.fileData.Unlock() - - res = make([]os.FileInfo, outLength) - for i := range res { - res[i] = &FileInfo{files[i]} - } - - return res, err -} - -func (f *File) Readdirnames(n int) (names []string, err error) { - fi, err := f.Readdir(n) - names = make([]string, len(fi)) - for i, f := range fi { - _, names[i] = filepath.Split(f.Name()) - } - return names, err -} - -func (f *File) Read(b []byte) (n int, err error) { - f.fileData.Lock() - defer f.fileData.Unlock() - if f.closed == true { - return 0, ErrFileClosed - } - if len(b) > 0 && int(f.at) == len(f.fileData.data) { - return 0, io.EOF - } - if int(f.at) > len(f.fileData.data) { - return 0, io.ErrUnexpectedEOF - } - if len(f.fileData.data)-int(f.at) >= len(b) { - n = len(b) - } else { - n = len(f.fileData.data) - int(f.at) - } - copy(b, f.fileData.data[f.at:f.at+int64(n)]) - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return 
f.Read(b) -} - -func (f *File) Truncate(size int64) error { - if f.closed == true { - return ErrFileClosed - } - if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - if size < 0 { - return ErrOutOfRange - } - if size > int64(len(f.fileData.data)) { - diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) - } else { - f.fileData.data = f.fileData.data[0:size] - } - setModTime(f.fileData, time.Now()) - return nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { - return 0, ErrFileClosed - } - switch whence { - case 0: - atomic.StoreInt64(&f.at, offset) - case 1: - atomic.AddInt64(&f.at, int64(offset)) - case 2: - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) - } - return f.at, nil -} - -func (f *File) Write(b []byte) (n int, err error) { - if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - n = len(b) - cur := atomic.LoadInt64(&f.at) - f.fileData.Lock() - defer f.fileData.Unlock() - diff := cur - int64(len(f.fileData.data)) - var tail []byte - if n+int(cur) < len(f.fileData.data) { - tail = f.fileData.data[n+int(cur):] - } - if diff > 0 { - f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) - f.fileData.data = append(f.fileData.data, tail...) - } else { - f.fileData.data = append(f.fileData.data[:cur], b...) - f.fileData.data = append(f.fileData.data, tail...) - } - setModTime(f.fileData, time.Now()) - - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) - return -} - -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Write(b) -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.Write([]byte(s)) -} - -func (f *File) Info() *FileInfo { - return &FileInfo{f.fileData} -} - -type FileInfo struct { - *FileData -} - -// Implements os.FileInfo -func (s *FileInfo) Name() string { - s.Lock() - _, name := filepath.Split(s.name) - s.Unlock() - return name -} -func (s *FileInfo) Mode() os.FileMode { - s.Lock() - defer s.Unlock() - return s.mode -} -func (s *FileInfo) ModTime() time.Time { - s.Lock() - defer s.Unlock() - return s.modtime -} -func (s *FileInfo) IsDir() bool { - s.Lock() - defer s.Unlock() - return s.dir -} -func (s *FileInfo) Sys() interface{} { return nil } -func (s *FileInfo) Size() int64 { - if s.IsDir() { - return int64(42) - } - s.Lock() - defer s.Unlock() - return int64(len(s.data)) -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go deleted file mode 100644 index 09498e70..00000000 --- a/vendor/github.com/spf13/afero/memmap.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/spf13/afero/mem" -) - -type MemMapFs struct { - mu sync.RWMutex - data map[string]*mem.FileData - init sync.Once -} - -func NewMemMapFs() Fs { - return &MemMapFs{} -} - -func (m *MemMapFs) getData() map[string]*mem.FileData { - m.init.Do(func() { - m.data = make(map[string]*mem.FileData) - // Root should always exist, right? - // TODO: what about windows? - m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) - }) - return m.data -} - -func (*MemMapFs) Name() string { return "MemMapFS" } - -func (m *MemMapFs) Create(name string) (File, error) { - name = normalizePath(name) - m.mu.Lock() - file := mem.CreateFile(name) - m.getData()[name] = file - m.registerWithParent(file) - m.mu.Unlock() - return mem.NewFileHandle(file), nil -} - -func (m *MemMapFs) unRegisterWithParent(fileName string) error { - f, err := m.lockfreeOpen(fileName) - if err != nil { - return err - } - parent := m.findParent(f) - if parent == nil { - log.Panic("parent of ", f.Name(), " is nil") - } - - parent.Lock() - mem.RemoveFromMemDir(parent, f) - parent.Unlock() - return nil -} - -func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { - pdir, _ := filepath.Split(f.Name()) - pdir = filepath.Clean(pdir) - pfile, err := m.lockfreeOpen(pdir) - if err != nil { - return nil - } - return pfile -} - -func (m *MemMapFs) registerWithParent(f *mem.FileData) { - if f == nil { - return - } - parent := m.findParent(f) - if parent == nil { - pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, 0777) - if err != nil { - //log.Println("Mkdir error:", err) - return - } - parent, err = m.lockfreeOpen(pdir) - if err != nil { - //log.Println("Open after Mkdir error:", err) - return - } - } - - parent.Lock() - mem.InitializeDir(parent) - mem.AddToMemDir(parent, f) - parent.Unlock() -} - -func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - x, ok := m.getData()[name] - if ok { - // Only return ErrFileExists if it's a file, not a directory. 
- i := mem.FileInfo{FileData: x} - if !i.IsDir() { - return ErrFileExists - } - } else { - item := mem.CreateDir(name) - m.getData()[name] = item - m.registerWithParent(item) - } - return nil -} - -func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - _, ok := m.getData()[name] - m.mu.RUnlock() - if ok { - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - - m.mu.Lock() - item := mem.CreateDir(name) - m.getData()[name] = item - m.registerWithParent(item) - m.mu.Unlock() - - m.Chmod(name, perm|os.ModeDir) - - return nil -} - -func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { - err := m.Mkdir(path, perm) - if err != nil { - if err.(*os.PathError).Err == ErrFileExists { - return nil - } - return err - } - return nil -} - -// Handle some relative paths -func normalizePath(path string) string { - path = filepath.Clean(path) - - switch path { - case ".": - return FilePathSeparator - case "..": - return FilePathSeparator - default: - return path - } -} - -func (m *MemMapFs) Open(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewReadOnlyFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) openWrite(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) open(name string) (*mem.FileData, error) { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} - } - return f, nil -} - -func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { - name = normalizePath(name) - f, ok := m.getData()[name] - if ok { - return f, nil - } else { - return nil, ErrFileNotFound - } -} - -func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - chmod := false - file, err := m.openWrite(name) - if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { - file, err = m.Create(name) - chmod = true - } - if err != nil { - return nil, err - } - if flag == os.O_RDONLY { - file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) - } - if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, os.SEEK_END) - if err != nil { - file.Close() - return nil, err - } - } - if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { - err = file.Truncate(0) - if err != nil { - file.Close() - return nil, err - } - } - if chmod { - m.Chmod(name, perm) - } - return file, nil -} - -func (m *MemMapFs) Remove(name string) error { - name = normalizePath(name) - - m.mu.Lock() - defer m.mu.Unlock() - - if _, ok := m.getData()[name]; ok { - err := m.unRegisterWithParent(name) - if err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - delete(m.getData(), name) - } else { - return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} - } - return nil -} - -func (m *MemMapFs) RemoveAll(path string) error { - path = normalizePath(path) - m.mu.Lock() - m.unRegisterWithParent(path) - m.mu.Unlock() - - m.mu.RLock() - defer m.mu.RUnlock() - - for p, _ := range m.getData() { - if strings.HasPrefix(p, path) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - m.mu.Unlock() - m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) Rename(oldname, newname string) error { - oldname = normalizePath(oldname) - newname = normalizePath(newname) - - if oldname == newname { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - if _, ok := 
m.getData()[oldname]; ok { - m.mu.RUnlock() - m.mu.Lock() - m.unRegisterWithParent(oldname) - fileData := m.getData()[oldname] - delete(m.getData(), oldname) - mem.ChangeFileName(fileData, newname) - m.getData()[newname] = fileData - m.registerWithParent(fileData) - m.mu.Unlock() - m.mu.RLock() - } else { - return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} - } - return nil -} - -func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { - f, err := m.Open(name) - if err != nil { - return nil, err - } - fi := mem.GetFileInfo(f.(*mem.File).Data()) - return fi, nil -} - -func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetMode(f, mode) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetModTime(f, mtime) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) List() { - for _, x := range m.data { - y := mem.FileInfo{FileData: x} - fmt.Println(x.Name(), y.Size()) - } -} - -// func debugMemMapList(fs Fs) { -// if x, ok := fs.(*MemMapFs); ok { -// x.List() -// } -// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go deleted file mode 100644 index 13cc1b84..00000000 --- a/vendor/github.com/spf13/afero/os.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" -) - -var _ Lstater = (*OsFs)(nil) - -// OsFs is a Fs implementation that uses functions provided by the os package. -// -// For details in any method, check the documentation of the os package -// (http://golang.org/pkg/os/). 
-type OsFs struct{} - -func NewOsFs() Fs { - return &OsFs{} -} - -func (OsFs) Name() string { return "OsFs" } - -func (OsFs) Create(name string) (File, error) { - f, e := os.Create(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -func (OsFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (OsFs) Open(name string) (File, error) { - f, e := os.Open(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, e := os.OpenFile(name, flag, perm) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Remove(name string) error { - return os.Remove(name) -} - -func (OsFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -func (OsFs) Rename(oldname, newname string) error { - return os.Rename(oldname, newname) -} - -func (OsFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -func (OsFs) Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fi, err := os.Lstat(name) - return fi, true, err -} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go deleted file mode 100644 index 18f60a0f..00000000 --- a/vendor/github.com/spf13/afero/path.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "path/filepath" - "sort" -) - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. 
-// adapted from https://golang.org/src/path/filepath/path.go -func readDirNames(fs Fs, dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// walk recursively descends path, calling walkFn -// adapted from https://golang.org/src/path/filepath/path.go -func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - err := walkFn(path, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - - if !info.IsDir() { - return nil - } - - names, err := readDirNames(fs, path) - if err != nil { - return walkFn(path, info, err) - } - - for _, name := range names { - filename := filepath.Join(path, name) - fileInfo, err := lstatIfPossible(fs, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walk(fs, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} - -// if the filesystem supports it, use Lstat, else use fs.Stat -func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { - if lfs, ok := fs.(Lstater); ok { - fi, _, err := lfs.LstatIfPossible(path) - return fi, err - } - return fs.Stat(path) -} - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. 
- -func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { - return Walk(a.Fs, root, walkFn) -} - -func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { - info, err := lstatIfPossible(fs, root) - if err != nil { - return walkFn(root, nil, err) - } - return walk(fs, root, info, walkFn) -} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go deleted file mode 100644 index c6376ec3..00000000 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ /dev/null @@ -1,80 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -var _ Lstater = (*ReadOnlyFs)(nil) - -type ReadOnlyFs struct { - source Fs -} - -func NewReadOnlyFs(source Fs) Fs { - return &ReadOnlyFs{source: source} -} - -func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { - return ReadDir(r.source, name) -} - -func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Name() string { - return "ReadOnlyFilter" -} - -func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { - return r.source.Stat(name) -} - -func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - if lsf, ok := r.source.(Lstater); ok { - return lsf.LstatIfPossible(name) - } - fi, err := r.Stat(name) - return fi, false, err -} - -func (r *ReadOnlyFs) Rename(o, n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) RemoveAll(p string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Remove(n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - return nil, syscall.EPERM - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *ReadOnlyFs) Open(n string) (File, error) { - return r.source.Open(n) -} - -func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Create(n string) (File, error) { - return nil, syscall.EPERM -} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go deleted file mode 100644 index 9d92dbc0..00000000 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ /dev/null @@ -1,214 +0,0 @@ -package afero - -import ( - "os" - "regexp" - "syscall" - "time" -) - -// The RegexpFs filters files (not directories) by regular expression. Only -// files matching the given regexp will be allowed; all others get an ENOENT error ( -// "No such file or directory").
-// -type RegexpFs struct { - re *regexp.Regexp - source Fs -} - -func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { - return &RegexpFs{source: source, re: re} -} - -type RegexpFile struct { - f File - re *regexp.Regexp -} - -func (r *RegexpFs) matchesName(name string) error { - if r.re == nil { - return nil - } - if r.re.MatchString(name) { - return nil - } - return syscall.ENOENT -} - -func (r *RegexpFs) dirOrMatches(name string) error { - dir, err := IsDir(r.source, name) - if err != nil { - return err - } - if dir { - return nil - } - return r.matchesName(name) -} - -func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chtimes(name, a, m) -} - -func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chmod(name, mode) -} - -func (r *RegexpFs) Name() string { - return "RegexpFs" -} - -func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.Stat(name) -} - -func (r *RegexpFs) Rename(oldname, newname string) error { - dir, err := IsDir(r.source, oldname) - if err != nil { - return err - } - if dir { - return nil - } - if err := r.matchesName(oldname); err != nil { - return err - } - if err := r.matchesName(newname); err != nil { - return err - } - return r.source.Rename(oldname, newname) -} - -func (r *RegexpFs) RemoveAll(p string) error { - dir, err := IsDir(r.source, p) - if err != nil { - return err - } - if !dir { - if err := r.matchesName(p); err != nil { - return err - } - } - return r.source.RemoveAll(p) -} - -func (r *RegexpFs) Remove(name string) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Remove(name) -} - -func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *RegexpFs) Open(name string) (File, error) { - dir, err := IsDir(r.source, name) - if err != nil { - return nil, err - } - if !dir { - if err := r.matchesName(name); err != nil { - return nil, err - } - } - f, err := r.source.Open(name) - if err != nil { - return nil, err - } - return &RegexpFile{f: f, re: r.re}, nil -} - -func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { - return r.source.Mkdir(n, p) -} - -func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { - return r.source.MkdirAll(n, p) -} - -func (r *RegexpFs) Create(name string) (File, error) { - if err := r.matchesName(name); err != nil { - return nil, err - } - return r.source.Create(name) -} - -func (f *RegexpFile) Close() error { - return f.f.Close() -} - -func (f *RegexpFile) Read(s []byte) (int, error) { - return f.f.Read(s) -} - -func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { - return f.f.ReadAt(s, o) -} - -func (f *RegexpFile) Seek(o int64, w int) (int64, error) { - return f.f.Seek(o, w) -} - -func (f *RegexpFile) Write(s []byte) (int, error) { - return f.f.Write(s) -} - -func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { - return f.f.WriteAt(s, o) -} - -func (f *RegexpFile) Name() string { - return f.f.Name() -} - -func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { - var rfi []os.FileInfo - rfi, err = f.f.Readdir(c) - if err != nil { - return nil, err - } - for _, i := range rfi { - if i.IsDir() || f.re.MatchString(i.Name()) { - fi = append(fi, i) - } - } - return fi, 
nil -} - -func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { - fi, err := f.Readdir(c) - if err != nil { - return nil, err - } - for _, s := range fi { - n = append(n, s.Name()) - } - return n, nil -} - -func (f *RegexpFile) Stat() (os.FileInfo, error) { - return f.f.Stat() -} - -func (f *RegexpFile) Sync() error { - return f.f.Sync() -} - -func (f *RegexpFile) Truncate(s int64) error { - return f.f.Truncate(s) -} - -func (f *RegexpFile) WriteString(s string) (int, error) { - return f.f.WriteString(s) -} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go deleted file mode 100644 index eda96312..00000000 --- a/vendor/github.com/spf13/afero/unionFile.go +++ /dev/null @@ -1,320 +0,0 @@ -package afero - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -// The UnionFile implements the afero.File interface and will be returned -// when reading a directory present at least in the overlay or opening a file -// for writing. -// -// The calls to -// Readdir() and Readdirnames() merge the file os.FileInfo / names from the -// base and the overlay - for files present in both layers, only those -// from the overlay will be used. -// -// When opening files for writing (Create() / OpenFile() with the right flags) -// the operations will be done in both layers, starting with the overlay. A -// successful read in the overlay will move the cursor position in the base layer -// by the number of bytes read. -type UnionFile struct { - Base File - Layer File - Merger DirsMerger - off int - files []os.FileInfo -} - -func (f *UnionFile) Close() error { - // first close base, so we have a newer timestamp in the overlay. If we'd close - // the overlay first, we'd get a cacheStale the next time we access this file - // -> cache would be useless ;-) - if f.Base != nil { - f.Base.Close() - } - if f.Layer != nil { - return f.Layer.Close() - } - return BADFD -} - -func (f *UnionFile) Read(s []byte) (int, error) { - if f.Layer != nil { - n, err := f.Layer.Read(s) - if (err == nil || err == io.EOF) && f.Base != nil { - // advance the file position also in the base file, the next - // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { - // only overwrite err in case the seek fails: we need to - // report an eventual io.EOF to the caller - err = seekErr - } - } - return n, err - } - if f.Base != nil { - return f.Base.Read(s) - } - return 0, BADFD -} - -func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { - if f.Layer != nil { - n, err := f.Layer.ReadAt(s, o) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) - } - return n, err - } - if f.Base != nil { - return f.Base.ReadAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { - if f.Layer != nil { - pos, err = f.Layer.Seek(o, w) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o, w) - } - return pos, err - } - if f.Base != nil { - return f.Base.Seek(o, w) - } - return 0, BADFD -} - -func (f *UnionFile) Write(s []byte) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
- _, err = f.Base.Write(s) - } - return n, err - } - if f.Base != nil { - return f.Base.Write(s) - } - return 0, BADFD -} - -func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteAt(s, o) - if err == nil && f.Base != nil { - _, err = f.Base.WriteAt(s, o) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Name() string { - if f.Layer != nil { - return f.Layer.Name() - } - return f.Base.Name() -} - -// DirsMerger is how UnionFile weaves two directories together. -// It takes the FileInfo slices from the layer and the base and returns a -// single view. -type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) - -var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - var files = make(map[string]os.FileInfo) - - for _, fi := range lofi { - files[fi.Name()] = fi - } - - for _, fi := range bofi { - if _, exists := files[fi.Name()]; !exists { - files[fi.Name()] = fi - } - } - - rfi := make([]os.FileInfo, len(files)) - - i := 0 - for _, fi := range files { - rfi[i] = fi - i++ - } - - return rfi, nil - -} - -// Readdir will weave the two directories together and -// return a single view of the overlayed directories. -// At the end of the directory view, the error is io.EOF if c > 0. -func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger - if merge == nil { - merge = defaultUnionMergeDirsFn - } - - if f.off == 0 { - var lfi []os.FileInfo - if f.Layer != nil { - lfi, err = f.Layer.Readdir(-1) - if err != nil { - return nil, err - } - } - - var bfi []os.FileInfo - if f.Base != nil { - bfi, err = f.Base.Readdir(-1) - if err != nil { - return nil, err - } - - } - merged, err := merge(lfi, bfi) - if err != nil { - return nil, err - } - f.files = append(f.files, merged...) 
- } - - if c <= 0 && len(f.files) == 0 { - return f.files, nil - } - - if f.off >= len(f.files) { - return nil, io.EOF - } - - if c <= 0 { - return f.files[f.off:], nil - } - - // clamp c to the entries remaining after the current offset - if c > len(f.files)-f.off { - c = len(f.files) - f.off - } - - defer func() { f.off += c }() - return f.files[f.off : f.off+c], nil -} - -func (f *UnionFile) Readdirnames(c int) ([]string, error) { - rfi, err := f.Readdir(c) - if err != nil { - return nil, err - } - var names []string - for _, fi := range rfi { - names = append(names, fi.Name()) - } - return names, nil -} - -func (f *UnionFile) Stat() (os.FileInfo, error) { - if f.Layer != nil { - return f.Layer.Stat() - } - if f.Base != nil { - return f.Base.Stat() - } - return nil, BADFD -} - -func (f *UnionFile) Sync() (err error) { - if f.Layer != nil { - err = f.Layer.Sync() - if err == nil && f.Base != nil { - err = f.Base.Sync() - } - return err - } - if f.Base != nil { - return f.Base.Sync() - } - return BADFD -} - -func (f *UnionFile) Truncate(s int64) (err error) { - if f.Layer != nil { - err = f.Layer.Truncate(s) - if err == nil && f.Base != nil { - err = f.Base.Truncate(s) - } - return err - } - if f.Base != nil { - return f.Base.Truncate(s) - } - return BADFD -} - -func (f *UnionFile) WriteString(s string) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteString(s) - if err == nil && f.Base != nil { - _, err = f.Base.WriteString(s) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteString(s) - } - return 0, BADFD -} - -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - - // First make sure the directory exists - exists, err := Exists(layer, filepath.Dir(name)) - if err != nil { - return err - } - if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? - if err != nil { - return err - } - } - - // Create the file on the overlay - lfh, err := layer.Create(name) - if err != nil { - return err - } - n, err := io.Copy(lfh, bfh) - if err != nil { - // If anything fails, clean up the file - layer.Remove(name) - lfh.Close() - return err - } - - bfi, err := bfh.Stat() - if err != nil || bfi.Size() != n { - layer.Remove(name) - lfh.Close() - return syscall.EIO - } - - err = lfh.Close() - if err != nil { - layer.Remove(name) - lfh.Close() - return err - } - return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) -} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go deleted file mode 100644 index 4f253f48..00000000 --- a/vendor/github.com/spf13/afero/util.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright ©2015 Steve Francia -// Portions Copyright ©2015 The Hugo Authors -// Portions Copyright 2016-present Bjørn Erik Pedersen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- -package afero - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// Filepath separator defined by filepath.Separator. -const FilePathSeparator = string(filepath.Separator) - -// Takes a reader and a path and writes the content -func (a Afero) WriteReader(path string, r io.Reader) (err error) { - return WriteReader(a.Fs, path, r) -} - -func WriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx for all - if err != nil { - if !os.IsExist(err) { - return err - } - } - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -// Same as WriteReader but checks to see if file/directory already exists. -func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { - return SafeWriteReader(a.Fs, path, r) -} - -func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx for all - if err != nil { - return - } - } - - exists, err := Exists(fs, path) - if err != nil { - return - } - if exists { - return fmt.Errorf("%v already exists", path) - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -func (a Afero) GetTempDir(subPath string) string { - return GetTempDir(a.Fs, subPath) -} - -// GetTempDir returns the default temp directory with trailing slash -// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx -func GetTempDir(fs Fs, subPath string) string { - addSlash := func(p string) string { - if FilePathSeparator != p[len(p)-1:] { - p = p + FilePathSeparator - } - return p - } - dir := addSlash(os.TempDir()) - - if subPath != "" { - // preserve windows backslash :-( - if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) - } - dir = dir + UnicodeSanitize(subPath) - if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) - } - - if exists, _ := Exists(fs, dir); exists { - return addSlash(dir) - } - - err := fs.MkdirAll(dir, 0777) - if err != nil { - panic(err) - } - dir = addSlash(dir) - } - return dir -} - -// Rewrite string to remove non-standard path characters -func UnicodeSanitize(s string) string { - source := []rune(s) - target := make([]rune, 0, len(source)) - - for _, r := range source { - if unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsMark(r) || - r == '.' || - r == '/' || - r == '\\' || - r == '_' || - r == '-' || - r == '%' || - r == ' ' || - r == '#' { - target = append(target, r) - } - } - - return string(target) -} - -// Transform characters with accents into plain forms. -func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) - result, _, _ := transform.String(t, string(s)) - - return result -} - -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - -func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { - return FileContainsBytes(a.Fs, filename, subslice) -} - -// Check if a file contains a specified byte slice.
-func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslice), nil -} - -func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { - return FileContainsAnyBytes(a.Fs, filename, subslices) -} - -// Check if a file contains any of the specified byte slices. -func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslices...), nil -} - -// readerContainsAny reports whether any of the subslices is within r. -func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - - if r == nil || len(subslices) == 0 { - return false - } - - largestSlice := 0 - - for _, sl := range subslices { - if len(sl) > largestSlice { - largestSlice = len(sl) - } - } - - if largestSlice == 0 { - return false - } - - bufflen := largestSlice * 4 - halflen := bufflen / 2 - buff := make([]byte, bufflen) - var err error - var n, i int - - for { - i++ - if i == 1 { - n, err = io.ReadAtLeast(r, buff[:halflen], halflen) - } else { - if i != 2 { - // shift left to catch overlapping matches - copy(buff[:], buff[halflen:]) - } - n, err = io.ReadAtLeast(r, buff[halflen:], halflen) - } - - if n > 0 { - for _, sl := range subslices { - if bytes.Contains(buff, sl) { - return true - } - } - } - - if err != nil { - break - } - } - return false -} - -func (a Afero) DirExists(path string) (bool, error) { - return DirExists(a.Fs, path) -} - -// DirExists checks if a path exists and is a directory. -func DirExists(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func (a Afero) IsDir(path string) (bool, error) { - return IsDir(a.Fs, path) -} - -// IsDir checks if a given path is a directory. -func IsDir(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - return fi.IsDir(), nil -} - -func (a Afero) IsEmpty(path string) (bool, error) { - return IsEmpty(a.Fs, path) -} - -// IsEmpty checks if a given file or directory is empty. -func IsEmpty(fs Fs, path string) (bool, error) { - if b, _ := Exists(fs, path); !b { - return false, fmt.Errorf("%q path does not exist", path) - } - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - if fi.IsDir() { - f, err := fs.Open(path) - if err != nil { - return false, err - } - defer f.Close() - list, err := f.Readdir(-1) - if err != nil { - return false, err - } - return len(list) == 0, nil - } - return fi.Size() == 0, nil -} - -func (a Afero) Exists(path string) (bool, error) { - return Exists(a.Fs, path) -} - -// Check if a file or directory exists.
-func Exists(fs Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { - combinedPath := filepath.Join(basePathFs.path, relativePath) - if parent, ok := basePathFs.source.(*BasePathFs); ok { - return FullBaseFsPath(parent, combinedPath) - } - - return combinedPath -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/.gitignore b/vendor/github.com/testcontainers/testcontainers-go/.gitignore deleted file mode 100644 index 1f671668..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -debug.test -vendor diff --git a/vendor/github.com/testcontainers/testcontainers-go/.travis.yml b/vendor/github.com/testcontainers/testcontainers-go/.travis.yml deleted file mode 100644 index 5c61693e..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -language: go -go: - - 1.14.x - -install: true - -services: - - docker - -env: - - GO111MODULE=on - -before_script: - - go get gotest.tools/gotestsum - -script: - - go mod verify - - go mod tidy - - scripts/checks.sh - - go vet ./... - - gotestsum --format short-verbose ./... - diff --git a/vendor/github.com/testcontainers/testcontainers-go/LICENSE b/vendor/github.com/testcontainers/testcontainers-go/LICENSE deleted file mode 100644 index 607a9c3c..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017-2019 Gianluca Arbezzano - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/testcontainers/testcontainers-go/README.md b/vendor/github.com/testcontainers/testcontainers-go/README.md deleted file mode 100644 index ef8c7290..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/README.md +++ /dev/null @@ -1,160 +0,0 @@ -[![Build Status](https://travis-ci.org/testcontainers/testcontainers-go.svg?branch=master)](https://travis-ci.org/testcontainers/testcontainers-go) - -When I was working on a Zipkin PR I discovered a nice Java library called -[testcontainers](https://www.testcontainers.org/). - -It provides an easy and clean API over the go docker sdk to run, terminate and -connect to containers in your tests. 
- -I found myself comfortable programmatically writing the containers I need to run -integration/smoke tests, so I started porting this library to Go. - - -This is the API I have defined: - -```go -package main - -import ( - "context" - "fmt" - "net/http" - "testing" - - "github.com/testcontainers/testcontainers-go" - "github.com/testcontainers/testcontainers-go/wait" -) - -func TestNginxLatestReturn(t *testing.T) { - ctx := context.Background() - req := testcontainers.ContainerRequest{ - Image: "nginx", - ExposedPorts: []string{"80/tcp"}, - WaitingFor: wait.ForHTTP("/"), - } - nginxC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ - ContainerRequest: req, - Started: true, - }) - if err != nil { - t.Error(err) - } - defer nginxC.Terminate(ctx) - ip, err := nginxC.Host(ctx) - if err != nil { - t.Error(err) - } - port, err := nginxC.MappedPort(ctx, "80") - if err != nil { - t.Error(err) - } - resp, err := http.Get(fmt.Sprintf("http://%s:%s", ip, port.Port())) - if err != nil { - t.Error(err) - } - if resp.StatusCode != http.StatusOK { - t.Errorf("Expected status code %d. Got %d.", http.StatusOK, resp.StatusCode) - } -} -``` -This is a simple example: it creates one container, in this case using the -`nginx` image. You can get its IP with `ip, err := nginxC.GetContainerIpAddress(ctx)` and you -can use it to make a GET request: `resp, err := http.Get(fmt.Sprintf("http://%s", ip))` - -To clean up your environment you can defer the container termination with `defer -nginxC.Terminate(ctx, t)`. `t` is a `*testing.T` and is used to mark the test as -failed if the `defer` fails. - - -## Build from Dockerfile - -Testcontainers-go gives you the ability to build an image and run a container from a Dockerfile. - -You can do so by specifying a `Context` (the filepath to the build context on your local filesystem) -and optionally a `Dockerfile` (defaults to "Dockerfile") like so: - -```go -req := ContainerRequest{ - FromDockerfile: testcontainers.FromDockerfile{ - Context: "/path/to/build/context", - Dockerfile: "CustomDockerfile", - }, - } -``` - -### Dynamic Build Context - -If you would like to send a build context that you created in code (maybe you have a dynamic Dockerfile), you can -send the build context as an `io.Reader`. Since the Docker daemon accepts it as a tar file, you can use the [tar](https://golang.org/pkg/archive/tar/) package to create your context. - - -To do this you would use the `ContextArchive` attribute in the `FromDockerfile` struct. - -```go -var buf bytes.Buffer -tarWriter := tar.NewWriter(&buf) -// ... add some files -if err := tarWriter.Close(); err != nil { - // do something with err -} -reader := bytes.NewReader(buf.Bytes()) -fromDockerfile := testcontainers.FromDockerfile{ - ContextArchive: reader, -} -``` - -**Please note:** if you specify a `ContextArchive`, testcontainers will ignore the path passed -in to `Context`. - -## Sending a CMD to a Container - -If you would like to send a CMD (command) to a container, you can pass it in to the container request via the `Cmd` field: - -```go -req := ContainerRequest{ - Image: "alpine", - WaitingFor: wait.ForAll( - wait.ForLog("command override!"), - ), - Cmd: []string{"echo", "command override!"}, -} -``` - -## Following Container Logs - -If you wish to follow container logs, you can set up `LogConsumer`s. The log following functionality follows -a producer-consumer model. You will need to explicitly start and stop the producer.
As logs are written to either -`stdout` or `stderr` (`stdin` is not supported), they will be forwarded (produced) to any associated `LogConsumer`s. You can associate `LogConsumer`s -with the `.FollowOutput` function. - -**Please note:** if you start the producer, you should always stop it explicitly. - -For example, this consumer just appends logs to a slice: - -```go -type TestLogConsumer struct { - Msgs []string -} - -func (g *TestLogConsumer) Accept(l Log) { - g.Msgs = append(g.Msgs, string(l.Content)) -} -``` - -This can be used like so: -```go -g := TestLogConsumer{ - Msgs: []string{}, -} - -err := c.StartLogProducer(ctx) -if err != nil { - // do something with err -} - -c.FollowOutput(&g) - -// some stuff happens... - -err = c.StopLogProducer() -if err != nil { - // do something with err -} -``` diff --git a/vendor/github.com/testcontainers/testcontainers-go/container.go b/vendor/github.com/testcontainers/testcontainers-go/container.go deleted file mode 100644 index f17ec683..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/container.go +++ /dev/null @@ -1,172 +0,0 @@ -package testcontainers - -import ( - "context" - "github.com/docker/docker/api/types/container" - "io" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/go-connections/nat" - "github.com/pkg/errors" - "github.com/testcontainers/testcontainers-go/wait" -) - -// DeprecatedContainer shows methods that were supported before, but are now deprecated -// Deprecated: Use Container -type DeprecatedContainer interface { - GetHostEndpoint(ctx context.Context, port string) (string, string, error) - GetIPAddress(ctx context.Context) (string, error) - LivenessCheckPorts(ctx context.Context) (nat.PortSet, error) - Terminate(ctx context.Context) error -} - -// ContainerProvider allows the creation of containers on an arbitrary system -type ContainerProvider interface { - CreateContainer(context.Context, ContainerRequest) (Container, error) // create a container without starting it - RunContainer(context.Context, ContainerRequest) (Container, error) // create a container and start it -} - -// Container allows getting info about and controlling a single container instance -type Container interface { - GetContainerID() string // get the container id from the provider - Endpoint(context.Context, string) (string, error) // get proto://ip:port string for the first exposed port - PortEndpoint(context.Context, nat.Port, string) (string, error) // get proto://ip:port string for the given exposed port - Host(context.Context) (string, error) // get host where the container port is exposed - MappedPort(context.Context, nat.Port) (nat.Port, error) // get externally mapped port for a container port - Ports(context.Context) (nat.PortMap, error) // get all exposed ports - SessionID() string // get session id - Start(context.Context) error // start the container - Terminate(context.Context) error // terminate the container - Logs(context.Context) (io.ReadCloser, error) // Get logs of the container - FollowOutput(LogConsumer) - StartLogProducer(context.Context) error - StopLogProducer() error - Name(context.Context) (string, error) // get container name - Networks(context.Context) ([]string, error) // get container networks - NetworkAliases(context.Context) (map[string][]string, error) // get container network aliases for a network - Exec(ctx context.Context, cmd []string) (int, error) -} - -// ImageBuildInfo defines what is needed to build an image -type ImageBuildInfo interface { -
GetContext() (io.Reader, error) // the build context as an io.Reader - GetDockerfile() string // the relative path to the Dockerfile, including the file itself - ShouldBuildImage() bool // returns true if the image needs to be built -} - -// FromDockerfile represents the parameters needed to build an image from a Dockerfile -// rather than using a pre-built one -type FromDockerfile struct { - Context string // the path to the context of the docker build - ContextArchive io.Reader // the tar archive file to send to docker that contains the build context - Dockerfile string // the path from the context to the Dockerfile for the image, defaults to "Dockerfile" -} - -// ContainerRequest represents the parameters used to get a running container -type ContainerRequest struct { - FromDockerfile - Image string - Env map[string]string - ExposedPorts []string // allow specifying protocol info - Cmd []string - Labels map[string]string - BindMounts map[string]string - VolumeMounts map[string]string - Tmpfs map[string]string - RegistryCred string - WaitingFor wait.Strategy - Name string // for specifying container name - Privileged bool // for starting privileged container - Networks []string // for specifying network names - NetworkAliases map[string][]string // for specifying network aliases - SkipReaper bool // indicates whether we skip setting up a reaper for this - ReaperImage string // alternative reaper image - AutoRemove bool // if set to true, the container will be removed from the host when stopped - NetworkMode container.NetworkMode - AlwaysPullImage bool // Always pull image -} - -// ProviderType is an enum for the possible providers -type ProviderType int - -// possible provider types -const ( - ProviderDocker ProviderType = iota // Docker is default = 0 -) - -// GetProvider provides the provider implementation for a certain type -func (t ProviderType) GetProvider() (GenericProvider, error) { - switch t { - case ProviderDocker: - provider, err := NewDockerProvider() - if err != nil { - return nil, errors.Wrap(err, "failed to create Docker provider") - } - return provider, nil - } - return nil, errors.New("unknown provider") -} - -// Validate ensures that the ContainerRequest does not have invalid parameters configured to it -// ex.
make sure you are not specifying both an image as well as a context -func (c *ContainerRequest) Validate() error { - - validationMethods := []func() error{ - c.validateContextAndImage, - c.validateContextOrImageIsSpecified, - } - - var err error - for _, validationMethod := range validationMethods { - err = validationMethod() - if err != nil { - return err - } - } - - return nil -} - -// GetContext retrieves the build context for the request -func (c *ContainerRequest) GetContext() (io.Reader, error) { - if c.ContextArchive != nil { - return c.ContextArchive, nil - } - - buildContext, err := archive.TarWithOptions(c.Context, &archive.TarOptions{}) - if err != nil { - return nil, err - } - - return buildContext, nil -} - -// GetDockerfile returns the Dockerfile from the ContainerRequest, defaults to "Dockerfile" -func (c *ContainerRequest) GetDockerfile() string { - f := c.FromDockerfile.Dockerfile - if f == "" { - return "Dockerfile" - } - - return f -} - -func (c *ContainerRequest) ShouldBuildImage() bool { - return c.FromDockerfile.Context != "" || c.FromDockerfile.ContextArchive != nil -} - -func (c *ContainerRequest) validateContextAndImage() error { - if c.FromDockerfile.Context != "" && c.Image != "" { - return errors.New("you cannot specify both an Image and Context in a ContainerRequest") - } - - return nil -} - -func (c *ContainerRequest) validateContextOrImageIsSpecified() error { - if c.FromDockerfile.Context == "" && c.FromDockerfile.ContextArchive == nil && c.Image == "" { - return errors.New("you must specify either a build context or an image") - } - - return nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker.go b/vendor/github.com/testcontainers/testcontainers-go/docker.go deleted file mode 100644 index 2b21924d..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/docker.go +++ /dev/null @@ -1,754 +0,0 @@ -package testcontainers - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "log" - "net/url" - "os" - "os/exec" - "strings" - "time" - - "github.com/cenkalti/backoff" - "github.com/docker/docker/errdefs" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/client" - "github.com/docker/go-connections/nat" - - "github.com/google/uuid" - "github.com/pkg/errors" - - "github.com/testcontainers/testcontainers-go/wait" -) - -// Implement interfaces -var _ Container = (*DockerContainer)(nil) - -// DockerContainer represents a container started using Docker -type DockerContainer struct { - // Container ID from Docker - ID string - WaitingFor wait.Strategy - Image string - - // Cache to retrieve container information without re-fetching it from dockerd - raw *types.ContainerJSON - provider *DockerProvider - sessionID uuid.UUID - terminationSignal chan bool - skipReaper bool - consumers []LogConsumer - stopProducer chan bool -} - -func (c *DockerContainer) GetContainerID() string { - return c.ID -} - -// Endpoint gets proto://host:port string for the first exposed port -// Will return just host:port if proto is "" -func (c *DockerContainer) Endpoint(ctx context.Context, proto string) (string, error) { - ports, err := c.Ports(ctx) - if err != nil { - return "", err - } - - // get first port - var firstPort nat.Port - for p := range ports { - firstPort = p - break - } - - return 
c.PortEndpoint(ctx, firstPort, proto) -} - -// PortEndpoint gets proto://host:port string for the given exposed port -// Will return just host:port if proto is "" -func (c *DockerContainer) PortEndpoint(ctx context.Context, port nat.Port, proto string) (string, error) { - host, err := c.Host(ctx) - if err != nil { - return "", err - } - - outerPort, err := c.MappedPort(ctx, port) - if err != nil { - return "", err - } - - protoFull := "" - if proto != "" { - protoFull = fmt.Sprintf("%s://", proto) - } - - return fmt.Sprintf("%s%s:%s", protoFull, host, outerPort.Port()), nil -} - -// Host gets host (ip or name) of the docker daemon where the container port is exposed -// Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel -// You can use the "TC_HOST" env variable to set this yourself -func (c *DockerContainer) Host(ctx context.Context) (string, error) { - host, err := c.provider.daemonHost() - if err != nil { - return "", err - } - return host, nil -} - -// MappedPort gets externally mapped port for a container port -func (c *DockerContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return "", err - } - if inspect.ContainerJSONBase.HostConfig.NetworkMode == "host" { - return port, nil - } - ports, err := c.Ports(ctx) - if err != nil { - return "", err - } - - for k, p := range ports { - if k.Port() != port.Port() { - continue - } - if port.Proto() != "" && k.Proto() != port.Proto() { - continue - } - return nat.NewPort(k.Proto(), p[0].HostPort) - } - - return "", errors.New("port not found") -} - -// Ports gets the exposed ports for the container. -func (c *DockerContainer) Ports(ctx context.Context) (nat.PortMap, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return nil, err - } - return inspect.NetworkSettings.Ports, nil -} - -// SessionID gets the current session id -func (c *DockerContainer) SessionID() string { - return c.sessionID.String() -} - -// Start will start an already created container -func (c *DockerContainer) Start(ctx context.Context) error { - shortID := c.ID[:12] - log.Printf("Starting container id: %s image: %s", shortID, c.Image) - - if err := c.provider.client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}); err != nil { - return err - } - - // if a Wait Strategy has been specified, wait before returning - if c.WaitingFor != nil { - log.Printf("Waiting for container id %s image: %s", shortID, c.Image) - if err := c.WaitingFor.WaitUntilReady(ctx, c); err != nil { - return err - } - } - log.Printf("Container is ready id: %s image: %s", shortID, c.Image) - - return nil -} - -// Terminate is used to kill the container. It is usually triggered by a defer function. -func (c *DockerContainer) Terminate(ctx context.Context) error { - err := c.provider.client.ContainerRemove(ctx, c.GetContainerID(), types.ContainerRemoveOptions{ - RemoveVolumes: true, - Force: true, - }) - - if err == nil { - c.sessionID = uuid.UUID{} - c.raw = nil - } - - return err -} - -func (c *DockerContainer) inspectContainer(ctx context.Context) (*types.ContainerJSON, error) { - if c.raw != nil { - return c.raw, nil - } - inspect, err := c.provider.client.ContainerInspect(ctx, c.ID) - if err != nil { - return nil, err - } - c.raw = &inspect - - return c.raw, nil -} - -// Logs will fetch both STDOUT and STDERR from the current container. Returns a -// ReadCloser and leaves it up to the caller to extract what it wants.
-func (c *DockerContainer) Logs(ctx context.Context) (io.ReadCloser, error) { - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - } - return c.provider.client.ContainerLogs(ctx, c.ID, options) -} - -// FollowOutput adds a LogConsumer to be sent logs from the container's -// STDOUT and STDERR -func (c *DockerContainer) FollowOutput(consumer LogConsumer) { - if c.consumers == nil { - c.consumers = []LogConsumer{ - consumer, - } - } else { - c.consumers = append(c.consumers, consumer) - } -} - -// Name gets the name of the container. -func (c *DockerContainer) Name(ctx context.Context) (string, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return "", err - } - return inspect.Name, nil -} - -// Networks gets the names of the networks the container is attached to. -func (c *DockerContainer) Networks(ctx context.Context) ([]string, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return []string{}, err - } - - networks := inspect.NetworkSettings.Networks - - n := []string{} - - for k := range networks { - n = append(n, k) - } - - return n, nil -} - -// NetworkAliases gets the aliases of the container for the networks it is attached to. -func (c *DockerContainer) NetworkAliases(ctx context.Context) (map[string][]string, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return map[string][]string{}, err - } - - networks := inspect.NetworkSettings.Networks - - a := map[string][]string{} - - for k := range networks { - a[k] = networks[k].Aliases - } - - return a, nil -} - -func (c *DockerContainer) Exec(ctx context.Context, cmd []string) (int, error) { - cli := c.provider.client - response, err := cli.ContainerExecCreate(ctx, c.ID, types.ExecConfig{ - Cmd: cmd, - Detach: false, - }) - if err != nil { - return 0, err - } - - err = cli.ContainerExecStart(ctx, response.ID, types.ExecStartCheck{ - Detach: false, - }) - if err != nil { - return 0, err - } - - var exitCode int - for { - execResp, err := cli.ContainerExecInspect(ctx, response.ID) - if err != nil { - return 0, err - } - - if !execResp.Running { - exitCode = execResp.ExitCode - break - } - - time.Sleep(100 * time.Millisecond) - } - - return exitCode, nil -} - -// StartLogProducer will start a concurrent process that will continuously read logs -// from the container and will send them to each added LogConsumer -func (c *DockerContainer) StartLogProducer(ctx context.Context) error { - go func() { - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Follow: true, - } - - ctx, cancel := context.WithTimeout(ctx, time.Second*5) - defer cancel() - - r, err := c.provider.client.ContainerLogs(ctx, c.GetContainerID(), options) - if err != nil { - // if we can't get the logs, panic, we can't return an error to anything - // from within this goroutine - panic(err) - } - - for { - select { - case <-c.stopProducer: - err := r.Close() - if err != nil { - // we can't close the read closer, this should never happen - panic(err) - } - return - default: - h := make([]byte, 8) - _, err := r.Read(h) - if err != nil { - // this explicitly ignores errors - // because we want to keep processing even if one of our reads fails - continue - } - - count := binary.BigEndian.Uint32(h[4:]) - if count == 0 { - continue - } - logType := h[0] - if logType > 2 { - panic(fmt.Sprintf("received invalid log type: %d", logType)) - } - - // a map of the log type --> int representation in the header; notice the first is blank: this is stdin, which the go
docker client doesn't allow following in logs - logTypes := []string{"", StdoutLog, StderrLog} - - b := make([]byte, count) - _, err = r.Read(b) - if err != nil { - // TODO: add-logger: use logger to log out this error - fmt.Fprintf(os.Stderr, "error occurred reading log with known length: %s", err.Error()) - continue - } - for _, c := range c.consumers { - c.Accept(Log{ - LogType: logTypes[logType], - Content: b, - }) - } - } - - } - }() - - return nil -} - -// StopLogProducer will stop the concurrent process that is reading logs -// and sending them to each added LogConsumer -func (c *DockerContainer) StopLogProducer() error { - c.stopProducer <- true - return nil -} - -// DockerNetwork represents a network started using Docker -type DockerNetwork struct { - ID string // Network ID from Docker - Driver string - Name string - provider *DockerProvider - terminationSignal chan bool -} - -// Remove is used to remove the network. It is usually triggered by a defer function. -func (n *DockerNetwork) Remove(_ context.Context) error { - if n.terminationSignal != nil { - n.terminationSignal <- true - } - return nil -} - -// DockerProvider implements the ContainerProvider interface -type DockerProvider struct { - client *client.Client - hostCache string -} - -var _ ContainerProvider = (*DockerProvider)(nil) - -// NewDockerProvider creates a Docker provider with the EnvClient -func NewDockerProvider() (*DockerProvider, error) { - client, err := client.NewEnvClient() - if err != nil { - return nil, err - } - client.NegotiateAPIVersion(context.Background()) - p := &DockerProvider{ - client: client, - } - return p, nil -} - -// BuildImage will build an image from context and Dockerfile, then return the tag -func (p *DockerProvider) BuildImage(ctx context.Context, img ImageBuildInfo) (string, error) { - repo := uuid.New() - tag := uuid.New() - - repoTag := fmt.Sprintf("%s:%s", repo, tag) - - buildContext, err := img.GetContext() - if err != nil { - return "", err - } - - buildOptions := types.ImageBuildOptions{ - Dockerfile: img.GetDockerfile(), - Context: buildContext, - Tags: []string{repoTag}, - } - - resp, err := p.client.ImageBuild(ctx, buildContext, buildOptions) - if err != nil { - return "", err - } - - // need to read the response from Docker, I think otherwise the image - // might not finish building before continuing to execute here - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(resp.Body) - if err != nil { - return "", err - } - - resp.Body.Close() - - return repoTag, nil -} - -// CreateContainer fulfills a request for a container without starting it -func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (Container, error) { - exposedPortSet, exposedPortMap, err := nat.ParsePortSpecs(req.ExposedPorts) - if err != nil { - return nil, err - } - - env := []string{} - for envKey, envVar := range req.Env { - env = append(env, envKey+"="+envVar) - } - - if req.Labels == nil { - req.Labels = make(map[string]string) - } - - sessionID := uuid.New() - - var termSignal chan bool - if !req.SkipReaper { - r, err := NewReaper(ctx, sessionID.String(), p, req.ReaperImage) - if err != nil { - return nil, errors.Wrap(err, "creating reaper failed") - } - termSignal, err = r.Connect() - if err != nil { - return nil, errors.Wrap(err, "connecting to reaper failed") - } - for k, v := range r.Labels() { - if _, ok := req.Labels[k]; !ok { - req.Labels[k] = v - } - } - } - - if err = req.Validate(); err != nil { - return nil, err - } - - var tag string - if 
req.ShouldBuildImage() { - tag, err = p.BuildImage(ctx, &req) - if err != nil { - return nil, err - } - } else { - tag = req.Image - var shouldPullImage bool - - if req.AlwaysPullImage { - shouldPullImage = true // If requested always attempt to pull image - } else { - _, _, err = p.client.ImageInspectWithRaw(ctx, tag) - if err != nil { - if client.IsErrNotFound(err) { - shouldPullImage = true - } else { - return nil, err - } - } - } - - if shouldPullImage { - pullOpt := types.ImagePullOptions{} - if req.RegistryCred != "" { - pullOpt.RegistryAuth = req.RegistryCred - } - - if err := p.attemptToPullImage(ctx, tag, pullOpt); err != nil { - return nil, err - } - } - } - - dockerInput := &container.Config{ - Image: tag, - Env: env, - ExposedPorts: exposedPortSet, - Labels: req.Labels, - Cmd: req.Cmd, - } - - // prepare mounts - mounts := []mount.Mount{} - for hostPath, innerPath := range req.BindMounts { - mounts = append(mounts, mount.Mount{ - Type: mount.TypeBind, - Source: hostPath, - Target: innerPath, - }) - } - for volumeName, innerPath := range req.VolumeMounts { - mounts = append(mounts, mount.Mount{ - Type: mount.TypeVolume, - Source: volumeName, - Target: innerPath, - }) - } - - hostConfig := &container.HostConfig{ - PortBindings: exposedPortMap, - Mounts: mounts, - Tmpfs: req.Tmpfs, - AutoRemove: req.AutoRemove, - Privileged: req.Privileged, - NetworkMode: req.NetworkMode, - } - - endpointConfigs := map[string]*network.EndpointSettings{} - for _, n := range req.Networks { - nw, err := p.GetNetwork(ctx, NetworkRequest{ - Name: n, - }) - if err == nil { - endpointSetting := network.EndpointSettings{ - Aliases: req.NetworkAliases[n], - NetworkID: nw.ID, - } - endpointConfigs[n] = &endpointSetting - } - } - networkingConfig := network.NetworkingConfig{ - EndpointsConfig: endpointConfigs, - } - - resp, err := p.client.ContainerCreate(ctx, dockerInput, hostConfig, &networkingConfig, req.Name) - if err != nil { - return nil, err - } - - c := &DockerContainer{ - ID: resp.ID, - WaitingFor: req.WaitingFor, - Image: tag, - sessionID: sessionID, - provider: p, - terminationSignal: termSignal, - skipReaper: req.SkipReaper, - stopProducer: make(chan bool), - } - - return c, nil -} - -// attemptToPullImage tries to pull the image while respecting ctx cancellation. -// If the image cannot be pulled because it does not exist, there is no need to retry and it terminates immediately. -func (p *DockerProvider) attemptToPullImage(ctx context.Context, tag string, pullOpt types.ImagePullOptions) error { - var ( - err error - pull io.ReadCloser - ) - err = backoff.Retry(func() error { - pull, err = p.client.ImagePull(ctx, tag, pullOpt) - if _, ok := err.(errdefs.ErrNotFound); ok { - return backoff.Permanent(err) - } - return err - }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) - if err != nil { - return err - } - defer pull.Close() - - // download of docker image finishes at EOF of the pull request - _, err = ioutil.ReadAll(pull) - return err -} - -// RunContainer takes a ContainerRequest as input and runs a container via the Docker SDK -func (p *DockerProvider) RunContainer(ctx context.Context, req ContainerRequest) (Container, error) { - c, err := p.CreateContainer(ctx, req) - if err != nil { - return nil, err - } - - if err := c.Start(ctx); err != nil { - return c, errors.Wrap(err, "could not start container") - } - - return c, nil -} - -// daemonHost gets the host or ip of the Docker daemon where ports are exposed on -// Warning: this is based on your Docker host setting.
Will fail if using an SSH tunnel -// You can use the "TC_HOST" env variable to set this yourself -func (p *DockerProvider) daemonHost() (string, error) { - if p.hostCache != "" { - return p.hostCache, nil - } - - host, exists := os.LookupEnv("TC_HOST") - if exists { - p.hostCache = host - return p.hostCache, nil - } - - // infer from Docker host - url, err := url.Parse(p.client.DaemonHost()) - if err != nil { - return "", err - } - - switch url.Scheme { - case "http", "https", "tcp": - p.hostCache = url.Hostname() - case "unix", "npipe": - if inAContainer() { - ip, err := getGatewayIp() - if err != nil { - return "", err - } - p.hostCache = ip - } else { - p.hostCache = "localhost" - } - default: - return "", errors.New("Could not determine host through env or docker host") - } - - return p.hostCache, nil -} - -// CreateNetwork returns the object representing a new network identified by its name -func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) (Network, error) { - if req.Labels == nil { - req.Labels = make(map[string]string) - } - - nc := types.NetworkCreate{ - Driver: req.Driver, - CheckDuplicate: req.CheckDuplicate, - Internal: req.Internal, - EnableIPv6: req.EnableIPv6, - Attachable: req.Attachable, - Labels: req.Labels, - } - - sessionID := uuid.New() - - var termSignal chan bool - if !req.SkipReaper { - r, err := NewReaper(ctx, sessionID.String(), p, req.ReaperImage) - if err != nil { - return nil, errors.Wrap(err, "creating network reaper failed") - } - termSignal, err = r.Connect() - if err != nil { - return nil, errors.Wrap(err, "connecting to network reaper failed") - } - for k, v := range r.Labels() { - if _, ok := req.Labels[k]; !ok { - req.Labels[k] = v - } - } - } - - response, err := p.client.NetworkCreate(ctx, req.Name, nc) - if err != nil { - return &DockerNetwork{}, err - } - - n := &DockerNetwork{ - ID: response.ID, - Driver: req.Driver, - Name: req.Name, - terminationSignal: termSignal, - } - - return n, nil -} - -// GetNetwork returns the object representing the network identified by its name -func (p *DockerProvider) GetNetwork(ctx context.Context, req NetworkRequest) (types.NetworkResource, error) { - networkResource, err := p.client.NetworkInspect(ctx, req.Name, types.NetworkInspectOptions{ - Verbose: true, - }) - if err != nil { - return types.NetworkResource{}, err - } - - return networkResource, err -} - -func inAContainer() bool { - // see https://github.com/testcontainers/testcontainers-java/blob/3ad8d80e2484864e554744a4800a81f6b7982168/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L15 - if _, err := os.Stat("/.dockerenv"); err == nil { - return true - } - return false -} - -func getGatewayIp() (string, error) { - // see https://github.com/testcontainers/testcontainers-java/blob/3ad8d80e2484864e554744a4800a81f6b7982168/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L27 - cmd := exec.Command("sh", "-c", "ip route|awk '/default/ { print $3 }'") - stdout, err := cmd.Output() - if err != nil { - return "", errors.New("Failed to detect docker host") - } - ip := strings.TrimSpace(string(stdout)) - if len(ip) == 0 { - return "", errors.New("Failed to parse default gateway IP") - } - return string(ip), nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/generic.go b/vendor/github.com/testcontainers/testcontainers-go/generic.go deleted file mode 100644 index cf39b8c1..00000000 --- 
a/vendor/github.com/testcontainers/testcontainers-go/generic.go +++ /dev/null @@ -1,41 +0,0 @@ -package testcontainers - -import ( - "context" - - "github.com/pkg/errors" -) - -// GenericContainerRequest represents parameters to a generic container -type GenericContainerRequest struct { - ContainerRequest // embedded request for provider - Started bool // whether to auto-start the container - ProviderType ProviderType // which provider to use, Docker if empty -} - -// GenericContainer creates a generic container with parameters -func GenericContainer(ctx context.Context, req GenericContainerRequest) (Container, error) { - provider, err := req.ProviderType.GetProvider() - if err != nil { - return nil, err - } - - c, err := provider.CreateContainer(ctx, req.ContainerRequest) - if err != nil { - return nil, errors.Wrap(err, "failed to create container") - } - - if req.Started { - if err := c.Start(ctx); err != nil { - return c, errors.Wrap(err, "failed to start container") - } - } - - return c, nil -} - -// GenericProvider represents an abstraction for container and network providers -type GenericProvider interface { - ContainerProvider - NetworkProvider -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/go.mod b/vendor/github.com/testcontainers/testcontainers-go/go.mod deleted file mode 100644 index 5703a700..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/go.mod +++ /dev/null @@ -1,37 +0,0 @@ -module github.com/testcontainers/testcontainers-go - -go 1.13 - -require ( - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect - github.com/Microsoft/go-winio v0.4.11 // indirect - github.com/Microsoft/hcsshim v0.8.6 // indirect - github.com/cenkalti/backoff v2.2.1+incompatible - github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect - github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible // indirect - github.com/docker/docker v0.7.3-0.20190506211059-b20a14b54661 - github.com/docker/go-connections v0.4.0 - github.com/docker/go-units v0.3.3 // indirect - github.com/gin-gonic/gin v1.6.2 - github.com/go-redis/redis v6.15.7+incompatible - github.com/go-sql-driver/mysql v1.5.0 - github.com/gogo/protobuf v1.2.0 // indirect - github.com/google/uuid v1.1.1 - github.com/gorilla/context v1.1.1 // indirect - github.com/gorilla/mux v1.6.2 // indirect - github.com/kr/pretty v0.1.0 // indirect - github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect - github.com/onsi/ginkgo v1.8.0 // indirect - github.com/onsi/gomega v1.5.0 // indirect - github.com/opencontainers/go-digest v1.0.0-rc1 // indirect - github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/opencontainers/runc v0.1.1 // indirect - github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.2.0 // indirect - golang.org/x/time v0.0.0-20181108054448-85acf8d2951c // indirect - google.golang.org/grpc v1.17.0 // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gotest.tools v0.0.0-20181223230014-1083505acf35 -) - -replace github.com/docker/docker => github.com/docker/engine v0.0.0-20190717161051-705d9623b7c1 diff --git a/vendor/github.com/testcontainers/testcontainers-go/go.sum b/vendor/github.com/testcontainers/testcontainers-go/go.sum deleted file mode 100644 index edfa5b54..00000000 --- 
a/vendor/github.com/testcontainers/testcontainers-go/go.sum +++ /dev/null @@ -1,157 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/engine v0.0.0-20190717161051-705d9623b7c1 h1:pKV3lCoWunXtXfyRUcqYflvdaiFU3BMxHw5izMsYDhY= -github.com/docker/engine v0.0.0-20190717161051-705d9623b7c1/go.mod h1:3CPr2caMgTHxxIAZgEMd3uLYPDlRvPqCpyeRf6ncPcY= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.6.2 h1:88crIK23zO6TqlQBt+f9FrPJNKm9ZEr7qjp9vl/d5TM= -github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 
h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= -github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U= -github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= 
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= -github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/ugorji/go v1.1.7 
h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= 
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v0.0.0-20181223230014-1083505acf35 h1:zpdCK+REwbk+rqjJmHhiCN6iBIigrZ39glqSF0P3KF0= -gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go b/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go deleted file mode 100644 index c5a2e29c..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go +++ /dev/null @@ -1,22 +0,0 @@ -package testcontainers - -// StdoutLog is the log type for STDOUT -const StdoutLog = "STDOUT" - -// StderrLog is the log type for STDERR -const StderrLog = "STDERR" - -// Log represents a message that was created by a process, -// LogType is either "STDOUT" or "STDERR", -// Content is the byte contents of the message itself -type Log struct { - LogType string - Content []byte -} - -// LogConsumer represents any object that can -// handle a Log, it is up to the LogConsumer instance -// what to do with the log -type LogConsumer interface { - Accept(Log) -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/network.go b/vendor/github.com/testcontainers/testcontainers-go/network.go deleted file mode 100644 index ce99a017..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/network.go +++ /dev/null @@ -1,32 +0,0 @@ -package testcontainers - -import ( - "context" - - "github.com/docker/docker/api/types" -) - -// NetworkProvider allows the creation of networks on an arbitrary system -type NetworkProvider interface { - CreateNetwork(context.Context, NetworkRequest) (Network, error) // create a network - GetNetwork(context.Context, NetworkRequest) (types.NetworkResource, error) // get a network -} - -// Network allows getting info about a single network instance -type Network interface { - Remove(context.Context) error // removes the network -} - -// NetworkRequest represents the parameters used to get a network -type NetworkRequest struct { - Driver string - CheckDuplicate bool - Internal bool - EnableIPv6 bool - Name string - Labels map[string]string - Attachable bool - - SkipReaper bool // indicates whether we skip setting up a reaper for this - ReaperImage string //alternative reaper registry -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/reaper.go b/vendor/github.com/testcontainers/testcontainers-go/reaper.go deleted file mode 100644 index bee6b253..00000000 --- 
a/vendor/github.com/testcontainers/testcontainers-go/reaper.go +++ /dev/null @@ -1,131 +0,0 @@ -package testcontainers - -import ( - "bufio" - "context" - "fmt" - "github.com/docker/go-connections/nat" - "github.com/testcontainers/testcontainers-go/wait" - "net" - "strings" - "time" - - "github.com/pkg/errors" -) - -const ( - TestcontainerLabel = "org.testcontainers.golang" - TestcontainerLabelSessionID = TestcontainerLabel + ".sessionId" - TestcontainerLabelIsReaper = TestcontainerLabel + ".reaper" - - ReaperDefaultImage = "quay.io/testcontainers/ryuk:0.2.3" -) - -// ReaperProvider represents a provider for the reaper to run itself with -// The ContainerProvider interface should usually satisfy this as well, so it is pluggable -type ReaperProvider interface { - RunContainer(ctx context.Context, req ContainerRequest) (Container, error) -} - -// Reaper is used to start a sidecar container that cleans up resources -type Reaper struct { - Provider ReaperProvider - SessionID string - Endpoint string -} - -// NewReaper creates a Reaper with a sessionID to identify containers and a provider to use -func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, reaperImageName string) (*Reaper, error) { - r := &Reaper{ - Provider: provider, - SessionID: sessionID, - } - - listeningPort := nat.Port("8080/tcp") - - // TODO: reuse reaper if there already is one - req := ContainerRequest{ - Image: reaperImage(reaperImageName), - ExposedPorts: []string{string(listeningPort)}, - Labels: map[string]string{ - TestcontainerLabel: "true", - TestcontainerLabelIsReaper: "true", - }, - SkipReaper: true, - BindMounts: map[string]string{ - "/var/run/docker.sock": "/var/run/docker.sock", - }, - AutoRemove: true, - WaitingFor: wait.ForListeningPort(listeningPort), - } - - c, err := provider.RunContainer(ctx, req) - if err != nil { - return nil, err - } - - endpoint, err := c.PortEndpoint(ctx, "8080", "") - if err != nil { - return nil, err - } - r.Endpoint = endpoint - - return r, nil -} - -func reaperImage(reaperImageName string) string { - if reaperImageName == "" { - return ReaperDefaultImage - } else { - return reaperImageName - } -} - -// Connect runs a goroutine which can be terminated by sending true into the returned channel -func (r *Reaper) Connect() (chan bool, error) { - conn, err := net.DialTimeout("tcp", r.Endpoint, 10*time.Second) - if err != nil { - return nil, errors.Wrap(err, "Connecting to Ryuk on "+r.Endpoint+" failed") - } - - terminationSignal := make(chan bool) - go func(conn net.Conn) { - sock := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) - defer conn.Close() - - labelFilters := []string{} - for l, v := range r.Labels() { - labelFilters = append(labelFilters, fmt.Sprintf("label=%s=%s", l, v)) - } - - retryLimit := 3 - for retryLimit > 0 { - retryLimit-- - - sock.WriteString(strings.Join(labelFilters, "&")) - sock.WriteString("\n") - if err := sock.Flush(); err != nil { - continue - } - - resp, err := sock.ReadString('\n') - if err != nil { - continue - } - if resp == "ACK\n" { - break - } - } - - <-terminationSignal - }(conn) - return terminationSignal, nil -} - -// Labels returns the container labels to use so that this Reaper cleans them up -func (r *Reaper) Labels() map[string]string { - return map[string]string{ - TestcontainerLabel: "true", - TestcontainerLabelSessionID: r.SessionID, - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go 
b/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go
deleted file mode 100644
index dd675033..00000000
--- a/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package wait
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"os"
-	"strconv"
-	"syscall"
-	"time"
-
-	"github.com/pkg/errors"
-
-	"github.com/docker/go-connections/nat"
-)
-
-// Implement interface
-var _ Strategy = (*HostPortStrategy)(nil)
-
-type HostPortStrategy struct {
-	Port nat.Port
-	// all WaitStrategies should have a startupTimeout to avoid waiting infinitely
-	startupTimeout time.Duration
-}
-
-// NewHostPortStrategy constructs a default host port strategy
-func NewHostPortStrategy(port nat.Port) *HostPortStrategy {
-	return &HostPortStrategy{
-		Port:           port,
-		startupTimeout: defaultStartupTimeout(),
-	}
-}
-
-// fluent builders for each property
-// since go has neither covariance nor generics, the return type must be the type of the concrete implementation
-// this is true for all properties, even the "shared" ones like startupTimeout
-
-// ForListeningPort is a helper similar to those in Wait.java
-// https://github.com/testcontainers/testcontainers-java/blob/1d85a3834bd937f80aad3a4cec249c027f31aeb4/core/src/main/java/org/testcontainers/containers/wait/strategy/Wait.java
-func ForListeningPort(port nat.Port) *HostPortStrategy {
-	return NewHostPortStrategy(port)
-}
-
-func (hp *HostPortStrategy) WithStartupTimeout(startupTimeout time.Duration) *HostPortStrategy {
-	hp.startupTimeout = startupTimeout
-	return hp
-}
-
-// WaitUntilReady implements Strategy.WaitUntilReady
-func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) {
-	// limit context to startupTimeout
-	ctx, cancelContext := context.WithTimeout(ctx, hp.startupTimeout)
-	defer cancelContext()
-
-	ipAddress, err := target.Host(ctx)
-	if err != nil {
-		return
-	}
-
-	port, err := target.MappedPort(ctx, hp.Port)
-	if err != nil {
-		return
-	}
-
-	proto := port.Proto()
-	portNumber := port.Int()
-	portString := strconv.Itoa(portNumber)
-
-	//external check
-	dialer := net.Dialer{}
-	address := net.JoinHostPort(ipAddress, portString)
-	for {
-		conn, err := dialer.DialContext(ctx, proto, address)
-		if err != nil {
-			if v, ok := err.(*net.OpError); ok {
-				if v2, ok := (v.Err).(*os.SyscallError); ok {
-					if v2.Err == syscall.ECONNREFUSED && ctx.Err() == nil {
-						time.Sleep(100 * time.Millisecond)
-						continue
-					}
-				}
-			}
-			return err
-		} else {
-			conn.Close()
-			break
-		}
-	}
-
-	//internal check
-	command := buildInternalCheckCommand(hp.Port.Int())
-	for {
-		exitCode, err := target.Exec(ctx, []string{"/bin/sh", "-c", command})
-		if err != nil {
-			return errors.Wrapf(err, "host port waiting failed")
-		}
-
-		if exitCode == 0 {
-			break
-		} else if exitCode == 126 {
-			return errors.New("/bin/sh command not executable")
-		}
-	}
-
-	return nil
-}
-
-func buildInternalCheckCommand(internalPort int) string {
-	command := `(
-	cat /proc/net/tcp* | awk '{print $2}' | grep -i :%04x ||
-	nc -vz -w 1 localhost %d ||
-	/bin/sh -c '</dev/tcp/localhost/%d'
-)
-`
-	return "true && " + fmt.Sprintf(command, internalPort, internalPort, internalPort)
-}
[the diff header and most of the deleted wait/log.go were lost in extraction; only the tail of the LogStrategy wait loop survives]
-			if strings.Count(logs, ws.Log) >= ws.Occurrence-1 {
-				break LOOP
-			}
-		} else {
-			time.Sleep(ws.PollInterval)
-			continue
-		}
-	}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/multi.go b/vendor/github.com/testcontainers/testcontainers-go/wait/multi.go
deleted file mode 100644
index 468a5a19..00000000
--- a/vendor/github.com/testcontainers/testcontainers-go/wait/multi.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package wait - -import ( - "context" - "fmt" - "time" -) - -// Implement interface -var _ Strategy = (*MultiStrategy)(nil) - -type MultiStrategy struct { - // all Strategies should have a startupTimeout to avoid waiting infinitely - startupTimeout time.Duration - - // additional properties - Strategies []Strategy -} - -func (ms *MultiStrategy) WithStartupTimeout(startupTimeout time.Duration) *MultiStrategy { - ms.startupTimeout = startupTimeout - return ms -} - -func ForAll(strategies ...Strategy) *MultiStrategy { - return &MultiStrategy{ - startupTimeout: defaultStartupTimeout(), - Strategies: strategies, - } -} - -func (ms *MultiStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { - ctx, cancelContext := context.WithTimeout(ctx, ms.startupTimeout) - defer cancelContext() - - if len(ms.Strategies) == 0 { - return fmt.Errorf("no wait strategy supplied") - } - - for _, strategy := range ms.Strategies { - err := strategy.WaitUntilReady(ctx, target) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go b/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go deleted file mode 100644 index 4498f8d0..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go +++ /dev/null @@ -1,66 +0,0 @@ -package wait - -import ( - "context" - "database/sql" - "fmt" - "github.com/docker/go-connections/nat" - "time" -) - -//ForSQL constructs a new waitForSql strategy for the given driver -func ForSQL(port nat.Port, driver string, url func(nat.Port) string) *waitForSql { - return &waitForSql{ - Port: port, - URL: url, - Driver: driver, - } -} - -type waitForSql struct { - URL func(port nat.Port) string - Driver string - Port nat.Port - startupTimeout time.Duration -} - -//Timeout sets the maximum waiting time for the strategy after which it'll give up and return an error -func (w *waitForSql) Timeout(duration time.Duration) *waitForSql { - w.startupTimeout = duration - return w -} - -//WaitUntilReady repeatedly tries to run "SELECT 1" query on the given port using sql and driver. 
-// If the it doesn't succeed until the timeout value which defaults to 10 seconds, it will return an error -func (w *waitForSql) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { - if w.startupTimeout == 0 { - w.startupTimeout = time.Second * 10 - } - ctx, cancel := context.WithTimeout(ctx, w.startupTimeout) - defer cancel() - - ticker := time.NewTicker(time.Millisecond * 100) - defer ticker.Stop() - - port, err := target.MappedPort(ctx, w.Port) - if err != nil { - return fmt.Errorf("target.MappedPort: %v", err) - } - - db, err := sql.Open(w.Driver, w.URL(port)) - if err != nil { - return fmt.Errorf("sql.Open: %v", err) - } - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - - if _, err := db.ExecContext(ctx, "SELECT 1"); err != nil { - continue - } - return nil - } - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go b/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go deleted file mode 100644 index f71b9e99..00000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go +++ /dev/null @@ -1,24 +0,0 @@ -package wait - -import ( - "context" - "io" - "time" - - "github.com/docker/go-connections/nat" -) - -type Strategy interface { - WaitUntilReady(context.Context, StrategyTarget) error -} - -type StrategyTarget interface { - Host(context.Context) (string, error) - MappedPort(context.Context, nat.Port) (nat.Port, error) - Logs(context.Context) (io.ReadCloser, error) - Exec(ctx context.Context, cmd []string) (int, error) -} - -func defaultStartupTimeout() time.Duration { - return 60 * time.Second -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml deleted file mode 100644 index 13ff9986..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go - -go: - - 1.12 - diff --git a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md deleted file mode 100644 index b3bc3b61..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md +++ /dev/null @@ -1,10 +0,0 @@ -# 1.0.1 (July 30, 2019) - -* The YAML decoder is now correctly treating quoted scalars as verbatim literal - strings rather than using the fuzzy type selection rules for them. Fuzzy - type selection rules still apply to unquoted scalars. - ([#4](https://github.com/zclconf/go-cty-yaml/pull/4)) - -# 1.0.0 (May 26, 2019) - -Initial release. 
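A note on the wait strategies dropped above: wait/sql.go's ForSQL is the readiness probe a caller uses when spinning up a database container with testcontainers-go, combined with GenericContainer from the deleted generic.go. Below is a minimal sketch of how the two are typically wired together; the startMySQL helper name, image, credentials, and DSN are illustrative rather than taken from this repo, and "localhost" assumes a local Docker daemon.

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/docker/go-connections/nat"
	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver that wait.ForSQL opens
	testcontainers "github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)

// startMySQL starts a MySQL container and blocks until the SQL probe succeeds.
func startMySQL(ctx context.Context) (testcontainers.Container, error) {
	port := nat.Port("3306/tcp")
	req := testcontainers.ContainerRequest{
		Image:        "mysql:5.7",
		ExposedPorts: []string{string(port)},
		Env: map[string]string{
			"MYSQL_ROOT_PASSWORD": "secret",
			"MYSQL_DATABASE":      "db",
		},
		// The URL callback receives the host-mapped port, not 3306.
		WaitingFor: wait.ForSQL(port, "mysql", func(p nat.Port) string {
			return fmt.Sprintf("root:secret@tcp(localhost:%s)/db", p.Port())
		}).Timeout(30 * time.Second),
	}
	return testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
}

As the deleted sql.go shows, the strategy polls "SELECT 1" against the mapped port every 100ms and gives up after the timeout, which defaults to 10 seconds unless overridden with Timeout.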
diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml deleted file mode 100644 index 8da58fbf..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/zclconf/go-cty-yaml/NOTICE b/vendor/github.com/zclconf/go-cty-yaml/NOTICE deleted file mode 100644 index 4e6c00ab..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -This package is derived from gopkg.in/yaml.v2, which is copyright -2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -Includes mechanical ports of code from libyaml, distributed under its original -license. See LICENSE.libyaml for more information. - -Modifications for cty interfacing copyright 2019 Martin Atkins, and -distributed under the same license terms. diff --git a/vendor/github.com/zclconf/go-cty-yaml/apic.go b/vendor/github.com/zclconf/go-cty-yaml/apic.go deleted file mode 100644 index 1f7e87e6..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/apic.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. 
- if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. 
-func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. 
-func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. 
-// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. 
-// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/github.com/zclconf/go-cty-yaml/converter.go b/vendor/github.com/zclconf/go-cty-yaml/converter.go deleted file mode 100644 index a73b34a8..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/converter.go +++ /dev/null @@ -1,69 +0,0 @@ -package yaml - -import ( - "github.com/zclconf/go-cty/cty" -) - -// ConverterConfig is used to configure a new converter, using NewConverter. -type ConverterConfig struct { - // EncodeAsFlow, when set to true, causes Marshal to produce flow-style - // mapping and sequence serializations. - EncodeAsFlow bool -} - -// A Converter can marshal and unmarshal between cty values and YAML bytes. -// -// Because there are many different ways to map cty to YAML and vice-versa, -// a converter is configurable using the settings in ConverterConfig, which -// allow for a few different permutations of mapping to YAML. -// -// If you are just trying to work with generic, standard YAML, the predefined -// converter in Standard should be good enough. -type Converter struct { - encodeAsFlow bool -} - -// NewConverter creates a new Converter with the given configuration. 
-func NewConverter(config *ConverterConfig) *Converter { - return &Converter{ - encodeAsFlow: config.EncodeAsFlow, - } -} - -// Standard is a predefined Converter that produces and consumes generic YAML -// using only built-in constructs that any other YAML implementation ought to -// understand. -var Standard *Converter = NewConverter(&ConverterConfig{}) - -// ImpliedType analyzes the given source code and returns a suitable type that -// it could be decoded into. -// -// For a converter that is using standard YAML rather than cty-specific custom -// tags, only a subset of cty types can be produced: strings, numbers, bools, -// tuple types, and object types. -func (c *Converter) ImpliedType(src []byte) (cty.Type, error) { - return c.impliedType(src) -} - -// Marshal serializes the given value into a YAML document, using a fixed -// mapping from cty types to YAML constructs. -// -// Note that unlike the function of the same name in the cty JSON package, -// this does not take a type constraint and therefore the YAML serialization -// cannot preserve late-bound type information in the serialization to be -// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type -// constraint given to Unmarshal will be decoded as if the corresponding portion -// of the input were processed with ImpliedType to find a target type. -func (c *Converter) Marshal(v cty.Value) ([]byte, error) { - return c.marshal(v) -} - -// Unmarshal reads the document found within the given source buffer -// and attempts to convert it into a value conforming to the given type -// constraint. -// -// An error is returned if the given source contains any YAML document -// delimiters. -func (c *Converter) Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - return c.unmarshal(src, ty) -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go deleted file mode 100644 index b91141cc..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go +++ /dev/null @@ -1,57 +0,0 @@ -package yaml - -import ( - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// YAMLDecodeFunc is a cty function for decoding arbitrary YAML source code -// into a cty Value, using the ImpliedType and Unmarshal methods of the -// Standard pre-defined converter. -var YAMLDecodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "src", - Type: cty.String, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - if !args[0].IsKnown() { - return cty.DynamicPseudoType, nil - } - if args[0].IsNull() { - return cty.NilType, function.NewArgErrorf(0, "YAML source code cannot be null") - } - return Standard.ImpliedType([]byte(args[0].AsString())) - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - if retType == cty.DynamicPseudoType { - return cty.DynamicVal, nil - } - return Standard.Unmarshal([]byte(args[0].AsString()), retType) - }, -}) - -// YAMLEncodeFunc is a cty function for encoding an arbitrary cty value -// into YAML. 
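[A minimal usage sketch of the Converter API deleted above, assuming the library is now consumed from the Go module cache rather than this vendor tree; the document literal and attribute names are illustrative only.]

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	src := []byte("name: example\ncount: 3\n")

	// ImpliedType proposes a cty type for the document...
	ty, err := yaml.Standard.ImpliedType(src)
	if err != nil {
		panic(err)
	}

	// ...and Unmarshal decodes into a value conforming to that type.
	v, err := yaml.Standard.Unmarshal(src, ty)
	if err != nil {
		panic(err)
	}

	// Marshal round-trips the value back to YAML bytes.
	out, err := yaml.Standard.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)

	// YAMLDecodeFunc wraps the same decode path as a cty function, e.g.
	// for registration in an HCL function table.
	dv, err := yaml.YAMLDecodeFunc.Call([]cty.Value{cty.StringVal("a: 1")})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", dv.GetAttr("a")) // a number value
}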
-var YAMLEncodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowNull: true, - AllowDynamicType: true, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - if !args[0].IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - raw, err := Standard.Marshal(args[0]) - if err != nil { - return cty.NilVal, err - } - return cty.StringVal(string(raw)), nil - }, -}) diff --git a/vendor/github.com/zclconf/go-cty-yaml/decode.go b/vendor/github.com/zclconf/go-cty-yaml/decode.go deleted file mode 100644 index e369ff27..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/decode.go +++ /dev/null @@ -1,261 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -func (c *Converter) unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - p := &yaml_parser_t{} - if !yaml_parser_initialize(p) { - return cty.NilVal, errors.New("failed to initialize YAML parser") - } - if len(src) == 0 { - src = []byte{'\n'} - } - - an := &valueAnalysis{ - anchorsPending: map[string]int{}, - anchorVals: map[string]cty.Value{}, - } - - yaml_parser_set_input_string(p, src) - - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_STREAM_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "missing stream start token") - } - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_DOCUMENT_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "missing start of document") - } - - v, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ == yaml_DOCUMENT_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "only a single document is allowed") - } - if evt.typ != yaml_DOCUMENT_END_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) - } - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_STREAM_END_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content after value") - } - - return convert.Convert(v, ty) -} - -func (c *Converter) unmarshalParse(an *valueAnalysis, p *yaml_parser_t) (cty.Value, error) { - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - return c.unmarshalParseRemainder(an, &evt, p) -} - -func (c *Converter) unmarshalParseRemainder(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - switch evt.typ { - case yaml_SCALAR_EVENT: - return c.unmarshalScalar(an, evt, p) - case yaml_ALIAS_EVENT: - return c.unmarshalAlias(an, evt, p) - case yaml_MAPPING_START_EVENT: - return c.unmarshalMapping(an, evt, p) - case yaml_SEQUENCE_START_EVENT: - return c.unmarshalSequence(an, evt, p) - case yaml_DOCUMENT_START_EVENT: - return cty.NilVal, parseEventErrorf(evt, "only a single document is allowed") - case yaml_STREAM_END_EVENT: - // Decoding an empty buffer, probably - return cty.NilVal, parseEventErrorf(evt, "expecting value but found end of stream") - default: - // Should never happen; the above should be comprehensive - return cty.NilVal, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) - } -} - -func (c *Converter) 
unmarshalScalar(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - src := evt.value - tag := string(evt.tag) - anchor := string(evt.anchor) - - if len(anchor) > 0 { - an.beginAnchor(anchor) - } - - val, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) - if err != nil { - return cty.NilVal, parseEventErrorWrap(evt, err) - } - - if val.RawEquals(mergeMappingVal) { - // In any context other than a mapping key, this is just a plain string - val = cty.StringVal("<<") - } - - if len(anchor) > 0 { - an.completeAnchor(anchor, val) - } - return val, nil -} - -func (c *Converter) unmarshalMapping(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_MAP_TAG { - return cty.NilVal, parseEventErrorf(evt, "can't interpret mapping as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - vals := make(map[string]cty.Value) - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilVal, parserError(p) - } - if nextEvt.typ == yaml_MAPPING_END_EVENT { - v := cty.ObjectVal(vals) - if anchor != "" { - an.completeAnchor(anchor, v) - } - return v, nil - } - - if nextEvt.typ != yaml_SCALAR_EVENT { - return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) - if err != nil { - return cty.NilVal, err - } - if keyVal.RawEquals(mergeMappingVal) { - // Merging the value (which must be a mapping) into our mapping, - // then. - val, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - ty := val.Type() - if !(ty.IsObjectType() || ty.IsMapType()) { - return cty.NilVal, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) - } - for it := val.ElementIterator(); it.Next(); { - k, v := it.Element() - vals[k.AsString()] = v - } - continue - } - if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { - keyVal = keyValStr - } else { - return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - if keyVal.IsNull() { - return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key cannot be null") - } - if !keyVal.IsKnown() { - return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key must be known") - } - val, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - - vals[keyVal.AsString()] = val - } -} - -func (c *Converter) unmarshalSequence(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_SEQ_TAG { - return cty.NilVal, parseEventErrorf(evt, "can't interpret sequence as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - var vals []cty.Value - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilVal, parserError(p) - } - if nextEvt.typ == yaml_SEQUENCE_END_EVENT { - ty := cty.TupleVal(vals) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - val, err := c.unmarshalParseRemainder(an, &nextEvt, p) - if err != nil { - return cty.NilVal, err - } - - vals = append(vals, val) - } -} - -func (c *Converter) unmarshalAlias(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - v, err := an.anchorVal(string(evt.anchor)) - if err != nil { - err = 
parseEventErrorWrap(evt, err) - } - return v, err -} - -type valueAnalysis struct { - anchorsPending map[string]int - anchorVals map[string]cty.Value -} - -func (an *valueAnalysis) beginAnchor(name string) { - an.anchorsPending[name]++ -} - -func (an *valueAnalysis) completeAnchor(name string, v cty.Value) { - an.anchorsPending[name]-- - if an.anchorsPending[name] == 0 { - delete(an.anchorsPending, name) - } - an.anchorVals[name] = v -} - -func (an *valueAnalysis) anchorVal(name string) (cty.Value, error) { - if _, pending := an.anchorsPending[name]; pending { - // YAML normally allows self-referencing structures, but cty cannot - // represent them (it requires all structures to be finite) so we - // must fail here. - return cty.NilVal, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) - } - ty, ok := an.anchorVals[name] - if !ok { - return cty.NilVal, fmt.Errorf("reference to undefined anchor %q", name) - } - return ty, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go deleted file mode 100644 index a1c2cc52..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. 
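[The valueAnalysis bookkeeping earlier in decode.go (beginAnchor/completeAnchor/anchorVal) is what makes YAML anchors and aliases work through Unmarshal, and what rejects self-referential documents that cty's finite values cannot represent. A sketch reusing the imports from the earlier example; the document text is illustrative.]

func decodeWithAlias() (cty.Value, error) {
	src := []byte("defaults: &d\n  retries: 2\nprod: *d\n")
	ty, err := yaml.Standard.ImpliedType(src)
	if err != nil {
		return cty.NilVal, err
	}
	// "prod" resolves to the same object value anchored at "defaults".
	return yaml.Standard.Unmarshal(src, ty)
}

// A document such as "self: &s {again: *s}" instead fails with
// `cannot refer to anchor "s" from inside its own definition`.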
-func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
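[The accumulation rule in yaml_emitter_need_more_events above is easier to see in isolation. A standalone restatement with hypothetical event-type strings, not the package's real yaml_event_type_t values: buffering continues only while the construct opened at the head of the queue is still unclosed.]

func needMore(types []string, head int) bool {
	if head == len(types) {
		return true // nothing buffered yet
	}
	// How many extra events to accumulate after each opening event;
	// a zero lookup means no lookahead is needed at all.
	accumulate := map[string]int{"DOC-START": 1, "SEQ-START": 2, "MAP-START": 3}[types[head]]
	if accumulate == 0 {
		return false
	}
	if len(types)-head > accumulate {
		return false
	}
	level := 0
	for _, t := range types[head:] {
		switch t {
		case "STREAM-START", "DOC-START", "SEQ-START", "MAP-START":
			level++
		case "STREAM-END", "DOC-END", "SEQ-END", "MAP-END":
			level--
		}
		if level == 0 {
			return false // the opened construct closed within the buffer
		}
	}
	return true // still inside the construct; keep buffering
}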
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. 
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
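[yaml_emitter_emit_sequence_start and yaml_emitter_emit_mapping_start above route to the FLOW states whenever the event carries a flow style, which is how ConverterConfig.EncodeAsFlow from converter.go surfaces down here. A sketch with illustrative values, using the imports from the first example.]

func flowExample() ([]byte, error) {
	flow := yaml.NewConverter(&yaml.ConverterConfig{EncodeAsFlow: true})
	// With EncodeAsFlow set, marshalMapping requests the flow mapping
	// style, so emit_mapping_start selects the flow-mapping states.
	return flow.Marshal(cty.ObjectVal(map[string]cty.Value{
		"a": cty.NumberIntVal(1),
		"b": cty.True,
	})) // emits a flow mapping, e.g. {"a": 1, "b": true}
}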
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. 
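[One observable consequence of yaml_emitter_check_simple_key above: a multiline (or longer-than-128-byte) key cannot be a simple key, so the block-mapping states fall back to the explicit "?" key form. Illustrative sketch, same imports as the first example.]

func explicitKeyExample() ([]byte, error) {
	// The "\n" in the key makes scalar_data.multiline true, so
	// check_simple_key returns false and the key is emitted as an
	// explicit "? <key>" entry rather than a simple key.
	return yaml.Standard.Marshal(cty.ObjectVal(map[string]cty.Value{
		"first\nsecond": cty.NumberIntVal(1),
	}))
}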
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. 
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
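[The veto flags computed in yaml_emitter_analyze_scalar above decide which styles survive selection. A drastically reduced, hypothetical restatement for single-byte ASCII input only; the real code also tracks break/space adjacency, multi-byte widths, flow context, and the "---"/"..." document markers.]

func plainBlockAllowed(s string) bool {
	if s == "" {
		return true // an empty scalar may still be plain in block context
	}
	for i := 0; i < len(s); i++ {
		if s[i] == '\n' {
			return false // line breaks veto both plain styles
		}
	}
	if s[0] == ' ' || s[len(s)-1] == ' ' {
		return false // leading/trailing space vetoes plain styles
	}
	switch s[0] {
	case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
		return false // leading indicator characters veto plain styles
	}
	return true
}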
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } 
else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, 
rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/encode.go b/vendor/github.com/zclconf/go-cty-yaml/encode.go deleted file mode 100644 index daa1478a..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/encode.go +++ /dev/null @@ -1,189 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" - "strings" - - "github.com/zclconf/go-cty/cty" -) - -func (c *Converter) marshal(v cty.Value) ([]byte, error) { - var buf bytes.Buffer - - e := &yaml_emitter_t{} - yaml_emitter_initialize(e) - yaml_emitter_set_output_writer(e, &buf) - yaml_emitter_set_unicode(e, true) - - var evt yaml_event_t - yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - yaml_document_start_event_initialize(&evt, nil, nil, true) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - - if err := c.marshalEmit(v, e); err != nil { - return nil, err - } - - yaml_document_end_event_initialize(&evt, true) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - yaml_stream_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - - return buf.Bytes(), nil -} - -func (c *Converter) marshalEmit(v cty.Value, e *yaml_emitter_t) error { - ty := v.Type() - switch { - case v.IsNull(): - return c.marshalPrimitive(v, e) - case !v.IsKnown(): - return fmt.Errorf("cannot serialize unknown value as YAML") - case ty.IsPrimitiveType(): - return c.marshalPrimitive(v, e) - case ty.IsTupleType(), ty.IsListType(), ty.IsSetType(): - return c.marshalSequence(v, e) - case ty.IsObjectType(), ty.IsMapType(): - return c.marshalMapping(v, e) - default: - return fmt.Errorf("can't marshal %s as YAML", ty.FriendlyName()) - } -} - -func (c *Converter) marshalPrimitive(v cty.Value, e *yaml_emitter_t) error { - var evt yaml_event_t - - if v.IsNull() { - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte("null"), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil - } - - switch v.Type() { - case cty.String: - str := v.AsString() - style := yaml_DOUBLE_QUOTED_SCALAR_STYLE - if strings.Contains(str, "\n") { - style = yaml_LITERAL_SCALAR_STYLE - } - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte(str), - true, - true, - style, - ) - case cty.Number: - str := v.AsBigFloat().Text('f', -1) - 
yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte(str), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - case cty.Bool: - var str string - switch v { - case cty.True: - str = "true" - case cty.False: - str = "false" - } - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte(str), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - } - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} - -func (c *Converter) marshalSequence(v cty.Value, e *yaml_emitter_t) error { - style := yaml_BLOCK_SEQUENCE_STYLE - if c.encodeAsFlow { - style = yaml_FLOW_SEQUENCE_STYLE - } - - var evt yaml_event_t - yaml_sequence_start_event_initialize(&evt, nil, nil, true, style) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - - for it := v.ElementIterator(); it.Next(); { - _, v := it.Element() - err := c.marshalEmit(v, e) - if err != nil { - return err - } - } - - yaml_sequence_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} - -func (c *Converter) marshalMapping(v cty.Value, e *yaml_emitter_t) error { - style := yaml_BLOCK_MAPPING_STYLE - if c.encodeAsFlow { - style = yaml_FLOW_MAPPING_STYLE - } - - var evt yaml_event_t - yaml_mapping_start_event_initialize(&evt, nil, nil, true, style) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - - for it := v.ElementIterator(); it.Next(); { - k, v := it.Element() - err := c.marshalEmit(k, e) - if err != nil { - return err - } - err = c.marshalEmit(v, e) - if err != nil { - return err - } - } - - yaml_mapping_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/error.go b/vendor/github.com/zclconf/go-cty-yaml/error.go deleted file mode 100644 index ae41c488..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/error.go +++ /dev/null @@ -1,97 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" -) - -// Error is an error implementation used to report errors that correspond to -// a particular position in an input buffer. -type Error struct { - cause error - Line, Column int -} - -func (e Error) Error() string { - return fmt.Sprintf("on line %d, column %d: %s", e.Line, e.Column, e.cause.Error()) -} - -// Cause is an implementation of the interface used by -// github.com/pkg/errors.Cause, returning the underlying error without the -// position information. -func (e Error) Cause() error { - return e.cause -} - -// WrappedErrors is an implementation of github.com/hashicorp/errwrap.Wrapper -// returning the underlying error without the position information. 
-func (e Error) WrappedErrors() []error { - return []error{e.cause} -} - -func parserError(p *yaml_parser_t) error { - var cause error - if len(p.problem) > 0 { - cause = errors.New(p.problem) - } else { - cause = errors.New("invalid YAML syntax") // useless generic error, then - } - - return parserErrorWrap(p, cause) -} - -func parserErrorWrap(p *yaml_parser_t, cause error) error { - switch { - case p.problem_mark.line != 0: - line := p.problem_mark.line - column := p.problem_mark.column - // Scanner errors don't iterate line before returning error - if p.error == yaml_SCANNER_ERROR { - line++ - column = 0 - } - return Error{ - cause: cause, - Line: line, - Column: column + 1, - } - case p.context_mark.line != 0: - return Error{ - cause: cause, - Line: p.context_mark.line, - Column: p.context_mark.column + 1, - } - default: - return cause - } -} - -func parserErrorf(p *yaml_parser_t, f string, vals ...interface{}) error { - return parserErrorWrap(p, fmt.Errorf(f, vals...)) -} - -func parseEventErrorWrap(evt *yaml_event_t, cause error) error { - if evt.start_mark.line == 0 { - // Event does not have a start mark, so we won't wrap the error at all - return cause - } - return Error{ - cause: cause, - Line: evt.start_mark.line, - Column: evt.start_mark.column + 1, - } -} - -func parseEventErrorf(evt *yaml_event_t, f string, vals ...interface{}) error { - return parseEventErrorWrap(evt, fmt.Errorf(f, vals...)) -} - -func emitterError(e *yaml_emitter_t) error { - var cause error - if len(e.problem) > 0 { - cause = errors.New(e.problem) - } else { - cause = errors.New("failed to write YAML token") // useless generic error, then - } - return cause -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.mod b/vendor/github.com/zclconf/go-cty-yaml/go.mod deleted file mode 100644 index 3d522687..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/zclconf/go-cty-yaml - -require github.com/zclconf/go-cty v1.0.0 diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.sum b/vendor/github.com/zclconf/go-cty-yaml/go.sum deleted file mode 100644 index 841f7fcf..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/go.sum +++ /dev/null @@ -1,18 +0,0 @@ -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/zclconf/go-cty v1.0.0 h1:EWtv3gKe2wPLIB9hQRQJa7k/059oIfAqcEkCNnaVckk= -github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/text v0.3.0 
h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go deleted file mode 100644 index 5b7b0686..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go +++ /dev/null @@ -1,268 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -func (c *Converter) impliedType(src []byte) (cty.Type, error) { - p := &yaml_parser_t{} - if !yaml_parser_initialize(p) { - return cty.NilType, errors.New("failed to initialize YAML parser") - } - if len(src) == 0 { - src = []byte{'\n'} - } - - an := &typeAnalysis{ - anchorsPending: map[string]int{}, - anchorTypes: map[string]cty.Type{}, - } - - yaml_parser_set_input_string(p, src) - - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_STREAM_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "missing stream start token") - } - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_DOCUMENT_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "missing start of document") - } - - ty, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ == yaml_DOCUMENT_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "only a single document is allowed") - } - if evt.typ != yaml_DOCUMENT_END_EVENT { - return cty.NilType, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) - } - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_STREAM_END_EVENT { - return cty.NilType, parseEventErrorf(&evt, "unexpected extra content after value") - } - - return ty, err -} - -func (c *Converter) impliedTypeParse(an *typeAnalysis, p *yaml_parser_t) (cty.Type, error) { - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - return c.impliedTypeParseRemainder(an, &evt, p) -} - -func (c *Converter) impliedTypeParseRemainder(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - switch evt.typ { - case yaml_SCALAR_EVENT: - return c.impliedTypeScalar(an, evt, p) - case yaml_ALIAS_EVENT: - return c.impliedTypeAlias(an, evt, p) - case yaml_MAPPING_START_EVENT: - return c.impliedTypeMapping(an, evt, p) - case yaml_SEQUENCE_START_EVENT: - return c.impliedTypeSequence(an, evt, p) - case yaml_DOCUMENT_START_EVENT: - return cty.NilType, parseEventErrorf(evt, "only a single document is allowed") - case yaml_STREAM_END_EVENT: - // Decoding an empty buffer, probably - return cty.NilType, parseEventErrorf(evt, "expecting value but found end of stream") - default: - // Should never happen; the above should be comprehensive - return cty.NilType, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) - } -} - -func (c *Converter) impliedTypeScalar(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) 
(cty.Type, error) { - src := evt.value - tag := string(evt.tag) - anchor := string(evt.anchor) - implicit := evt.implicit - - if len(anchor) > 0 { - an.beginAnchor(anchor) - } - - var ty cty.Type - switch { - case tag == "" && !implicit: - // Untagged explicit string - ty = cty.String - default: - v, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) - if err != nil { - return cty.NilType, parseEventErrorWrap(evt, err) - } - if v.RawEquals(mergeMappingVal) { - // In any context other than a mapping key, this is just a plain string - ty = cty.String - } else { - ty = v.Type() - } - } - - if len(anchor) > 0 { - an.completeAnchor(anchor, ty) - } - return ty, nil -} - -func (c *Converter) impliedTypeMapping(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_MAP_TAG { - return cty.NilType, parseEventErrorf(evt, "can't interpret mapping as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - atys := make(map[string]cty.Type) - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilType, parserError(p) - } - if nextEvt.typ == yaml_MAPPING_END_EVENT { - ty := cty.Object(atys) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - if nextEvt.typ != yaml_SCALAR_EVENT { - return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) - if err != nil { - return cty.NilType, err - } - if keyVal.RawEquals(mergeMappingVal) { - // Merging the value (which must be a mapping) into our mapping, - // then. - ty, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - if !ty.IsObjectType() { - return cty.NilType, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) - } - for name, aty := range ty.AttributeTypes() { - atys[name] = aty - } - continue - } - if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { - keyVal = keyValStr - } else { - return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - if keyVal.IsNull() { - return cty.NilType, parseEventErrorf(&nextEvt, "mapping key cannot be null") - } - if !keyVal.IsKnown() { - return cty.NilType, parseEventErrorf(&nextEvt, "mapping key must be known") - } - valTy, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - - atys[keyVal.AsString()] = valTy - } -} - -func (c *Converter) impliedTypeSequence(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_SEQ_TAG { - return cty.NilType, parseEventErrorf(evt, "can't interpret sequence as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - var atys []cty.Type - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilType, parserError(p) - } - if nextEvt.typ == yaml_SEQUENCE_END_EVENT { - ty := cty.Tuple(atys) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - valTy, err := c.impliedTypeParseRemainder(an, &nextEvt, p) - if err != nil { - return cty.NilType, err - } - - atys = append(atys, valTy) - } -} - -func (c *Converter) impliedTypeAlias(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - ty, err := 
an.anchorType(string(evt.anchor)) - if err != nil { - err = parseEventErrorWrap(evt, err) - } - return ty, err -} - -type typeAnalysis struct { - anchorsPending map[string]int - anchorTypes map[string]cty.Type -} - -func (an *typeAnalysis) beginAnchor(name string) { - an.anchorsPending[name]++ -} - -func (an *typeAnalysis) completeAnchor(name string, ty cty.Type) { - an.anchorsPending[name]-- - if an.anchorsPending[name] == 0 { - delete(an.anchorsPending, name) - } - an.anchorTypes[name] = ty -} - -func (an *typeAnalysis) anchorType(name string) (cty.Type, error) { - if _, pending := an.anchorsPending[name]; pending { - // YAML normally allows self-referencing structures, but cty cannot - // represent them (it requires all structures to be finite) so we - // must fail here. - return cty.NilType, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) - } - ty, ok := an.anchorTypes[name] - if !ok { - return cty.NilType, fmt.Errorf("reference to undefined anchor %q", name) - } - return ty, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/parserc.go b/vendor/github.com/zclconf/go-cty-yaml/parserc.go deleted file mode 100644 index 81d05dfe..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. 
- *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return 
yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected <document start>", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/readerc.go b/vendor/github.com/zclconf/go-cty-yaml/readerc.go deleted file mode 100644 index 7c1f5fac..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. 
-const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. 
- if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. 
- switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. 
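// Aside: the surrogate-pair formula quoted above,
// U = 0x10000 + (W1 & 0x3FF)<<10 + (W2 & 0x3FF),
// checked against unicode/utf16 for U+1F600. A minimal sketch; the variable
// names are invented.
package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	hi, lo := rune(0xD83D), rune(0xDE00) // high and low surrogate areas
	manual := 0x10000 + (hi&0x3FF)<<10 + lo&0x3FF
	stdlib := utf16.DecodeRune(hi, lo)
	fmt.Printf("manual: %#x  stdlib: %#x\n", manual, stdlib) // both 0x1f600
}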
-			parser.raw_buffer_pos += width
-			parser.offset += width
-
-			// Finally put the character into the buffer.
-			if value <= 0x7F {
-				// 0000 0000-0000 007F . 0xxxxxxx
-				parser.buffer[buffer_len+0] = byte(value)
-				buffer_len += 1
-			} else if value <= 0x7FF {
-				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
-				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
-				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
-				buffer_len += 2
-			} else if value <= 0xFFFF {
-				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
-				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
-				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
-				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
-				buffer_len += 3
-			} else {
-				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
-				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
-				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
-				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
-				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
-				buffer_len += 4
-			}
-
-			parser.unread++
-		}
-
-		// On EOF, put NUL into the buffer and return.
-		if parser.eof {
-			parser.buffer[buffer_len] = 0
-			buffer_len++
-			parser.unread++
-			break
-		}
-	}
-	// [Go] Read the documentation of this function above. To return true,
-	// we need to have the given length in the buffer. Not doing that means
-	// every single check that calls this function to make sure the buffer
-	// has a given length is either panicking (in Go) or accessing invalid
-	// memory (in C). This happens here due to the EOF above breaking early.
-	for buffer_len < length {
-		parser.buffer[buffer_len] = 0
-		buffer_len++
-	}
-	parser.buffer = parser.buffer[:buffer_len]
-	return true
-}
diff --git a/vendor/github.com/zclconf/go-cty-yaml/resolve.go b/vendor/github.com/zclconf/go-cty-yaml/resolve.go
deleted file mode 100644
index 0f643834..00000000
--- a/vendor/github.com/zclconf/go-cty-yaml/resolve.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package yaml
-
-import (
-	"encoding/base64"
-	"fmt"
-	"reflect"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/zclconf/go-cty/cty"
-)
-
-type resolveMapItem struct {
-	value cty.Value
-	tag   string
-}
-
-var resolveTable = make([]byte, 256)
-var resolveMap = make(map[string]resolveMapItem)
-
-func init() {
-	t := resolveTable
-	t[int('+')] = 'S' // Sign
-	t[int('-')] = 'S'
-	for _, c := range "0123456789" {
-		t[int(c)] = 'D' // Digit
-	}
-	for _, c := range "yYnNtTfFoO~" {
-		t[int(c)] = 'M' // In map
-	}
-	t[int('.')] = '.'
// Float (potentially in map) - - var resolveMapList = []struct { - v cty.Value - tag string - l []string - }{ - {cty.True, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {cty.True, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {cty.True, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {cty.False, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {cty.False, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {cty.False, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {cty.NullVal(cty.DynamicPseudoType), yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {cty.NegativeInfinity, yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG, yaml_BINARY_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func (c *Converter) resolveScalar(tag string, src string, style yaml_scalar_style_t) (cty.Value, error) { - if !resolvableTag(tag) { - return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) - } - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if src != "" { - hint = resolveTable[src[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE || style == yaml_DOUBLE_QUOTED_SCALAR_STYLE { - return cty.StringVal(src), nil - } - - // Handle things we can lookup in a map. - if item, ok := resolveMap[src]; ok { - return item.value, nil - } - - if tag == "" { - for _, nan := range []string{".nan", ".NaN", ".NAN"} { - if src == nan { - // cty cannot represent NaN, so this is an error - return cty.NilVal, fmt.Errorf("floating point NaN is not supported") - } - } - } - - // Base 60 floats are intentionally not supported. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - if numberVal, err := cty.ParseNumberVal(src); err == nil { - return numberVal, nil - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(src) - if ok { - // cty has no timestamp type, but its functions stdlib - // conventionally uses strings in an RFC3339 encoding - // to represent time, so we'll follow that convention here. 
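// Aside: a minimal sketch of the convention referenced above. cty has no
// timestamp type, so a recognized YAML timestamp becomes an RFC 3339 string.
// The layout below mirrors the space-separated entry in
// allowedTimestampFormats (defined later in this file); the sample value is
// invented.
package main

import (
	"fmt"
	"time"
)

func main() {
	// Short-field layouts like "2006-1-2" accept both padded and unpadded
	// dates; a missing fraction is fine because ".999999999" is optional.
	t, err := time.Parse("2006-1-2 15:4:5.999999999", "2020-06-06 16:34:39")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Format(time.RFC3339)) // 2020-06-06T16:34:39Z
}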
- return cty.StringVal(t.Format(time.RFC3339)), nil - } - } - - plain := strings.Replace(src, "_", "", -1) - if numberVal, err := cty.ParseNumberVal(plain); err == nil { - return numberVal, nil - } - if strings.HasPrefix(plain, "0b") || strings.HasPrefix(plain, "-0b") { - tag = yaml_INT_TAG // will handle parsing below in our tag switch - } - default: - panic(fmt.Sprintf("cannot resolve tag %q with source %q", tag, src)) - } - } - - if tag == "" && src == "<<" { - return mergeMappingVal, nil - } - - switch tag { - case yaml_STR_TAG, yaml_BINARY_TAG: - // If it's binary then we want to keep the base64 representation, because - // cty has no binary type, but we will check that it's actually base64. - if tag == yaml_BINARY_TAG { - _, err := base64.StdEncoding.DecodeString(src) - if err != nil { - return cty.NilVal, fmt.Errorf("cannot parse %q as %s: not valid base64", src, tag) - } - } - return cty.StringVal(src), nil - case yaml_BOOL_TAG: - item, ok := resolveMap[src] - if !ok || item.tag != yaml_BOOL_TAG { - return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) - } - return item.value, nil - case yaml_FLOAT_TAG, yaml_INT_TAG: - // Note: We don't actually check that a value tagged INT is a whole - // number here. We could, but cty generally doesn't care about the - // int/float distinction, so we'll just be generous and accept it. - plain := strings.Replace(src, "_", "", -1) - if numberVal, err := cty.ParseNumberVal(plain); err == nil { // handles decimal integers and floats - return numberVal, nil - } - if intv, err := strconv.ParseInt(plain, 0, 64); err == nil { // handles 0x and 00 prefixes - return cty.NumberIntVal(intv), nil - } - if uintv, err := strconv.ParseUint(plain, 0, 64); err == nil { // handles 0x and 00 prefixes - return cty.NumberUIntVal(uintv), nil - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - return cty.NumberIntVal(intv), nil - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return cty.NumberUIntVal(uintv), nil - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) - if err == nil { - return cty.NumberIntVal(intv), nil - } - } - return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) - case yaml_TIMESTAMP_TAG: - t, ok := parseTimestamp(src) - if ok { - // cty has no timestamp type, but its functions stdlib - // conventionally uses strings in an RFC3339 encoding - // to represent time, so we'll follow that convention here. - return cty.StringVal(t.Format(time.RFC3339)), nil - } - return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) - case yaml_NULL_TAG: - return cty.NullVal(cty.DynamicPseudoType), nil - case "": - return cty.StringVal(src), nil - default: - return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) - } -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. 
-func encodeBase64(s string) string {
-	const lineLen = 70
-	encLen := base64.StdEncoding.EncodedLen(len(s))
-	lines := encLen/lineLen + 1
-	buf := make([]byte, encLen*2+lines)
-	in := buf[0:encLen]
-	out := buf[encLen:]
-	base64.StdEncoding.Encode(in, []byte(s))
-	k := 0
-	for i := 0; i < len(in); i += lineLen {
-		j := i + lineLen
-		if j > len(in) {
-			j = len(in)
-		}
-		k += copy(out[k:], in[i:j])
-		if lines > 1 {
-			out[k] = '\n'
-			k++
-		}
-	}
-	return string(out[:k])
-}
-
-// This is a subset of the formats allowed by the regular expression
-// defined at http://yaml.org/type/timestamp.html.
-var allowedTimestampFormats = []string{
-	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
-	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
-	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
-	"2006-1-2",                        // date only
-	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
-	// from the set of examples.
-}
-
-// parseTimestamp parses s as a timestamp string and returns the parsed
-// timestamp along with whether parsing succeeded.
-// Timestamp formats are defined at http://yaml.org/type/timestamp.html.
-func parseTimestamp(s string) (time.Time, bool) {
-	// TODO write code to check all the formats supported by
-	// http://yaml.org/type/timestamp.html instead of using time.Parse.
-
-	// Quick check: all date formats start with YYYY-.
-	i := 0
-	for ; i < len(s); i++ {
-		if c := s[i]; c < '0' || c > '9' {
-			break
-		}
-	}
-	if i != 4 || i == len(s) || s[i] != '-' {
-		return time.Time{}, false
-	}
-	for _, format := range allowedTimestampFormats {
-		if t, err := time.Parse(format, s); err == nil {
-			return t, true
-		}
-	}
-	return time.Time{}, false
-}
-
-type mergeMapping struct{}
-
-var mergeMappingTy = cty.Capsule("merge mapping", reflect.TypeOf(mergeMapping{}))
-var mergeMappingVal = cty.CapsuleVal(mergeMappingTy, &mergeMapping{})
diff --git a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
deleted file mode 100644
index 077fd1dd..00000000
--- a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
+++ /dev/null
@@ -1,2696 +0,0 @@
-package yaml
-
-import (
-	"bytes"
-	"fmt"
-)
-
-// Introduction
-// ************
-//
-// The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
-// some cases we are less restrictive than it requires.
-//
-// The process of transforming a YAML stream into a sequence of events is
-// divided into two steps: Scanning and Parsing.
-//
-// The Scanner transforms the input stream into a sequence of tokens, while the
-// Parser transforms the sequence of tokens produced by the Scanner into a
-// sequence of parsing events.
-//
-// The Scanner is rather clever and complicated. The Parser, on the contrary,
-// is a straightforward implementation of a recursive descent parser (or,
-// LL(1) parser, as it is usually called).
-//
-// Actually there are two issues of Scanning that might be called "clever", the
-// rest is quite straightforward. The issues are "block collection start" and
-// "simple keys". Both issues are explained below in detail.
-//
-// Here the Scanning step is explained and implemented. We start with the list
-// of all the tokens produced by the Scanner together with short descriptions.
-//
-// Now, tokens:
-//
-//      STREAM-START(encoding)          # The stream start.
-//      STREAM-END                      # The stream end.
-//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
-//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
-//      DOCUMENT-START                  # '---'
-//      DOCUMENT-END                    # '...'
-//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
-//      BLOCK-MAPPING-START             # sequence or a block mapping.
-//      BLOCK-END                       # Indentation decrease.
-//      FLOW-SEQUENCE-START             # '['
-//      FLOW-SEQUENCE-END               # ']'
-//      FLOW-MAPPING-START              # '{'
-//      FLOW-MAPPING-END                # '}'
-//      BLOCK-ENTRY                     # '-'
-//      FLOW-ENTRY                      # ','
-//      KEY                             # '?' or nothing (simple keys).
-//      VALUE                           # ':'
-//      ALIAS(anchor)                   # '*anchor'
-//      ANCHOR(anchor)                  # '&anchor'
-//      TAG(handle,suffix)              # '!handle!suffix'
-//      SCALAR(value,style)             # A scalar.
-//
-// The following two tokens are "virtual" tokens denoting the beginning and the
-// end of the stream:
-//
-//      STREAM-START(encoding)
-//      STREAM-END
-//
-// We pass the information about the input stream encoding with the
-// STREAM-START token.
-//
-// The next two tokens are responsible for tags:
-//
-//      VERSION-DIRECTIVE(major,minor)
-//      TAG-DIRECTIVE(handle,prefix)
-//
-// Example:
-//
-//      %YAML   1.1
-//      %TAG    !       !foo
-//      %TAG    !yaml!  tag:yaml.org,2002:
-//      ---
-//
-// The corresponding sequence of tokens:
-//
-//      STREAM-START(utf-8)
-//      VERSION-DIRECTIVE(1,1)
-//      TAG-DIRECTIVE("!","!foo")
-//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
-//      DOCUMENT-START
-//      STREAM-END
-//
-// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
-// line.
-//
-// The document start and end indicators are represented by:
-//
-//      DOCUMENT-START
-//      DOCUMENT-END
-//
-// Note that if a YAML stream contains an implicit document (without '---'
-// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
-// produced.
-//
-// In the following examples, we present whole documents together with the
-// produced tokens.
-//
-// 1. An implicit document:
-//
-//      'a scalar'
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      SCALAR("a scalar",single-quoted)
-//      STREAM-END
-//
-// 2. An explicit document:
-//
-//      ---
-//      'a scalar'
-//      ...
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      DOCUMENT-START
-//      SCALAR("a scalar",single-quoted)
-//      DOCUMENT-END
-//      STREAM-END
-//
-// 3. Several documents in a stream:
-//
-//      'a scalar'
-//      ---
-//      'another scalar'
-//      ---
-//      'yet another scalar'
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      SCALAR("a scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("another scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("yet another scalar",single-quoted)
-//      STREAM-END
-//
-// We have already introduced the SCALAR token above. The following tokens are
-// used to describe aliases, anchors, tags, and scalars:
-//
-//      ALIAS(anchor)
-//      ANCHOR(anchor)
-//      TAG(handle,suffix)
-//      SCALAR(value,style)
-//
-// The following series of examples illustrates the usage of these tokens:
-//
-// 1. A recursive sequence:
-//
-//      &A [ *A ]
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      ANCHOR("A")
-//      FLOW-SEQUENCE-START
-//      ALIAS("A")
-//      FLOW-SEQUENCE-END
-//      STREAM-END
-//
-// 2. A tagged scalar:
-//
-//      !!float "3.14"  # A good approximation.
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      TAG("!!","float")
-//      SCALAR("3.14",double-quoted)
-//      STREAM-END
-//
-// 3. Various scalar styles:
-//
-//      --- # Implicit empty plain scalars do not produce tokens.
-//      --- a plain scalar
-//      --- 'a single-quoted scalar'
-//      --- "a double-quoted scalar"
-//      --- |-
-//        a literal scalar
-//      --- >-
-//        a folded
-//        scalar
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      DOCUMENT-START
-//      DOCUMENT-START
-//      SCALAR("a plain scalar",plain)
-//      DOCUMENT-START
-//      SCALAR("a single-quoted scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("a double-quoted scalar",double-quoted)
-//      DOCUMENT-START
-//      SCALAR("a literal scalar",literal)
-//      DOCUMENT-START
-//      SCALAR("a folded scalar",folded)
-//      STREAM-END
-//
-// Now it's time to review collection-related tokens. We will start with
-// flow collections:
-//
-//      FLOW-SEQUENCE-START
-//      FLOW-SEQUENCE-END
-//      FLOW-MAPPING-START
-//      FLOW-MAPPING-END
-//      FLOW-ENTRY
-//      KEY
-//      VALUE
-//
-// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
-// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
-// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally the
-// indicators '?' and ':', which are used for denoting mapping keys and values,
-// are represented by the KEY and VALUE tokens.
-//
-// The following examples show flow collections:
-//
-// 1. A flow sequence:
-//
-//      [item 1, item 2, item 3]
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      FLOW-SEQUENCE-START
-//      SCALAR("item 1",plain)
-//      FLOW-ENTRY
-//      SCALAR("item 2",plain)
-//      FLOW-ENTRY
-//      SCALAR("item 3",plain)
-//      FLOW-SEQUENCE-END
-//      STREAM-END
-//
-// 2. A flow mapping:
-//
-//      {
-//          a simple key: a value,  # Note that the KEY token is produced.
-//          ? a complex key: another value,
-//      }
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      FLOW-MAPPING-START
-//      KEY
-//      SCALAR("a simple key",plain)
-//      VALUE
-//      SCALAR("a value",plain)
-//      FLOW-ENTRY
-//      KEY
-//      SCALAR("a complex key",plain)
-//      VALUE
-//      SCALAR("another value",plain)
-//      FLOW-ENTRY
-//      FLOW-MAPPING-END
-//      STREAM-END
-//
-// A simple key is a key which is not denoted by the '?' indicator. Note that
-// the Scanner still produces the KEY token whenever it encounters a simple key.
-//
-// For scanning block collections, the following tokens are used (note that we
-// repeat KEY and VALUE here):
-//
-//      BLOCK-SEQUENCE-START
-//      BLOCK-MAPPING-START
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      KEY
-//      VALUE
-//
-// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
-// indentation increase that precedes a block collection (cf. the INDENT token
-// in Python). The token BLOCK-END denotes the indentation decrease that ends a
-// block collection (cf. the DEDENT token in Python). However, YAML has some
-// syntax peculiarities that make detection of these tokens more complex.
-//
-// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
-// '-', '?', and ':' correspondingly.
-//
-// The following examples show how the tokens BLOCK-SEQUENCE-START,
-// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
-//
-// 1. Block sequences:
-//
-//      - item 1
-//      - item 2
-//      -
-//        - item 3.1
-//        - item 3.2
-//      -
-//        key 1: value 1
-//        key 2: value 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-ENTRY
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 3.1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 3.2",plain)
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// 2. Block mappings:
-//
-//      a simple key: a value   # The KEY token is produced here.
-//      ? a complex key
-//      : another value
-//      a mapping:
-//        key 1: value 1
-//        key 2: value 2
-//      a sequence:
-//        - item 1
-//        - item 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("a simple key",plain)
-//      VALUE
-//      SCALAR("a value",plain)
-//      KEY
-//      SCALAR("a complex key",plain)
-//      VALUE
-//      SCALAR("another value",plain)
-//      KEY
-//      SCALAR("a mapping",plain)
-//      VALUE
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      KEY
-//      SCALAR("a sequence",plain)
-//      VALUE
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// YAML does not always require a new block collection to start on a new
-// line. If the current line contains only '-', '?', and ':' indicators, a new
-// block collection may start on the current line. The following examples
-// illustrate this case:
-//
-// 1. Collections in a sequence:
-//
-//      - - item 1
-//        - item 2
-//      - key 1: value 1
-//        key 2: value 2
-//      - ? complex key
-//        : complex value
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("complex key")
-//      VALUE
-//      SCALAR("complex value")
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// 2. Collections in a mapping:
-//
-//      ? a sequence
-//      : - item 1
-//        - item 2
-//      ? a mapping
-//      : key 1: value 1
-//        key 2: value 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("a sequence",plain)
-//      VALUE
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-END
-//      KEY
-//      SCALAR("a mapping",plain)
-//      VALUE
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// YAML also permits non-indented sequences if they are included in a block
-// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
-//
-//      key:
-//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. 
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. 
-	parser.simple_keys[i].possible = false
-	return true
-}
-
-// Increase the flow level and resize the simple key list if needed.
-func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
-	// Reset the simple key on the next level.
-	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
-	// Increase the flow level.
-	parser.flow_level++
-	return true
-}
-
-// Decrease the flow level.
-func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
-	if parser.flow_level > 0 {
-		parser.flow_level--
-		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
-	}
-	return true
-}
-
-// Push the current indentation level to the stack and set the new level
-// if the current column is greater than the indentation level. In this case,
-// append or insert the specified token into the token queue.
-func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
-	// In the flow context, do nothing.
-	if parser.flow_level > 0 {
-		return true
-	}
-
-	if parser.indent < column {
-		// Push the current indentation level to the stack and set the new
-		// indentation level.
-		parser.indents = append(parser.indents, parser.indent)
-		parser.indent = column
-
-		// Create a token and insert it into the queue.
-		token := yaml_token_t{
-			typ:        typ,
-			start_mark: mark,
-			end_mark:   mark,
-		}
-		if number > -1 {
-			number -= parser.tokens_parsed
-		}
-		yaml_insert_token(parser, number, &token)
-	}
-	return true
-}
-
-// Pop indentation levels from the indents stack until the current level
-// becomes less than or equal to the column. For each indentation level,
-// append the BLOCK-END token.
-func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
-	// In the flow context, do nothing.
-	if parser.flow_level > 0 {
-		return true
-	}
-
-	// Loop through the indentation levels in the stack.
-	for parser.indent > column {
-		// Create a token and append it to the queue.
-		token := yaml_token_t{
-			typ:        yaml_BLOCK_END_TOKEN,
-			start_mark: parser.mark,
-			end_mark:   parser.mark,
-		}
-		yaml_insert_token(parser, -1, &token)
-
-		// Pop the indentation level.
-		parser.indent = parser.indents[len(parser.indents)-1]
-		parser.indents = parser.indents[:len(parser.indents)-1]
-	}
-	return true
-}
-
-// Initialize the scanner and produce the STREAM-START token.
-func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
-
-	// Set the initial indentation.
-	parser.indent = -1
-
-	// Initialize the simple key stack.
-	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
-	// A simple key is allowed at the beginning of the stream.
-	parser.simple_key_allowed = true
-
-	// We have started.
-	parser.stream_start_produced = true
-
-	// Create the STREAM-START token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_STREAM_START_TOKEN,
-		start_mark: parser.mark,
-		end_mark:   parser.mark,
-		encoding:   parser.encoding,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the STREAM-END token and shut down the scanner.
-func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
-
-	// Force new line.
-	if parser.mark.column != 0 {
-		parser.mark.column = 0
-		parser.mark.line++
-	}
-
-	// Reset the indentation level.
-	if !yaml_parser_unroll_indent(parser, -1) {
-		return false
-	}
-
-	// Reset simple keys.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	parser.simple_key_allowed = false
-
-	// Create the STREAM-END token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_STREAM_END_TOKEN,
-		start_mark: parser.mark,
-		end_mark:   parser.mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
-func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
-	// Reset the indentation level.
-	if !yaml_parser_unroll_indent(parser, -1) {
-		return false
-	}
-
-	// Reset simple keys.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	parser.simple_key_allowed = false
-
-	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
-	token := yaml_token_t{}
-	if !yaml_parser_scan_directive(parser, &token) {
-		return false
-	}
-	// Append the token to the queue.
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the DOCUMENT-START or DOCUMENT-END token.
-func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
-	// Reset the indentation level.
-	if !yaml_parser_unroll_indent(parser, -1) {
-		return false
-	}
-
-	// Reset simple keys.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	parser.simple_key_allowed = false
-
-	// Consume the token.
-	start_mark := parser.mark
-
-	skip(parser)
-	skip(parser)
-	skip(parser)
-
-	end_mark := parser.mark
-
-	// Create the DOCUMENT-START or DOCUMENT-END token.
-	token := yaml_token_t{
-		typ:        typ,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	// Append the token to the queue.
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
-func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
-	// The indicators '[' and '{' may start a simple key.
-	if !yaml_parser_save_simple_key(parser) {
-		return false
-	}
-
-	// Increase the flow level.
-	if !yaml_parser_increase_flow_level(parser) {
-		return false
-	}
-
-	// A simple key may follow the indicators '[' and '{'.
-	parser.simple_key_allowed = true
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
-	token := yaml_token_t{
-		typ:        typ,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	// Append the token to the queue.
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
-func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
-	// Reset any potential simple key on the current flow level.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Decrease the flow level.
-	if !yaml_parser_decrease_flow_level(parser) {
-		return false
-	}
-
-	// No simple keys after the indicators ']' and '}'.
-	parser.simple_key_allowed = false
-
-	// Consume the token.
-
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
-	token := yaml_token_t{
-		typ:        typ,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	// Append the token to the queue.
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the FLOW-ENTRY token.
-func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
-	// Reset any potential simple keys on the current flow level.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Simple keys are allowed after ','.
-	parser.simple_key_allowed = true
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the FLOW-ENTRY token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_FLOW_ENTRY_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the BLOCK-ENTRY token.
-func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
-	// Check if the scanner is in the block context.
-	if parser.flow_level == 0 {
-		// Check if we are allowed to start a new entry.
-		if !parser.simple_key_allowed {
-			return yaml_parser_set_scanner_error(parser, "", parser.mark,
-				"block sequence entries are not allowed in this context")
-		}
-		// Add the BLOCK-SEQUENCE-START token if needed.
-		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
-			return false
-		}
-	} else {
-		// It is an error for the '-' indicator to occur in the flow context,
-		// but we let the Parser detect and report it because the Parser
-		// is able to point to the context.
-	}
-
-	// Reset any potential simple keys on the current flow level.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Simple keys are allowed after '-'.
-	parser.simple_key_allowed = true
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the BLOCK-ENTRY token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_BLOCK_ENTRY_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the KEY token.
-func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
-
-	// In the block context, additional checks are required.
-	if parser.flow_level == 0 {
-		// Check if we are allowed to start a new key (not necessarily simple).
-		if !parser.simple_key_allowed {
-			return yaml_parser_set_scanner_error(parser, "", parser.mark,
-				"mapping keys are not allowed in this context")
-		}
-		// Add the BLOCK-MAPPING-START token if needed.
-		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
-			return false
-		}
-	}
-
-	// Reset any potential simple keys on the current flow level.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Simple keys are allowed after '?' in the block context.
-	parser.simple_key_allowed = parser.flow_level == 0
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the KEY token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_KEY_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the VALUE token.
-func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
-
-	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
-
-	// Have we found a simple key?
-	if simple_key.possible {
-		// Create the KEY token and insert it into the queue.
-		token := yaml_token_t{
-			typ:        yaml_KEY_TOKEN,
-			start_mark: simple_key.mark,
-			end_mark:   simple_key.mark,
-		}
-		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
-
-		// In the block context, we may need to add the BLOCK-MAPPING-START token.
-		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
-			simple_key.token_number,
-			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
-			return false
-		}
-
-		// Remove the simple key.
- simple_key.possible = false - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. 
- parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. 
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	if parser.buffer[parser.buffer_pos] == '#' {
-		for !is_breakz(parser.buffer, parser.buffer_pos) {
-			skip(parser)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-	}
-
-	// Check if we are at the end of the line.
-	if !is_breakz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "did not find expected comment or line break")
-		return false
-	}
-
-	// Eat a line break.
-	if is_break(parser.buffer, parser.buffer_pos) {
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-		skip_line(parser)
-	}
-
-	return true
-}
-
-// Scan the directive name.
-//
-// Scope:
-//      %YAML   1.1     # a comment \n
-//       ^^^^
-//      %TAG    !yaml!  tag:yaml.org,2002:  \n
-//       ^^^
-//
-func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
-	// Consume the directive name.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	var s []byte
-	for is_alpha(parser.buffer, parser.buffer_pos) {
-		s = read(parser, s)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Check if the name is empty.
-	if len(s) == 0 {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "could not find expected directive name")
-		return false
-	}
-
-	// Check for a blank character after the name.
-	if !is_blankz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "found unexpected non-alphabetical character")
-		return false
-	}
-	*name = s
-	return true
-}
-
-// Scan the value of VERSION-DIRECTIVE.
-//
-// Scope:
-//      %YAML   1.1     # a comment \n
-//           ^^^^^^
-func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
-	// Eat whitespaces.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Consume the major version number.
-	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
-		return false
-	}
-
-	// Eat '.'.
-	if parser.buffer[parser.buffer_pos] != '.' {
-		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
-			start_mark, "did not find expected digit or '.' character")
-	}
-
-	skip(parser)
-
-	// Consume the minor version number.
-	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
-		return false
-	}
-	return true
-}
-
-const max_number_length = 2
-
-// Scan the version number of VERSION-DIRECTIVE.
-//
-// Scope:
-//      %YAML   1.1     # a comment \n
-//              ^
-//      %YAML   1.1     # a comment \n
-//                ^
-func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
-
-	// Repeat while the next character is a digit.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	var value, length int8
-	for is_digit(parser.buffer, parser.buffer_pos) {
-		// Check if the number is too long.
- length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
||
-			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
-			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
-			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
-			parser.buffer[parser.buffer_pos] == '`') {
-		context := "while scanning an alias"
-		if typ == yaml_ANCHOR_TOKEN {
-			context = "while scanning an anchor"
-		}
-		yaml_parser_set_scanner_error(parser, context, start_mark,
-			"did not find expected alphabetic or numeric character")
-		return false
-	}
-
-	// Create a token.
-	*token = yaml_token_t{
-		typ: typ,
-		start_mark: start_mark,
-		end_mark: end_mark,
-		value: s,
-	}
-
-	return true
-}
-
-/*
- * Scan a TAG token.
- */
-
-func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
-	var handle, suffix []byte
-
-	start_mark := parser.mark
-
-	// Check if the tag is in the canonical form.
-	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-		return false
-	}
-
-	if parser.buffer[parser.buffer_pos+1] == '<' {
-		// Keep the handle as ''
-
-		// Eat '!<'
-		skip(parser)
-		skip(parser)
-
-		// Consume the tag value.
-		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
-			return false
-		}
-
-		// Check for '>' and eat it.
-		if parser.buffer[parser.buffer_pos] != '>' {
-			yaml_parser_set_scanner_error(parser, "while scanning a tag",
-				start_mark, "did not find the expected '>'")
-			return false
-		}
-
-		skip(parser)
-	} else {
-		// The tag has either the '!suffix' or the '!handle!suffix' form.
-
-		// First, try to scan a handle.
-		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
-			return false
-		}
-
-		// Check if it is, indeed, a handle.
-		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
-			// Scan the suffix now.
-			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
-				return false
-			}
-		} else {
-			// It wasn't a handle after all. Scan the rest of the tag.
-			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
-				return false
-			}
-
-			// Set the handle to '!'.
-			handle = []byte{'!'}
-
-			// A special case: the '!' tag. Set the handle to '' and the
-			// suffix to '!'.
-			if len(suffix) == 0 {
-				handle, suffix = suffix, handle
-			}
-		}
-	}
-
-	// Check the character which ends the tag.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	if !is_blankz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a tag",
-			start_mark, "did not find expected whitespace or line break")
-		return false
-	}
-
-	end_mark := parser.mark
-
-	// Create a token.
-	*token = yaml_token_t{
-		typ: yaml_TAG_TOKEN,
-		start_mark: start_mark,
-		end_mark: end_mark,
-		value: handle,
-		suffix: suffix,
-	}
-	return true
-}
-
-// Scan a tag handle.
-func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
-	// Check the initial '!' character.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	if parser.buffer[parser.buffer_pos] != '!' {
-		yaml_parser_set_scanner_tag_error(parser, directive,
-			start_mark, "did not find expected '!'")
-		return false
-	}
-
-	var s []byte
-
-	// Copy the '!' character.
-	s = read(parser, s)
-
-	// Copy all subsequent alphabetical and numerical characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. 
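-		// For example, the sequence "%C3%A9" decodes to the octets 0xC3 0xA9,
-		// the UTF-8 encoding of 'é' (U+00E9): 0xC3 announces a two-byte
-		// sequence (width 2) and 0xA9 is a valid trailing octet (10xxxxxx).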
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. 
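-	// Only a comment or a line break may follow the header: for example,
-	// "|2 # comment" is accepted, while "|2 x" fails the check below with
-	// "did not find expected comment or line break".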
- if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. 
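-		// If no indentation indicator was given, *indent is still zero here,
-		// so every leading space is consumed and max_indent records the
-		// deepest column seen; a first non-empty line starting at column 2,
-		// for example, fixes the scalar's indentation at 2.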
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
-			skip(parser)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-		if parser.mark.column > max_indent {
-			max_indent = parser.mark.column
-		}
-
-		// Check for a tab character messing the indentation.
-		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
-			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-				start_mark, "found a tab character where an indentation space is expected")
-		}
-
-		// Have we found a non-empty line?
-		if !is_break(parser.buffer, parser.buffer_pos) {
-			break
-		}
-
-		// Consume the line break.
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-		// [Go] Should really be returning breaks instead.
-		*breaks = read_line(parser, *breaks)
-		*end_mark = parser.mark
-	}
-
-	// Determine the indentation level if needed.
-	if *indent == 0 {
-		*indent = max_indent
-		if *indent < parser.indent+1 {
-			*indent = parser.indent + 1
-		}
-		if *indent < 1 {
-			*indent = 1
-		}
-	}
-	return true
-}
-
-// Scan a quoted scalar.
-func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
-	// Eat the left quote.
-	start_mark := parser.mark
-	skip(parser)
-
-	// Consume the content of the quoted scalar.
-	var s, leading_break, trailing_breaks, whitespaces []byte
-	for {
-		// Check that there are no document indicators at the beginning of the line.
-		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
-			return false
-		}
-
-		if parser.mark.column == 0 &&
-			((parser.buffer[parser.buffer_pos+0] == '-' &&
-				parser.buffer[parser.buffer_pos+1] == '-' &&
-				parser.buffer[parser.buffer_pos+2] == '-') ||
-				(parser.buffer[parser.buffer_pos+0] == '.' &&
-					parser.buffer[parser.buffer_pos+1] == '.' &&
-					parser.buffer[parser.buffer_pos+2] == '.')) &&
-			is_blankz(parser.buffer, parser.buffer_pos+3) {
-			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
-				start_mark, "found unexpected document indicator")
-			return false
-		}
-
-		// Check for EOF.
-		if is_z(parser.buffer, parser.buffer_pos) {
-			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
-				start_mark, "found unexpected end of stream")
-			return false
-		}
-
-		// Consume non-blank characters.
-		leading_blanks := false
-		for !is_blankz(parser.buffer, parser.buffer_pos) {
-			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
-				// It is an escaped single quote.
-				s = append(s, '\'')
-				skip(parser)
-				skip(parser)
-
-			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
-				// It is a right single quote.
-				break
-			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
-				// It is a right double quote.
-				break
-
-			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
-				// It is an escaped line break.
-				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
-					return false
-				}
-				skip(parser)
-				skip_line(parser)
-				leading_blanks = true
-				break
-
-			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
-				// It is an escape sequence.
-				code_length := 0
-
-				// Check the escape character.
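-				// For example, '\t' expands to a single tab byte below, while
-				// '\u00E9' sets code_length = 4 and is then encoded as the
-				// UTF-8 bytes 0xC3 0xA9 ('é').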
-				switch parser.buffer[parser.buffer_pos+1] {
-				case '0':
-					s = append(s, 0)
-				case 'a':
-					s = append(s, '\x07')
-				case 'b':
-					s = append(s, '\x08')
-				case 't', '\t':
-					s = append(s, '\x09')
-				case 'n':
-					s = append(s, '\x0A')
-				case 'v':
-					s = append(s, '\x0B')
-				case 'f':
-					s = append(s, '\x0C')
-				case 'r':
-					s = append(s, '\x0D')
-				case 'e':
-					s = append(s, '\x1B')
-				case ' ':
-					s = append(s, '\x20')
-				case '"':
-					s = append(s, '"')
-				case '\'':
-					s = append(s, '\'')
-				case '\\':
-					s = append(s, '\\')
-				case 'N': // NEL (#x85)
-					s = append(s, '\xC2')
-					s = append(s, '\x85')
-				case '_': // #xA0
-					s = append(s, '\xC2')
-					s = append(s, '\xA0')
-				case 'L': // LS (#x2028)
-					s = append(s, '\xE2')
-					s = append(s, '\x80')
-					s = append(s, '\xA8')
-				case 'P': // PS (#x2029)
-					s = append(s, '\xE2')
-					s = append(s, '\x80')
-					s = append(s, '\xA9')
-				case 'x':
-					code_length = 2
-				case 'u':
-					code_length = 4
-				case 'U':
-					code_length = 8
-				default:
-					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-						start_mark, "found unknown escape character")
-					return false
-				}
-
-				skip(parser)
-				skip(parser)
-
-				// Consume an arbitrary escape code.
-				if code_length > 0 {
-					var value int
-
-					// Scan the character value.
-					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
-						return false
-					}
-					for k := 0; k < code_length; k++ {
-						if !is_hex(parser.buffer, parser.buffer_pos+k) {
-							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-								start_mark, "did not find expected hexadecimal number")
-							return false
-						}
-						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
-					}
-
-					// Check the value and write the character.
-					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
-						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-							start_mark, "found invalid Unicode character escape code")
-						return false
-					}
-					if value <= 0x7F {
-						s = append(s, byte(value))
-					} else if value <= 0x7FF {
-						s = append(s, byte(0xC0+(value>>6)))
-						s = append(s, byte(0x80+(value&0x3F)))
-					} else if value <= 0xFFFF {
-						s = append(s, byte(0xE0+(value>>12)))
-						s = append(s, byte(0x80+((value>>6)&0x3F)))
-						s = append(s, byte(0x80+(value&0x3F)))
-					} else {
-						s = append(s, byte(0xF0+(value>>18)))
-						s = append(s, byte(0x80+((value>>12)&0x3F)))
-						s = append(s, byte(0x80+((value>>6)&0x3F)))
-						s = append(s, byte(0x80+(value&0x3F)))
-					}
-
-					// Advance the pointer.
-					for k := 0; k < code_length; k++ {
-						skip(parser)
-					}
-				}
-			} else {
-				// It is a non-escaped non-blank character.
-				s = read(parser, s)
-			}
-			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-				return false
-			}
-		}
-
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-
-		// Check if we are at the end of the scalar.
-		if single {
-			if parser.buffer[parser.buffer_pos] == '\'' {
-				break
-			}
-		} else {
-			if parser.buffer[parser.buffer_pos] == '"' {
-				break
-			}
-		}
-
-		// Consume blank characters.
-		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
-			if is_blank(parser.buffer, parser.buffer_pos) {
-				// Consume a space or a tab character.
-				if !leading_blanks {
-					whitespaces = read(parser, whitespaces)
-				} else {
-					skip(parser)
-				}
-			} else {
-				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-					return false
-				}
-
-				// Check if it is a first line break.
- if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. 
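-		// The blanks consumed here are folded on the next pass through the
-		// loop above: for example, the two-line plain scalar "first\nsecond"
-		// is read back as "first second", the line break replaced by a space.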
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. - if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/writerc.go b/vendor/github.com/zclconf/go-cty-yaml/writerc.go deleted file mode 100644 index a2dde608..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yaml.go b/vendor/github.com/zclconf/go-cty-yaml/yaml.go deleted file mode 100644 index 2c314cc1..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yaml.go +++ /dev/null @@ -1,215 +0,0 @@ -// Package yaml can marshal and unmarshal cty values in YAML format. -package yaml - -import ( - "errors" - "fmt" - "reflect" - "strings" - "sync" - - "github.com/zclconf/go-cty/cty" -) - -// Unmarshal reads the document found within the given source buffer -// and attempts to convert it into a value conforming to the given type -// constraint. -// -// This is an alias for Unmarshal on the predefined Converter in "Standard". -// -// An error is returned if the given source contains any YAML document -// delimiters. -func Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - return Standard.Unmarshal(src, ty) -} - -// Marshal serializes the given value into a YAML document, using a fixed -// mapping from cty types to YAML constructs. 
-// -// This is an alias for Marshal on the predefined Converter in "Standard". -// -// Note that unlike the function of the same name in the cty JSON package, -// this does not take a type constraint and therefore the YAML serialization -// cannot preserve late-bound type information in the serialization to be -// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type -// constraint given to Unmarshal will be decoded as if the corresponding portion -// of the input were processed with ImpliedType to find a target type. -func Marshal(v cty.Value) ([]byte, error) { - return Standard.Marshal(v) -} - -// ImpliedType analyzes the given source code and returns a suitable type that -// it could be decoded into. -// -// For a converter that is using standard YAML rather than cty-specific custom -// tags, only a subset of cty types can be produced: strings, numbers, bools, -// tuple types, and object types. -// -// This is an alias for ImpliedType on the predefined Converter in "Standard". -func ImpliedType(src []byte) (cty.Type, error) { - return Standard.ImpliedType(src) -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. 
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go deleted file mode 100644 index e25cee56..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go +++ /dev/null @@ -1,738 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. 
-	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
-	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
-)
-
-type yaml_break_t int
-
-// Line break types.
-const (
-	// Let the parser choose the break type.
-	yaml_ANY_BREAK yaml_break_t = iota
-
-	yaml_CR_BREAK // Use CR for line breaks (Mac style).
-	yaml_LN_BREAK // Use LN for line breaks (Unix style).
-	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
-)
-
-type yaml_error_type_t int
-
-// Many bad things could happen with the parser and emitter.
-const (
-	// No error is produced.
-	yaml_NO_ERROR yaml_error_type_t = iota
-
-	yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
-	yaml_READER_ERROR // Cannot read or decode the input stream.
-	yaml_SCANNER_ERROR // Cannot scan the input stream.
-	yaml_PARSER_ERROR // Cannot parse the input stream.
-	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
-	yaml_WRITER_ERROR // Cannot write to the output stream.
-	yaml_EMITTER_ERROR // Cannot emit a YAML stream.
-)
-
-// The pointer position.
-type yaml_mark_t struct {
-	index int // The position index.
-	line int // The position line.
-	column int // The position column.
-}
-
-// Node Styles
-
-type yaml_style_t int8
-
-type yaml_scalar_style_t yaml_style_t
-
-// Scalar styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
-
-	yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
-	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
-	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
-	yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
-	yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
-)
-
-type yaml_sequence_style_t yaml_style_t
-
-// Sequence styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
-
-	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
-	yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
-)
-
-type yaml_mapping_style_t yaml_style_t
-
-// Mapping styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
-
-	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
-	yaml_FLOW_MAPPING_STYLE // The flow mapping style.
-)
-
-// Tokens
-
-type yaml_token_type_t int
-
-// Token types.
-const (
-	// An empty token.
-	yaml_NO_TOKEN yaml_token_type_t = iota
-
-	yaml_STREAM_START_TOKEN // A STREAM-START token.
-	yaml_STREAM_END_TOKEN // A STREAM-END token.
-
-	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
-	yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
-	yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
-	yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
-
-	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
-	yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
-	yaml_BLOCK_END_TOKEN // A BLOCK-END token.
-
-	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
-	yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
-	yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
-	yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
-
-	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
-	yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
-	yaml_KEY_TOKEN // A KEY token.
-	yaml_VALUE_TOKEN // A VALUE token.
-
-	yaml_ALIAS_TOKEN // An ALIAS token.
-	yaml_ANCHOR_TOKEN // An ANCHOR token.
-	yaml_TAG_TOKEN // A TAG token.
-	yaml_SCALAR_TOKEN // A SCALAR token.
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. 
-) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. 
-type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
-	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
-	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
-	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
-	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
-	yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
-	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
-	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
-	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
-	yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
-	yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
-	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
-	yaml_PARSE_END_STATE // Expect nothing.
-)
-
-func (ps yaml_parser_state_t) String() string {
-	switch ps {
-	case yaml_PARSE_STREAM_START_STATE:
-		return "yaml_PARSE_STREAM_START_STATE"
-	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
-		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
-	case yaml_PARSE_DOCUMENT_START_STATE:
-		return "yaml_PARSE_DOCUMENT_START_STATE"
-	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
-		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
-	case yaml_PARSE_DOCUMENT_END_STATE:
-		return "yaml_PARSE_DOCUMENT_END_STATE"
-	case yaml_PARSE_BLOCK_NODE_STATE:
-		return "yaml_PARSE_BLOCK_NODE_STATE"
-	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
-		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
-	case yaml_PARSE_FLOW_NODE_STATE:
-		return "yaml_PARSE_FLOW_NODE_STATE"
-	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
-		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
-	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
-		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
-	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
-		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
-	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
-		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
-	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
-		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
-	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
-		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
-	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
-	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
-	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
-	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
-		return
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. 
- yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? 
- flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
- block_plain_allowed bool // Can the scalar be expressed in the block plain style?
- single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
- block_allowed bool // Can the scalar be expressed in the literal or folded styles?
- style yaml_scalar_style_t // The output style.
- }
-
- // Dumper stuff
-
- opened bool // If the stream was already opened?
- closed bool // If the stream was already closed?
-
- // The information associated with the document nodes.
- anchors *struct {
- references int // The number of references.
- anchor int // The anchor id.
- serialized bool // If the node has been emitted?
- }
-
- last_anchor_id int // The last assigned anchor id.
-
- document *yaml_document_t // The currently emitted document.
-}
diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
deleted file mode 100644
index 8110ce3c..00000000
--- a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package yaml
-
-const (
- // The size of the input raw buffer.
- input_raw_buffer_size = 512
-
- // The size of the input buffer.
- // It should be possible to decode the whole raw buffer.
- input_buffer_size = input_raw_buffer_size * 3
-
- // The size of the output buffer.
- output_buffer_size = 128
-
- // The size of the output raw buffer.
- // It should be possible to encode the whole output buffer.
- output_raw_buffer_size = (output_buffer_size*2 + 2)
-
- // The size of other stacks and queues.
- initial_stack_size = 16
- initial_queue_size = 16
- initial_string_size = 16
-)
-
-// Check if the character at the specified position is an alphabetical
-// character, a digit, '_', or '-'.
-func is_alpha(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
-}
-
-// Check if the character at the specified position is a digit.
-func is_digit(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9'
-}
-
-// Get the value of a digit.
-func as_digit(b []byte, i int) int {
- return int(b[i]) - '0'
-}
-
-// Check if the character at the specified position is a hex-digit.
-func is_hex(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
-}
-
-// Get the value of a hex-digit.
-func as_hex(b []byte, i int) int {
- bi := b[i]
- if bi >= 'A' && bi <= 'F' {
- return int(bi) - 'A' + 10
- }
- if bi >= 'a' && bi <= 'f' {
- return int(bi) - 'a' + 10
- }
- return int(bi) - '0'
-}
-
-// Check if the character is ASCII.
-func is_ascii(b []byte, i int) bool {
- return b[i] <= 0x7F
-}
-
-// Check if the character at the start of the buffer can be printed unescaped.
-func is_printable(b []byte, i int) bool {
- return ((b[i] == 0x0A) || // . == #x0A
- (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
- (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
- (b[i] > 0xC2 && b[i] < 0xED) ||
- (b[i] == 0xED && b[i+1] < 0xA0) ||
- (b[i] == 0xEE) ||
- (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
- !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
- !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
-}
-
-// Check if the character at the specified position is NUL.
-func is_z(b []byte, i int) bool {
- return b[i] == 0x00
-}
-
-// Check if the beginning of the buffer is a BOM.
-func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 2b00ddba..00000000 --- a/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e97..00000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. 
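The yamlprivateh.go removal above drops go-cty-yaml's byte-classification helpers, including the width() function that derives a UTF-8 sequence length from its leading byte. For reviewers who want to sanity-check that rule outside the vendor tree, here is a minimal standalone sketch (the leadWidth name is ours, not the library's) that cross-checks it against unicode/utf8:

package main

import (
	"fmt"
	"unicode/utf8"
)

// leadWidth mirrors the deleted width(): a UTF-8 sequence's byte length is
// encoded in the high bits of its leading byte.
func leadWidth(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1 // 0xxxxxxx: single-byte (ASCII)
	case b&0xE0 == 0xC0:
		return 2 // 110xxxxx
	case b&0xF0 == 0xE0:
		return 3 // 1110xxxx
	case b&0xF8 == 0xF0:
		return 4 // 11110xxx
	}
	return 0 // continuation or invalid leading byte
}

func main() {
	for _, r := range []rune{'A', 'é', '€', '𐍈'} {
		buf := make([]byte, utf8.UTFMax)
		n := utf8.EncodeRune(buf, r)
		fmt.Printf("%q: leadWidth=%d, utf8.EncodeRune=%d\n", r, leadWidth(buf[0]), n)
	}
}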
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go deleted file mode 100644 index fc311609..00000000 --- a/vendor/golang.org/x/crypto/bcrypt/base64.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bcrypt - -import "encoding/base64" - -const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - -var bcEncoding = base64.NewEncoding(alphabet) - -func base64Encode(src []byte) []byte { - n := bcEncoding.EncodedLen(len(src)) - dst := make([]byte, n) - bcEncoding.Encode(dst, src) - for dst[n-1] == '=' { - n-- - } - return dst[:n] -} - -func base64Decode(src []byte) ([]byte, error) { - numOfEquals := 4 - (len(src) % 4) - for i := 0; i < numOfEquals; i++ { - src = append(src, '=') - } - - dst := make([]byte, bcEncoding.DecodedLen(len(src))) - n, err := bcEncoding.Decode(dst, src) - if err != nil { - return nil, err - } - return dst[:n], nil -} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go deleted file mode 100644 index aeb73f81..00000000 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing -// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt // import "golang.org/x/crypto/bcrypt" - -// The code is a port of Provos and Mazières's C implementation. -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "io" - "strconv" - - "golang.org/x/crypto/blowfish" -) - -const ( - MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword - MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword - DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword -) - -// The error returned from CompareHashAndPassword when a password and hash do -// not match. -var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") - -// The error returned from CompareHashAndPassword when a hash is too short to -// be a bcrypt hash. -var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") - -// The error returned from CompareHashAndPassword when a hash was created with -// a bcrypt algorithm newer than this implementation. -type HashVersionTooNewError byte - -func (hv HashVersionTooNewError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) -} - -// The error returned from CompareHashAndPassword when a hash starts with something other than '$' -type InvalidHashPrefixError byte - -func (ih InvalidHashPrefixError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) -} - -type InvalidCostError int - -func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) -} - -const ( - majorVersion = '2' - minorVersion = 'a' - maxSaltSize = 16 - maxCryptedHashSize = 23 - encodedSaltSize = 22 - encodedHashSize = 31 - minHashSize = 59 -) - -// magicCipherData is an IV for the 64 Blowfish encryption calls in -// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
-var magicCipherData = []byte{ - 0x4f, 0x72, 0x70, 0x68, - 0x65, 0x61, 0x6e, 0x42, - 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x53, - 0x63, 0x72, 0x79, 0x44, - 0x6f, 0x75, 0x62, 0x74, -} - -type hashed struct { - hash []byte - salt []byte - cost int // allowed range is MinCost to MaxCost - major byte - minor byte -} - -// GenerateFromPassword returns the bcrypt hash of the password at the given -// cost. If the cost given is less than MinCost, the cost will be set to -// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, -// to compare the returned hashed password with its cleartext version. -func GenerateFromPassword(password []byte, cost int) ([]byte, error) { - p, err := newFromPassword(password, cost) - if err != nil { - return nil, err - } - return p.Hash(), nil -} - -// CompareHashAndPassword compares a bcrypt hashed password with its possible -// plaintext equivalent. Returns nil on success, or an error on failure. -func CompareHashAndPassword(hashedPassword, password []byte) error { - p, err := newFromHash(hashedPassword) - if err != nil { - return err - } - - otherHash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return err - } - - otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} - if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Cost returns the hashing cost used to create the given hashed -// password. When, in the future, the hashing cost of a password system needs -// to be increased in order to adjust for greater computational power, this -// function allows one to establish which passwords need to be updated. -func Cost(hashedPassword []byte) (int, error) { - p, err := newFromHash(hashedPassword) - if err != nil { - return 0, err - } - return p.cost, nil -} - -func newFromPassword(password []byte, cost int) (*hashed, error) { - if cost < MinCost { - cost = DefaultCost - } - p := new(hashed) - p.major = majorVersion - p.minor = minorVersion - - err := checkCost(cost) - if err != nil { - return nil, err - } - p.cost = cost - - unencodedSalt := make([]byte, maxSaltSize) - _, err = io.ReadFull(rand.Reader, unencodedSalt) - if err != nil { - return nil, err - } - - p.salt = base64Encode(unencodedSalt) - hash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return nil, err - } - p.hash = hash - return p, err -} - -func newFromHash(hashedSecret []byte) (*hashed, error) { - if len(hashedSecret) < minHashSize { - return nil, ErrHashTooShort - } - p := new(hashed) - n, err := p.decodeVersion(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - n, err = p.decodeCost(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - - // The "+2" is here because we'll have to append at most 2 '=' to the salt - // when base64 decoding it in expensiveBlowfishSetup(). 
- p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) - copy(p.salt, hashedSecret[:encodedSaltSize]) - - hashedSecret = hashedSecret[encodedSaltSize:] - p.hash = make([]byte, len(hashedSecret)) - copy(p.hash, hashedSecret) - - return p, nil -} - -func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { - cipherData := make([]byte, len(magicCipherData)) - copy(cipherData, magicCipherData) - - c, err := expensiveBlowfishSetup(password, uint32(cost), salt) - if err != nil { - return nil, err - } - - for i := 0; i < 24; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) - } - } - - // Bug compatibility with C bcrypt implementations. We only encode 23 of - // the 24 bytes encrypted. - hsh := base64Encode(cipherData[:maxCryptedHashSize]) - return hsh, nil -} - -func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - csalt, err := base64Decode(salt) - if err != nil { - return nil, err - } - - // Bug compatibility with C bcrypt implementations. They use the trailing - // NULL in the key string during expansion. - // We copy the key to prevent changing the underlying array. - ckey := append(key[:len(key):len(key)], 0) - - c, err := blowfish.NewSaltedCipher(ckey, csalt) - if err != nil { - return nil, err - } - - var i, rounds uint64 - rounds = 1 << cost - for i = 0; i < rounds; i++ { - blowfish.ExpandKey(ckey, c) - blowfish.ExpandKey(csalt, c) - } - - return c, nil -} - -func (p *hashed) Hash() []byte { - arr := make([]byte, 60) - arr[0] = '$' - arr[1] = p.major - n := 2 - if p.minor != 0 { - arr[2] = p.minor - n = 3 - } - arr[n] = '$' - n++ - copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) - n += 2 - arr[n] = '$' - n++ - copy(arr[n:], p.salt) - n += encodedSaltSize - copy(arr[n:], p.hash) - n += encodedHashSize - return arr[:n] -} - -func (p *hashed) decodeVersion(sbytes []byte) (int, error) { - if sbytes[0] != '$' { - return -1, InvalidHashPrefixError(sbytes[0]) - } - if sbytes[1] > majorVersion { - return -1, HashVersionTooNewError(sbytes[1]) - } - p.major = sbytes[1] - n := 3 - if sbytes[2] != '$' { - p.minor = sbytes[2] - n++ - } - return n, nil -} - -// sbytes should begin where decodeVersion left off. -func (p *hashed) decodeCost(sbytes []byte) (int, error) { - cost, err := strconv.Atoi(string(sbytes[0:2])) - if err != nil { - return -1, err - } - err = checkCost(cost) - if err != nil { - return -1, err - } - p.cost = cost - return 3, nil -} - -func (p *hashed) String() string { - return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) -} - -func checkCost(cost int) error { - if cost < MinCost || cost > MaxCost { - return InvalidCostError(cost) - } - return nil -} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go deleted file mode 100644 index 9d80f195..00000000 --- a/vendor/golang.org/x/crypto/blowfish/block.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -// getNextWord returns the next big-endian uint32 value from the byte slice -// at the given position in a circular manner, updating the position. 
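The hunk above completes the removal of the vendored bcrypt sources; the package itself remains available upstream as golang.org/x/crypto/bcrypt. As a quick reference for the public API deleted here, a minimal usage sketch (assuming the module keeps fetching x/crypto as an ordinary dependency rather than vendoring it):

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash a password at the default cost (10). A cost below MinCost is
	// silently bumped to DefaultCost by the package.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		log.Fatal(err)
	}

	// CompareHashAndPassword returns nil on a match and
	// bcrypt.ErrMismatchedHashAndPassword otherwise.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		log.Fatal("unexpected mismatch: ", err)
	}

	// Cost recovers the work factor embedded in an existing hash, which is
	// useful when deciding whether stored hashes need re-hashing.
	cost, err := bcrypt.Cost(hash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("hash cost:", cost)
}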
-func getNextWord(b []byte, pos *int) uint32 { - var w uint32 - j := *pos - for i := 0; i < 4; i++ { - w = w<<8 | uint32(b[j]) - j++ - if j >= len(b) { - j = 0 - } - } - *pos = j - return w -} - -// ExpandKey performs a key expansion on the given *Cipher. Specifically, it -// performs the Blowfish algorithm's key schedule which sets up the *Cipher's -// pi and substitution tables for calls to Encrypt. This is used, primarily, -// by the bcrypt package to reuse the Blowfish key schedule during its -// set up. It's unlikely that you need to use this directly. -func ExpandKey(key []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - // Using inlined getNextWord for performance. - var d uint32 - for k := 0; k < 4; k++ { - d = d<<8 | uint32(key[j]) - j++ - if j >= len(key) { - j = 0 - } - } - c.p[i] ^= d - } - - var l, r uint32 - for i := 0; i < 18; i += 2 { - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -// This is similar to ExpandKey, but folds the salt during the key -// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero -// salt passed in, reusing ExpandKey turns out to be a place of inefficiency -// and specializing it here is useful. -func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - c.p[i] ^= getNextWord(key, &j) - } - - j = 0 - var l, r uint32 - for i := 0; i < 18; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[0] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ 
c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] - xr ^= c.p[17] - return xr, xl -} - -func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[17] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] - xr ^= c.p[0] - return xr, xl -} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go deleted file mode 100644 index 213bf204..00000000 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. -// -// Blowfish is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" - -// The code is a port of Bruce Schneier's C implementation. -// See https://www.schneier.com/blowfish.html. - -import "strconv" - -// The Blowfish block size in bytes. 
-const BlockSize = 8
-
-// A Cipher is an instance of Blowfish encryption using a particular key.
-type Cipher struct {
- p [18]uint32
- s0, s1, s2, s3 [256]uint32
-}
-
-type KeySizeError int
-
-func (k KeySizeError) Error() string {
- return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
-}
-
-// NewCipher creates and returns a Cipher.
-// The key argument should be the Blowfish key, from 1 to 56 bytes.
-func NewCipher(key []byte) (*Cipher, error) {
- var result Cipher
- if k := len(key); k < 1 || k > 56 {
- return nil, KeySizeError(k)
- }
- initCipher(&result)
- ExpandKey(key, &result)
- return &result, nil
-}
-
-// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
-// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
-// sufficient and desirable. For bcrypt compatibility, the key can be over 56
-// bytes.
-func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
- if len(salt) == 0 {
- return NewCipher(key)
- }
- var result Cipher
- if k := len(key); k < 1 {
- return nil, KeySizeError(k)
- }
- initCipher(&result)
- expandKeyWithSalt(key, salt, &result)
- return &result, nil
-}
-
-// BlockSize returns the Blowfish block size, 8 bytes.
-// It is necessary to satisfy the Block interface in the
-// package "crypto/cipher".
-func (c *Cipher) BlockSize() int { return BlockSize }
-
-// Encrypt encrypts the 8-byte buffer src using the key k
-// and stores the result in dst.
-// Note that for amounts of data larger than a block,
-// it is not safe to just call Encrypt on successive blocks;
-// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
-func (c *Cipher) Encrypt(dst, src []byte) {
- l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
- r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
- l, r = encryptBlock(l, r, c)
- dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
- dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
-}
-
-// Decrypt decrypts the 8-byte buffer src using the key k
-// and stores the result in dst.
-func (c *Cipher) Decrypt(dst, src []byte) {
- l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
- r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
- l, r = decryptBlock(l, r, c)
- dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
- dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
-}
-
-func initCipher(c *Cipher) {
- copy(c.p[0:], p[0:])
- copy(c.s0[0:], s0[0:])
- copy(c.s1[0:], s1[0:])
- copy(c.s2[0:], s2[0:])
- copy(c.s3[0:], s3[0:])
-}
diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go
deleted file mode 100644
index d0407759..00000000
--- a/vendor/golang.org/x/crypto/blowfish/const.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The startup permutation array and substitution boxes.
-// They are the hexadecimal digits of PI; see:
-// https://www.schneier.com/code/constants.txt.
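With cipher.go gone, the vendored Blowfish primitive is fully removed as well. For context, a minimal sketch of the single-block API it exposed, via the upstream golang.org/x/crypto/blowfish import path; note the package's own deprecation notice steers new code toward AES or XChaCha20-Poly1305:

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/blowfish"
)

func main() {
	// Keys may be 1 to 56 bytes; anything else yields a KeySizeError.
	c, err := blowfish.NewCipher([]byte("example key"))
	if err != nil {
		log.Fatal(err)
	}

	src := []byte("8bytes!!") // exactly one BlockSize (8 bytes) of data
	dst := make([]byte, blowfish.BlockSize)
	c.Encrypt(dst, src)

	out := make([]byte, blowfish.BlockSize)
	c.Decrypt(out, dst)
	fmt.Printf("round trip: %s\n", out)
	// For real data, wrap the cipher in a crypto/cipher mode (e.g. CBC)
	// rather than calling Encrypt on successive blocks.
}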
- -package blowfish - -var s0 = [256]uint32{ - 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, - 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, - 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, - 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, - 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, - 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, - 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, - 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, - 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, - 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, - 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, - 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, - 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, - 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, - 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, - 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, - 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, - 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, - 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, - 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, - 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, - 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, - 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, - 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, - 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, - 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, - 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, - 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, - 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, - 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, - 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, - 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, - 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, - 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, - 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, - 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, - 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, - 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, - 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, - 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, - 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, - 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, - 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, -} - -var s1 = [256]uint32{ - 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, - 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, - 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, - 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, - 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, - 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, - 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, - 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, - 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, - 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, - 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, - 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, - 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, - 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, - 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, - 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, - 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, - 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, - 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, - 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, - 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, - 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, - 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, - 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, - 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, - 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, - 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, - 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, - 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, - 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, - 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, - 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, - 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, - 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, - 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, - 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, - 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, - 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, - 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, - 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, - 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, - 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, - 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, -} - -var s2 = [256]uint32{ - 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, - 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, - 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, - 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, - 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, - 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, - 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, - 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, - 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, - 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, - 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, - 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, - 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, - 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, - 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, - 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, - 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, - 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, - 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, - 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, - 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, - 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, - 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, - 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, - 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, - 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, - 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, - 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, - 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, - 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, - 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, - 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, - 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, - 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, - 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, - 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, - 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, - 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, - 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, - 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, - 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, - 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, - 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, -} - -var s3 = [256]uint32{ - 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, - 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, - 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, - 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, - 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, - 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, - 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, - 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, - 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, - 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, - 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, - 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, - 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, - 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, - 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, - 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, - 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, - 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, - 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, - 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, - 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, - 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, - 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, - 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, - 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, - 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, - 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, - 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, - 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, - 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, - 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, - 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, - 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, - 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, - 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, - 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, - 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, - 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, - 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, - 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, - 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, - 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, - 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, -} - -var p = [18]uint32{ - 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, - 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, - 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, -} diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go deleted file mode 100644 index ddcbeb6f..00000000 --- a/vendor/golang.org/x/crypto/cast5/cast5.go +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cast5 implements CAST5, as defined in RFC 2144. -// -// CAST5 is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). 
-package cast5 // import "golang.org/x/crypto/cast5" - -import "errors" - -const BlockSize = 8 -const KeySize = 16 - -type Cipher struct { - masking [16]uint32 - rotate [16]uint8 -} - -func NewCipher(key []byte) (c *Cipher, err error) { - if len(key) != KeySize { - return nil, errors.New("CAST5: keys must be 16 bytes") - } - - c = new(Cipher) - c.keySchedule(key) - return -} - -func (c *Cipher) BlockSize() int { - return BlockSize -} - -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -type keyScheduleA [4][7]uint8 -type keyScheduleB [4][5]uint8 - -// keyScheduleRound contains the magic values for a round of the key schedule. -// The keyScheduleA deals with the lines like: -// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] -// Conceptually, both x and z are in the same array, x first. The first -// element describes which word of this array gets written to and the -// second, which word gets read. So, for the line above, it's "4, 0", because -// it's writing to the first word of z, which, being after x, is word 4, and -// reading from the first word of x: word 0. -// -// Next are the indexes into the S-boxes. Now the array is treated as bytes. So -// "xD" is 0xd. 
The first byte of z is written as "16 + 0", just to be clear -// that it's z that we're indexing. -// -// keyScheduleB deals with lines like: -// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] -// "K1" is ignored because key words are always written in order. So the five -// elements are the S-box indexes. They use the same form as in keyScheduleA, -// above. - -type keyScheduleRound struct{} -type keySchedule []keyScheduleRound - -var schedule = []struct { - a keyScheduleA - b keyScheduleB -}{ - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, - {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, - {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, - {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {3, 2, 0xc, 0xd, 8}, - {1, 0, 0xe, 0xf, 0xd}, - {7, 6, 8, 9, 3}, - {5, 4, 0xa, 0xb, 7}, - }, - }, - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, - {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, - {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, - {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {8, 9, 7, 6, 3}, - {0xa, 0xb, 5, 4, 7}, - {0xc, 0xd, 3, 2, 8}, - {0xe, 0xf, 1, 0, 0xd}, - }, - }, -} - -func (c *Cipher) keySchedule(in []byte) { - var t [8]uint32 - var k [32]uint32 - - for i := 0; i < 4; i++ { - j := i * 4 - t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) - } - - x := []byte{6, 7, 4, 5} - ki := 0 - - for half := 0; half < 2; half++ { - for _, round := range schedule { - for j := 0; j < 4; j++ { - var a [7]uint8 - copy(a[:], round.a[j][:]) - w := t[a[1]] - w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] - w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] - w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] - w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] - w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] - t[a[0]] = w - } - - for j := 0; j < 4; j++ { - var b [5]uint8 - copy(b[:], round.b[j][:]) - w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] - w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] - w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] - w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] - w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] - k[ki] = w - ki++ - } - } - } - - for i := 0; i < 16; i++ { - c.masking[i] = k[i] - c.rotate[i] = uint8(k[16+i] & 0x1f) - } -} - -// These are the three 'f' functions. See RFC 2144, section 2.2. 
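The key-schedule machinery above is the last piece of CAST5's setup logic before its round functions and S-boxes. For reviewers, a minimal round-trip sketch of the deleted package's public API via its upstream import path golang.org/x/crypto/cast5 (the key must be exactly KeySize, 16 bytes):

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/crypto/cast5"
)

func main() {
	// CAST5 requires a 16-byte key; NewCipher rejects any other length.
	key := bytes.Repeat([]byte{0x01}, cast5.KeySize)
	c, err := cast5.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}

	src := []byte("ABCDEFGH") // one BlockSize (8 bytes) of data
	dst := make([]byte, cast5.BlockSize)
	c.Encrypt(dst, src)

	out := make([]byte, cast5.BlockSize)
	c.Decrypt(out, dst)
	fmt.Printf("round trip: %s\n", out) // ABCDEFGH
}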
-func f1(d, m uint32, r uint8) uint32 { - t := m + d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] -} - -func f2(d, m uint32, r uint8) uint32 { - t := m ^ d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] -} - -func f3(d, m uint32, r uint8) uint32 { - t := m - d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] -} - -var sBox = [8][256]uint32{ - { - 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, - 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, - 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, - 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, - 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, - 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, - 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, - 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, - 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, - 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, - 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, - 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, - 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, - 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, - 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, - 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, - 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, - 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, - 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, - 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, - 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, - 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, - 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, - 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, - 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, - 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, - 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, - 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, - 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, - 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, - 0xbd91e046, 0x9a56456e, 
0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, - 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, - }, - { - 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, - 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, - 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, - 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, - 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, - 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, - 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, - 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, - 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, - 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, - 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, - 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, - 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, - 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, - 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, - 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, - 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, - 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, - 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, - 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, - 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, - 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, - 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, - 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, - 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, - 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, - 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, - 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, - 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, - 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, - 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, - 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, - }, - { - 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, - 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, - 0x11107d9f, 0x07647db9, 
0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, - 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, - 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, - 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, - 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, - 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, - 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, - 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, - 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, - 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, - 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, - 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, - 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, - 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, - 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, - 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, - 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, - 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, - 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, - 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, - 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, - 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, - 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, - 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, - 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, - 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, - 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, - 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, - 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, - 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, - }, - { - 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, - 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, - 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, - 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, - 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, - 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, - 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 
0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, - 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, - 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, - 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, - 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, - 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, - 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, - 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, - 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, - 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, - 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, - 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, - 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, - 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, - 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, - 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, - 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, - 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, - 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, - 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, - 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, - 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, - 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, - 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, - 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, - 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, - }, - { - 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, - 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, - 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, - 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, - 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, - 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, - 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, - 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, - 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, - 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, - 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 
0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, - 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, - 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, - 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, - 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, - 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, - 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, - 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, - 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, - 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, - 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, - 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, - 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, - 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, - 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, - 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, - 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, - 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, - 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, - 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, - 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, - 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, - }, - { - 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, - 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, - 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, - 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, - 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, - 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, - 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, - 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, - 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, - 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, - 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, - 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, - 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, - 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, - 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 
0xa8dc8af0, 0x7345c106, 0xf41e232f, - 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, - 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, - 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, - 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, - 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, - 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, - 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, - 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, - 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, - 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, - 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, - 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, - 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, - 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, - 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, - 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, - 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, - }, - { - 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, - 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, - 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, - 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, - 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, - 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, - 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, - 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, - 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, - 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, - 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, - 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, - 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, - 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, - 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, - 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, - 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, - 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, - 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 
0x488dcf25, 0x36c9d566, - 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, - 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, - 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, - 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, - 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, - 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, - 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, - 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, - 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, - 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, - 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, - 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, - 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, - }, - { - 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, - 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, - 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, - 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, - 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, - 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, - 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, - 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, - 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, - 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, - 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, - 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, - 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, - 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, - 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, - 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, - 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, - 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, - 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, - 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, - 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, - 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, - 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 
0x2eda7fa4, - 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, - 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, - 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, - 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, - 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, - 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, - 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, - 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, - 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, - }, -} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go deleted file mode 100644 index 592d1864..00000000 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is -// very similar to PEM except that it has an additional CRC checksum. -package armor // import "golang.org/x/crypto/openpgp/armor" - -import ( - "bufio" - "bytes" - "encoding/base64" - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// A Block represents an OpenPGP armored structure. -// -// The encoded form is: -// -----BEGIN Type----- -// Headers -// -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- -// where Headers is a possibly empty sequence of Key: Value lines. -// -// Since the armored data can be very large, this package presents a streaming -// interface. -type Block struct { - Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). - Header map[string]string // Optional headers. - Body io.Reader // A Reader from which the contents can be read - lReader lineReader - oReader openpgpReader -} - -var ArmorCorrupt error = errors.StructuralError("armor invalid") - -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - -var armorStart = []byte("-----BEGIN ") -var armorEnd = []byte("-----END ") -var armorEndOfLine = []byte("-----") - -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. 
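For reference, the crc24 function above is the RFC 4880 section 6.1 checksum that terminates every armored block. The five-character "=XXXX" trailer that lineReader parses back out can be derived as in this minimal, self-contained sketch; the constants mirror the source above and the payload is hypothetical:

package main

import (
	"encoding/base64"
	"fmt"
)

const (
	crc24Init = 0xb704ce
	crc24Poly = 0x1864cfb
	crc24Mask = 0xffffff
)

// crc24 mirrors the implementation above: each byte is shifted into a
// 24-bit register, which is then reduced by the CRC-24 polynomial.
func crc24(crc uint32, d []byte) uint32 {
	for _, b := range d {
		crc ^= uint32(b) << 16
		for i := 0; i < 8; i++ {
			crc <<= 1
			if crc&0x1000000 != 0 {
				crc ^= crc24Poly
			}
		}
	}
	return crc
}

func main() {
	sum := crc24(crc24Init, []byte("example payload")) & crc24Mask
	raw := []byte{byte(sum >> 16), byte(sum >> 8), byte(sum)}
	// The trailer is '=' plus the three CRC bytes in base64: exactly the
	// 5-character line that lineReader's checksum branch looks for.
	fmt.Println("=" + base64.StdEncoding.EncodeToString(raw))
}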
-type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 -} - -func (l *lineReader) Read(p []byte) (n int, err error) { - if l.eof { - return 0, io.EOF - } - - if len(l.buf) > 0 { - n = copy(p, l.buf) - l.buf = l.buf[n:] - return - } - - line, isPrefix, err := l.in.ReadLine() - if err != nil { - return - } - if isPrefix { - return 0, ArmorCorrupt - } - - if len(line) == 5 && line[0] == '=' { - // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - return 0, io.EOF - } - - if len(line) > 96 { - return 0, ArmorCorrupt - } - - n = copy(p, line) - bytesToSave := len(line) - n - if bytesToSave > 0 { - if cap(l.buf) < bytesToSave { - l.buf = make([]byte, 0, bytesToSave) - } - l.buf = l.buf[0:bytesToSave] - copy(l.buf, line[n:]) - } - - return -} - -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. -type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 -} - -func (r *openpgpReader) Read(p []byte) (n int, err error) { - n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF { - if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - } - - return -} - -// Decode reads a PGP armored block from the given Reader. It will ignore -// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The -// given Reader is not usable after calling this function: an arbitrary amount -// of data may have been read past the end of the block. 
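Decode, defined next, is the usual entry point for this package. A minimal consumption sketch follows; the "PGP MESSAGE" block-type check is illustrative, and note that the CRC is only verified once Body reaches EOF, so the error from the final read matters:

package example

import (
	"fmt"
	"io"
	"io/ioutil"

	"golang.org/x/crypto/openpgp/armor"
)

func readArmored(in io.Reader) ([]byte, error) {
	block, err := armor.Decode(in)
	if err != nil {
		return nil, err // io.EOF when no armored block was found
	}
	if block.Type != "PGP MESSAGE" { // illustrative; check the type you expect
		return nil, fmt.Errorf("unexpected armor type %q", block.Type)
	}
	// The checksum is checked at EOF by openpgpReader above, so a CRC
	// mismatch surfaces here as ArmorCorrupt even if every earlier
	// Read succeeded.
	return ioutil.ReadAll(block.Body)
}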
-func Decode(in io.Reader) (p *Block, err error) { - r := bufio.NewReaderSize(in, 100) - var line []byte - ignoreNext := false - -TryNextBlock: - p = nil - - // Skip leading garbage - for { - ignoreThis := ignoreNext - line, ignoreNext, err = r.ReadLine() - if err != nil { - return - } - if ignoreNext || ignoreThis { - continue - } - line = bytes.TrimSpace(line) - if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { - break - } - } - - p = new(Block) - p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) - p.Header = make(map[string]string) - nextIsContinuation := false - var lastKey string - - // Read headers - for { - isContinuation := nextIsContinuation - line, nextIsContinuation, err = r.ReadLine() - if err != nil { - p = nil - return - } - if isContinuation { - p.Header[lastKey] += string(line) - continue - } - line = bytes.TrimSpace(line) - if len(line) == 0 { - break - } - - i := bytes.Index(line, []byte(": ")) - if i == -1 { - goto TryNextBlock - } - lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) - } - - p.lReader.in = r - p.oReader.currentCRC = crc24Init - p.oReader.lReader = &p.lReader - p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) - p.Body = &p.oReader - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go deleted file mode 100644 index 6f07582c..00000000 --- a/vendor/golang.org/x/crypto/openpgp/armor/encode.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "encoding/base64" - "io" -) - -var armorHeaderSep = []byte(": ") -var blockEnd = []byte("\n=") -var newline = []byte("\n") -var armorEndOfLineOut = []byte("-----\n") - -// writeSlices writes its arguments to the given Writer. -func writeSlices(out io.Writer, slices ...[]byte) (err error) { - for _, s := range slices { - _, err = out.Write(s) - if err != nil { - return err - } - } - return -} - -// lineBreaker breaks data across several lines, all of the same byte length -// (except possibly the last). Lines are broken with a single '\n'. -type lineBreaker struct { - lineLength int - line []byte - used int - out io.Writer - haveWritten bool -} - -func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { - return &lineBreaker{ - lineLength: lineLength, - line: make([]byte, lineLength), - used: 0, - out: out, - } -} - -func (l *lineBreaker) Write(b []byte) (n int, err error) { - n = len(b) - - if n == 0 { - return - } - - if l.used == 0 && l.haveWritten { - _, err = l.out.Write([]byte{'\n'}) - if err != nil { - return - } - } - - if l.used+len(b) < l.lineLength { - l.used += copy(l.line[l.used:], b) - return - } - - l.haveWritten = true - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - excess := l.lineLength - l.used - l.used = 0 - - _, err = l.out.Write(b[0:excess]) - if err != nil { - return - } - - _, err = l.Write(b[excess:]) - return -} - -func (l *lineBreaker) Close() (err error) { - if l.used > 0 { - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - } - - return -} - -// encoding keeps track of a running CRC24 over the data which has been written -// to it and outputs a OpenPGP checksum when closed, followed by an armor -// trailer. 
-// -// It's built into a stack of io.Writers: -// encoding -> base64 encoder -> lineBreaker -> out -type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) - return e.b64.Write(data) -} - -func (e *encoding) Close() (err error) { - err = e.b64.Close() - if err != nil { - return - } - e.breaker.Close() - - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { - bType := []byte(blockType) - err = writeSlices(out, armorStart, bType, armorEndOfLineOut) - if err != nil { - return - } - - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) - if err != nil { - return - } - } - - _, err = out.Write(newline) - if err != nil { - return - } - - e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, - } - e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) - return e, nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/golang.org/x/crypto/openpgp/canonical_text.go deleted file mode 100644 index e601e389..00000000 --- a/vendor/golang.org/x/crypto/openpgp/canonical_text.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import "hash" - -// NewCanonicalTextHash reformats text written to it into the canonical -// form and then applies the hash h. See RFC 4880, section 5.2.1. -func NewCanonicalTextHash(h hash.Hash) hash.Hash { - return &canonicalTextHash{h, 0} -} - -type canonicalTextHash struct { - h hash.Hash - s int -} - -var newline = []byte{'\r', '\n'} - -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { - start := 0 - - for i, c := range buf { - switch cth.s { - case 0: - if c == '\r' { - cth.s = 1 - } else if c == '\n' { - cth.h.Write(buf[start:i]) - cth.h.Write(newline) - start = i + 1 - } - case 1: - cth.s = 0 - } - } - - cth.h.Write(buf[start:]) - return len(buf), nil -} - -func (cth *canonicalTextHash) Sum(in []byte) []byte { - return cth.h.Sum(in) -} - -func (cth *canonicalTextHash) Reset() { - cth.h.Reset() - cth.s = 0 -} - -func (cth *canonicalTextHash) Size() int { - return cth.h.Size() -} - -func (cth *canonicalTextHash) BlockSize() int { - return cth.h.BlockSize() -} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go deleted file mode 100644 index 73f4fe37..00000000 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package elgamal implements ElGamal encryption, suitable for OpenPGP, -// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on -// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, -// n. 4, 1985, pp. 469-472. -// -// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it -// unsuitable for other protocols. RSA should be used in preference in any -// case. -package elgamal // import "golang.org/x/crypto/openpgp/elgamal" - -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" -) - -// PublicKey represents an ElGamal public key. -type PublicKey struct { - G, P, Y *big.Int -} - -// PrivateKey represents an ElGamal private key. -type PrivateKey struct { - PublicKey - X *big.Int -} - -// Encrypt encrypts the given message to the given public key. The result is a -// pair of integers. Errors can result from reading random, or because msg is -// too large to be encrypted to the public key. -func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { - pLen := (pub.P.BitLen() + 7) / 8 - if len(msg) > pLen-11 { - err = errors.New("elgamal: message too long") - return - } - - // EM = 0x02 || PS || 0x00 || M - em := make([]byte, pLen-1) - em[0] = 2 - ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] - err = nonZeroRandomBytes(ps, random) - if err != nil { - return - } - em[len(em)-len(msg)-1] = 0 - copy(mm, msg) - - m := new(big.Int).SetBytes(em) - - k, err := rand.Int(random, pub.P) - if err != nil { - return - } - - c1 = new(big.Int).Exp(pub.G, k, pub.P) - s := new(big.Int).Exp(pub.Y, k, pub.P) - c2 = s.Mul(s, m) - c2.Mod(c2, pub.P) - - return -} - -// Decrypt takes two integers, resulting from an ElGamal encryption, and -// returns the plaintext of the message. An error can result only if the -// ciphertext is invalid. Users should keep in mind that this is a padding -// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel -// Bleichenbacher, Advances in Cryptology (Crypto '98), -func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { - s := new(big.Int).Exp(c1, priv.X, priv.P) - s.ModInverse(s, priv.P) - s.Mul(s, c2) - s.Mod(s, priv.P) - em := s.Bytes() - - firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) - - // The remainder of the plaintext must be a string of non-zero random - // octets, followed by a 0, followed by the message. - // lookingForIndex: 1 iff we are still looking for the zero. - // index: the offset of the first zero byte. - var lookingForIndex, index int - lookingForIndex = 1 - - for i := 1; i < len(em); i++ { - equals0 := subtle.ConstantTimeByteEq(em[i], 0) - index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) - } - - if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { - return nil, errors.New("elgamal: decryption error") - } - return em[index+1:], nil -} - -// nonZeroRandomBytes fills the given slice with non-zero random octets. 
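A round-trip sketch for the Encrypt and Decrypt functions above. It assumes priv is an existing *elgamal.PrivateKey obtained elsewhere (group parameter generation is out of scope here) and that msg respects the pLen-11 bound Encrypt enforces:

package example

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/openpgp/elgamal"
)

// roundTrip encrypts msg to priv's public half and decrypts it again.
func roundTrip(priv *elgamal.PrivateKey, msg []byte) error {
	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, msg)
	if err != nil {
		return err // e.g. "elgamal: message too long"
	}
	got, err := elgamal.Decrypt(priv, c1, c2)
	if err != nil {
		return err
	}
	if !bytes.Equal(got, msg) {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}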
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { - _, err = io.ReadFull(rand, s) - if err != nil { - return - } - - for i := 0; i < len(s); i++ { - for s[i] == 0 { - _, err = io.ReadFull(rand, s[i:i+1]) - if err != nil { - return - } - } - } - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go deleted file mode 100644 index eb0550b2..00000000 --- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errors contains common error types for the OpenPGP packages. -package errors // import "golang.org/x/crypto/openpgp/errors" - -import ( - "strconv" -) - -// A StructuralError is returned when OpenPGP data is found to be syntactically -// invalid. -type StructuralError string - -func (s StructuralError) Error() string { - return "openpgp: invalid data: " + string(s) -} - -// UnsupportedError indicates that, although the OpenPGP data is valid, it -// makes use of currently unimplemented features. -type UnsupportedError string - -func (s UnsupportedError) Error() string { - return "openpgp: unsupported feature: " + string(s) -} - -// InvalidArgumentError indicates that the caller is in error and passed an -// incorrect value. -type InvalidArgumentError string - -func (i InvalidArgumentError) Error() string { - return "openpgp: invalid argument: " + string(i) -} - -// SignatureError indicates that a syntactically valid signature failed to -// validate. -type SignatureError string - -func (b SignatureError) Error() string { - return "openpgp: invalid signature: " + string(b) -} - -type keyIncorrectError int - -func (ki keyIncorrectError) Error() string { - return "openpgp: incorrect key" -} - -var ErrKeyIncorrect error = keyIncorrectError(0) - -type unknownIssuerError int - -func (unknownIssuerError) Error() string { - return "openpgp: signature made by unknown entity" -} - -var ErrUnknownIssuer error = unknownIssuerError(0) - -type keyRevokedError int - -func (keyRevokedError) Error() string { - return "openpgp: signature made by revoked key" -} - -var ErrKeyRevoked error = keyRevokedError(0) - -type UnknownPacketTypeError uint8 - -func (upte UnknownPacketTypeError) Error() string { - return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) -} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go deleted file mode 100644 index faa2fb36..00000000 --- a/vendor/golang.org/x/crypto/openpgp/keys.go +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto/rsa" - "io" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// PublicKeyType is the armor type for a PGP public key. -var PublicKeyType = "PGP PUBLIC KEY BLOCK" - -// PrivateKeyType is the armor type for a PGP private key. -var PrivateKeyType = "PGP PRIVATE KEY BLOCK" - -// An Entity represents the components of an OpenPGP key: a primary public key -// (which must be a signing key), one or more identities claimed by that key, -// and zero or more subkeys, which may be encryption keys. 
-type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey -} - -// An Identity represents an identity claimed by an Entity and zero or more -// assertions by other entities about that claim. -type Identity struct { - Name string // by convention, has the form "Full Name (comment) " - UserId *packet.UserId - SelfSignature *packet.Signature - Signatures []*packet.Signature -} - -// A Subkey is an additional public key in an Entity. Subkeys can be used for -// encryption. -type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature -} - -// A Key identifies a specific public key in an Entity. This is either the -// Entity's primary key or a subkey. -type Key struct { - Entity *Entity - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - SelfSignature *packet.Signature -} - -// A KeyRing provides access to public and private keys. -type KeyRing interface { - // KeysById returns the set of keys that have the given key id. - KeysById(id uint64) []Key - // KeysByIdAndUsage returns the set of keys with the given id - // that also meet the key usage given by requiredUsage. - // The requiredUsage is expressed as the bitwise-OR of - // packet.KeyFlag* values. - KeysByIdUsage(id uint64, requiredUsage byte) []Key - // DecryptionKeys returns all private keys that are valid for - // decryption. - DecryptionKeys() []Key -} - -// primaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. -func (e *Entity) primaryIdentity() *Identity { - var firstIdentity *Identity - for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident - } - } - return firstIdentity -} - -// encryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) encryptionKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - // Iterate the keys to find the newest key - var maxTime time.Time - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagEncryptCommunications && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.Sig.KeyExpired(now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt to, then we can obviously use it. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - // This Entity appears to be signing only. - return Key{}, false -} - -// signingKey return the best candidate Key for signing a message with this -// Entity. 
-func (e *Entity) signingKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagSign && - subkey.PublicKey.PubKeyAlgo.CanSign() && - !subkey.Sig.KeyExpired(now) { - candidateSubkey = i - break - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - return Key{}, false -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. -func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// KeysByIdAndUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values. -func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { - var usage byte - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. -func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. -func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. 
Unsupported keys are -// ignored as long as at least a single valid key is found. -func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. -func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } - e.PrimaryKey = &e.PrivateKey.PublicKey - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - switch pkt := p.(type) { - case *packet.UserId: - if err := addUserID(e, packets, pkt); err != nil { - return nil, err - } - case *packet.Signature: - if pkt.SigType == packet.SigTypeKeyRevocation { - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). - } - // Else, ignoring the signature as it does not follow anything - // we would know to attach it to. - case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. 
- return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { - // Make a new Identity object, that we might wind up throwing away. - // We'll only add it if we get a valid self-signature over this - // userID. - identity := new(Identity) - identity.Name = pkt.Id - identity.UserId = pkt - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { - if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { - return errors.StructuralError("user ID self-signature invalid: " + err.Error()) - } - identity.SelfSignature = sig - e.Identities[pkt.Id] = identity - } else { - identity.Signatures = append(identity.Signatures, sig) - } - } - - return nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { - return errors.StructuralError("subkey signature with wrong type") - } - - if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - switch sig.SigType { - case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig - case packet.SigTypeSubkeyBinding: - - if shouldReplaceSubkeySig(subKey.Sig, sig) { - subKey.Sig = sig - } - } - } - - if subKey.Sig == nil { - return errors.StructuralError("subkey packet not followed by signature") - } - - e.Subkeys = append(e.Subkeys, subKey) - - return nil -} - -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - -const defaultRSAKeyBits = 2048 - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. 
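A minimal sketch of calling NewEntity, defined next. The identity fields are hypothetical, and the nil config selects the 2048-bit RSA and SHA-256 defaults noted above:

package main

import (
	"log"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// Hypothetical identity; fields may be empty but must not contain
	// any of "()<>\x00", per the doc comment above.
	e, err := openpgp.NewEntity("Alice Example", "demo", "alice@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Write the public half, including self-signatures, to stdout.
	if err := e.Serialize(os.Stdout); err != nil {
		log.Fatal(err)
	}
}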
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - creationTime := config.Now() - - bits := defaultRSAKeyBits - if config != nil && config.RSABits != 0 { - bits = config.RSABits - } - - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - signingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - - e := &Entity{ - PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv), - Identities: make(map[string]*Identity), - } - isPrimaryId := true - e.Identities[uid.Id] = &Identity{ - Name: uid.Id, - UserId: uid, - SelfSignature: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - - // If the user passes in a DefaultHash via packet.Config, - // set the PreferredHash for the SelfSignature. - if config != nil && config.DefaultHash != 0 { - e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} - } - - // Likewise for DefaultCipher. - if config != nil && config.DefaultCipher != 0 { - e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} - } - - e.Subkeys = make([]Subkey, 1) - e.Subkeys[0] = Subkey{ - PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv), - Sig: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - e.Subkeys[0].PublicKey.IsSubkey = true - e.Subkeys[0].PrivateKey.IsSubkey = true - err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - return e, nil -} - -// SerializePrivate serializes an Entity, including private key material, but -// excluding signatures from other entities, to the given Writer. -// Identities and subkeys are re-signed in case they changed since NewEntry. -// If config is nil, sensible defaults will be used. 
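SerializePrivate, defined next, pairs naturally with the armor encoder removed earlier. A sketch of exporting a key that ReadArmoredKeyRing can read back, assuming e already holds decrypted private key material:

package example

import (
	"io"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/armor"
)

func exportPrivate(e *openpgp.Entity, out io.Writer) error {
	w, err := armor.Encode(out, openpgp.PrivateKeyType, nil)
	if err != nil {
		return err
	}
	// Re-signs identities and subkeys, as the doc comment above notes.
	if err := e.SerializePrivate(w, nil); err != nil {
		return err
	}
	return w.Close() // emits the CRC-24 trailer and the END line
}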
-func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w, including -// signatures from other entities. No private key material will be output. -func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. -func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := &packet.Signature{ - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, - } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go deleted file mode 100644 index e8f0b5ca..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "compress/bzip2" - "compress/flate" - "compress/zlib" - "golang.org/x/crypto/openpgp/errors" - "io" - "strconv" -) - -// Compressed represents a compressed OpenPGP packet. The decompressed contents -// will contain more OpenPGP packets. See RFC 4880, section 5.6. 
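The writing side lives in SerializeCompressed further down this file. A minimal sketch, assuming out is the io.WriteCloser carrying the enclosing OpenPGP stream and the payload stands in for real literal-data packets:

package example

import (
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

func writeCompressed(out io.WriteCloser) error {
	wc, err := packet.SerializeCompressed(out, packet.CompressionZLIB,
		&packet.CompressionConfig{Level: packet.BestSpeed})
	if err != nil {
		return err
	}
	if _, err := wc.Write([]byte("inner OpenPGP packets go here")); err != nil {
		return err
	}
	// Close flushes the compressor first, then the stream header, as
	// compressedWriteCloser below requires.
	return wc.Close()
}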
-type Compressed struct { - Body io.Reader -} - -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression -) - -// CompressionConfig contains compressor configuration settings. -type CompressionConfig struct { - // Level is the compression level to use. It must be set to - // between -1 and 9, with -1 causing the compressor to use the - // default compression level, 0 causing the compressor to use - // no compression and 1 to 9 representing increasing (better, - // slower) compression levels. If Level is less than -1 or - // more then 9, a non-nil error will be returned during - // encryption. See the constants above for convenient common - // settings for Level. - Level int -} - -func (c *Compressed) parse(r io.Reader) error { - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - - switch buf[0] { - case 1: - c.Body = flate.NewReader(r) - case 2: - c.Body, err = zlib.NewReader(r) - case 3: - c.Body = bzip2.NewReader(r) - default: - err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) - } - - return err -} - -// compressedWriterCloser represents the serialized compression stream -// header and the compressor. Its Close() method ensures that both the -// compressor and serialized stream header are closed. Its Write() -// method writes to the compressor. -type compressedWriteCloser struct { - sh io.Closer // Stream Header - c io.WriteCloser // Compressor -} - -func (cwc compressedWriteCloser) Write(p []byte) (int, error) { - return cwc.c.Write(p) -} - -func (cwc compressedWriteCloser) Close() (err error) { - err = cwc.c.Close() - if err != nil { - return err - } - - return cwc.sh.Close() -} - -// SerializeCompressed serializes a compressed data packet to w and -// returns a WriteCloser to which the literal data packets themselves -// can be written and which MUST be closed on completion. If cc is -// nil, sensible defaults will be used to configure the compression -// algorithm. -func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { - compressed, err := serializeStreamHeader(w, packetTypeCompressed) - if err != nil { - return - } - - _, err = compressed.Write([]byte{uint8(algo)}) - if err != nil { - return - } - - level := DefaultCompression - if cc != nil { - level = cc.Level - } - - var compressor io.WriteCloser - switch algo { - case CompressionZIP: - compressor, err = flate.NewWriter(compressed, level) - case CompressionZLIB: - compressor, err = zlib.NewWriterLevel(compressed, level) - default: - s := strconv.Itoa(int(algo)) - err = errors.UnsupportedError("Unsupported compression algorithm: " + s) - } - if err != nil { - return - } - - literaldata = compressedWriteCloser{compressed, compressor} - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go deleted file mode 100644 index c76eecc9..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rand" - "io" - "time" -) - -// Config collects a number of parameters along with sensible defaults. -// A nil *Config is valid and results in all default values. 
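Since every accessor below guards against a nil receiver, the zero configuration is usable directly. A short sketch of the documented defaults:

package example

import "golang.org/x/crypto/openpgp/packet"

// defaults shows that each Config accessor tolerates a nil receiver.
func defaults() {
	var cfg *packet.Config // nil is explicitly valid
	_ = cfg.Random()       // crypto/rand Reader
	_ = cfg.Hash()         // crypto.SHA256
	_ = cfg.Cipher()       // packet.CipherAES128
	_ = cfg.Now()          // time.Now()
	_ = cfg.Compression()  // packet.CompressionNone
}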
-type Config struct { - // Rand provides the source of entropy. - // If nil, the crypto/rand Reader is used. - Rand io.Reader - // DefaultHash is the default hash function to be used. - // If zero, SHA-256 is used. - DefaultHash crypto.Hash - // DefaultCipher is the cipher to be used. - // If zero, AES-128 is used. - DefaultCipher CipherFunction - // Time returns the current time as the number of seconds since the - // epoch. If Time is nil, time.Now is used. - Time func() time.Time - // DefaultCompressionAlgo is the compression algorithm to be - // applied to the plaintext before encryption. If zero, no - // compression is done. - DefaultCompressionAlgo CompressionAlgo - // CompressionConfig configures the compression settings. - CompressionConfig *CompressionConfig - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int - // RSABits is the number of bits in new RSA keys made with NewEntity. - // If zero, then 2048 bit keys are created. - RSABits int -} - -func (c *Config) Random() io.Reader { - if c == nil || c.Rand == nil { - return rand.Reader - } - return c.Rand -} - -func (c *Config) Hash() crypto.Hash { - if c == nil || uint(c.DefaultHash) == 0 { - return crypto.SHA256 - } - return c.DefaultHash -} - -func (c *Config) Cipher() CipherFunction { - if c == nil || uint8(c.DefaultCipher) == 0 { - return CipherAES128 - } - return c.DefaultCipher -} - -func (c *Config) Now() time.Time { - if c == nil || c.Time == nil { - return time.Now() - } - return c.Time() -} - -func (c *Config) Compression() CompressionAlgo { - if c == nil { - return CompressionNone - } - return c.DefaultCompressionAlgo -} - -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go deleted file mode 100644 index 02b372cf..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/rsa" - "encoding/binary" - "io" - "math/big" - "strconv" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1. 
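
The accessors above all follow one idiom: every method tolerates a nil *Config receiver and substitutes a default, which is what makes "a nil *Config is valid" true. A self-contained sketch of that pattern (type and method names are illustrative):

package main

import (
	"crypto"
	"fmt"
)

// cfg demonstrates the nil-receiver pattern used by Config:
// each accessor tolerates a nil *cfg and falls back to a default.
type cfg struct {
	DefaultHash crypto.Hash
}

func (c *cfg) Hash() crypto.Hash {
	if c == nil || c.DefaultHash == 0 {
		return crypto.SHA256
	}
	return c.DefaultHash
}

func main() {
	var c *cfg // a nil config is valid
	fmt.Println(c.Hash() == crypto.SHA256) // true
}
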
-type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt - Key []byte // only valid after a successful Decrypt - - encryptedMPI1, encryptedMPI2 parsedMPI -} - -func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != encryptedKeyVersion { - return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) - } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - case PubKeyAlgoElGamal: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) - if err != nil { - return - } - } - _, err = consumeAll(r) - return -} - -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - -// Decrypt decrypts an encrypted session key with the given private key. The -// private key must have been decrypted first. -// If config is nil, sensible defaults will be used. -func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - var err error - var b []byte - - // TODO(agl): use session key decryption routines here to avoid - // padding oracle attacks. - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - k := priv.PrivateKey.(*rsa.PrivateKey) - b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes)) - case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) - c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) - b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) - default: - err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - - if err != nil { - return err - } - - e.CipherFunc = CipherFunction(b[0]) - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - - return nil -} - -// Serialize writes the encrypted key packet, e, to w. -func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = 2 + len(e.encryptedMPI1.bytes) - case PubKeyAlgoElGamal: - mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) - default: - return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) - } - - serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) - - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) - - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - writeMPIs(w, e.encryptedMPI1) - case PubKeyAlgoElGamal: - writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) - default: - panic("internal error") - } - - return nil -} - -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. 
-// If config is nil, sensible defaults will be used. -func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - return writeMPI(w, 8*uint16(len(cipherText)), cipherText) -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - err = writeBig(w, c1) - if err != nil { - return err - } - return writeBig(w, c2) -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/golang.org/x/crypto/openpgp/packet/literal.go deleted file mode 100644 index 1a9ec6e5..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/literal.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "encoding/binary" - "io" -) - -// LiteralData represents an encrypted file. See RFC 4880, section 5.9. -type LiteralData struct { - IsBinary bool - FileName string - Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. - Body io.Reader -} - -// ForEyesOnly returns whether the contents of the LiteralData have been marked -// as especially sensitive. 
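
Before encryption, SerializeEncryptedKey above packs the session key into a fixed layout: cipher octet, key, two-octet additive checksum. A sketch of that block (the function name is ours):

package main

import "fmt"

// sessionKeyBlock builds the plaintext that SerializeEncryptedKey
// encrypts to the recipient: one cipher-algorithm octet, the session
// key, and a two-octet checksum (sum of the key octets mod 65536).
func sessionKeyBlock(cipherAlgo byte, key []byte) []byte {
	var sum uint16
	for _, b := range key {
		sum += uint16(b)
	}
	block := append([]byte{cipherAlgo}, key...)
	return append(block, byte(sum>>8), byte(sum))
}

func main() {
	// 7 = AES-128; a real session key would be 16 random octets.
	fmt.Printf("% x\n", sessionKeyBlock(7, []byte{0x01, 0x02, 0xff}))
}
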
-func (l *LiteralData) ForEyesOnly() bool { - return l.FileName == "_CONSOLE" -} - -func (l *LiteralData) parse(r io.Reader) (err error) { - var buf [256]byte - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - - l.IsBinary = buf[0] == 'b' - fileNameLen := int(buf[1]) - - _, err = readFull(r, buf[:fileNameLen]) - if err != nil { - return - } - - l.FileName = string(buf[:fileNameLen]) - - _, err = readFull(r, buf[:4]) - if err != nil { - return - } - - l.Time = binary.BigEndian.Uint32(buf[:4]) - l.Body = r - return -} - -// SerializeLiteral serializes a literal data packet to w and returns a -// WriteCloser to which the data itself can be written and which MUST be closed -// on completion. The fileName is truncated to 255 bytes. -func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { - var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' - } - if len(fileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - - inner, err := serializeStreamHeader(w, packetTypeLiteralData) - if err != nil { - return - } - - _, err = inner.Write(buf[:2]) - if err != nil { - return - } - _, err = inner.Write([]byte(fileName)) - if err != nil { - return - } - binary.BigEndian.PutUint32(buf[:], time) - _, err = inner.Write(buf[:]) - if err != nil { - return - } - - plaintext = inner - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go deleted file mode 100644 index ce2a33a5..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 - -package packet - -import ( - "crypto/cipher" -) - -type ocfbEncrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// An OCFBResyncOption determines if the "resynchronization step" of OCFB is -// performed. -type OCFBResyncOption bool - -const ( - OCFBResync OCFBResyncOption = true - OCFBNoResync OCFBResyncOption = false -) - -// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block, and an initial amount of -// ciphertext. randData must be random bytes and be the same length as the -// cipher.Block's block size. Resync determines if the "resynchronization step" -// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on -// this point. 
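
The literal-data header that parse and SerializeLiteral above agree on is a handful of fields in a fixed order. A sketch that builds just that prefix (the helper name is ours; a zero timestamp means undefined):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// literalHeader builds the body prefix written by SerializeLiteral:
// format octet ('b' or 't'), filename length, filename, 4-byte time.
func literalHeader(isBinary bool, fileName string, t uint32) []byte {
	var buf bytes.Buffer
	format := byte('t')
	if isBinary {
		format = 'b'
	}
	if len(fileName) > 255 {
		fileName = fileName[:255] // the length field is one octet
	}
	buf.WriteByte(format)
	buf.WriteByte(byte(len(fileName)))
	buf.WriteString(fileName)
	binary.Write(&buf, binary.BigEndian, t)
	return buf.Bytes()
}

func main() {
	fmt.Printf("% x\n", literalHeader(true, "a.txt", 0))
}
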
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { - blockSize := block.BlockSize() - if len(randData) != blockSize { - return nil, nil - } - - x := &ocfbEncrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefix := make([]byte, blockSize+2) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefix[i] = randData[i] ^ x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] - prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - return x, prefix -} - -func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - x.fre[x.outUsed] ^= src[i] - dst[i] = x.fre[x.outUsed] - x.outUsed++ - } -} - -type ocfbDecrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block. Prefix must be the first -// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. If an incorrect key is detected then nil is returned. On -// successful exit, blockSize+2 bytes of decrypted data are written into -// prefix. Resync determines if the "resynchronization step" from RFC 4880, -// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. -func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { - blockSize := block.BlockSize() - if len(prefix) != blockSize+2 { - return nil - } - - x := &ocfbDecrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefixCopy := make([]byte, len(prefix)) - copy(prefixCopy, prefix) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefixCopy[i] ^= x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefixCopy[blockSize] ^= x.fre[0] - prefixCopy[blockSize+1] ^= x.fre[1] - - if prefixCopy[blockSize-2] != prefixCopy[blockSize] || - prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { - return nil - } - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - copy(prefix, prefixCopy) - return x -} - -func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - c := src[i] - dst[i] = x.fre[x.outUsed] ^ src[i] - x.fre[x.outUsed] = c - x.outUsed++ - } -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go deleted file mode 100644 index 17135033..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" - "io" - "strconv" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. 
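
The two OCFB constructors above are exported, so the mode can be exercised directly; a round-trip sketch, assuming the upstream golang.org/x/crypto/openpgp/packet import still resolves (the zero IV is only for determinism here; real use requires fresh random bytes):

package main

import (
	"crypto/aes"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	block, _ := aes.NewCipher(make([]byte, 16))
	iv := make([]byte, 16) // MUST be fresh random bytes in real use

	enc, prefix := packet.NewOCFBEncrypter(block, iv, packet.OCFBResync)
	ct := make([]byte, 5)
	enc.XORKeyStream(ct, []byte("hello"))

	// The decrypter re-derives its state from the blockSize+2 prefix
	// and returns nil if the quick-check bytes do not match the key.
	dec := packet.NewOCFBDecrypter(block, prefix, packet.OCFBResync)
	pt := make([]byte, len(ct))
	dec.XORKeyStream(pt, ct)
	fmt.Println(string(pt)) // hello
}
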
-type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. -func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go deleted file mode 100644 index 456d807f..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "io" - "io/ioutil" - - "golang.org/x/crypto/openpgp/errors" -) - -// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is -// useful for splitting and storing the original packet contents separately, -// handling unsupported packet types or accessing parts of the packet not yet -// implemented by this package. -type OpaquePacket struct { - // Packet type - Tag uint8 - // Reason why the packet was parsed opaquely - Reason error - // Binary contents of the packet data - Contents []byte -} - -func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = ioutil.ReadAll(r) - return -} - -// Serialize marshals the packet to a writer in its original form, including -// the packet header. -func (op *OpaquePacket) Serialize(w io.Writer) (err error) { - err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) - if err == nil { - _, err = w.Write(op.Contents) - } - return -} - -// Parse attempts to parse the opaque contents into a structure supported by -// this package. If the packet is not known then the result will be another -// OpaquePacket. -func (op *OpaquePacket) Parse() (p Packet, err error) { - hdr := bytes.NewBuffer(nil) - err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) - if err != nil { - op.Reason = err - return op, err - } - p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) - if err != nil { - op.Reason = err - p = op - } - return -} - -// OpaqueReader reads OpaquePackets from an io.Reader. -type OpaqueReader struct { - r io.Reader -} - -func NewOpaqueReader(r io.Reader) *OpaqueReader { - return &OpaqueReader{r: r} -} - -// Read the next OpaquePacket. 
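
The one-pass signature body serialized above is a fixed 13 octets. A sketch that lays it out byte for byte (the helper name is ours; 8 is the RFC 4880 hash ID for SHA-256):

package main

import (
	"encoding/binary"
	"fmt"
)

// onePassBody lays out the 13-byte one-pass signature body written
// by Serialize: version, sig type, hash ID, pubkey algorithm,
// 8-byte key ID, and an is-last flag.
func onePassBody(sigType, hashID, pubKeyAlgo byte, keyID uint64, last bool) [13]byte {
	var b [13]byte
	b[0] = 3 // onePassSignatureVersion
	b[1] = sigType
	b[2] = hashID
	b[3] = pubKeyAlgo
	binary.BigEndian.PutUint64(b[4:12], keyID)
	if last {
		b[12] = 1
	}
	return b
}

func main() {
	b := onePassBody(0 /* binary */, 8 /* SHA-256 */, 1 /* RSA */, 0xdeadbeefcafef00d, true)
	fmt.Printf("% x\n", b[:])
}
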
-func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { - tag, _, contents, err := readHeader(or.r) - if err != nil { - return - } - op = &OpaquePacket{Tag: uint8(tag), Reason: err} - err = op.parse(contents) - if err != nil { - consumeAll(contents) - } - return -} - -// OpaqueSubpacket represents an unparsed OpenPGP subpacket, -// as found in signature and user attribute packets. -type OpaqueSubpacket struct { - SubType uint8 - Contents []byte -} - -// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from -// their byte representation. -func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { - var ( - subHeaderLen int - subPacket *OpaqueSubpacket - ) - for len(contents) > 0 { - subHeaderLen, subPacket, err = nextSubpacket(contents) - if err != nil { - break - } - result = append(result, subPacket) - contents = contents[subHeaderLen+len(subPacket.Contents):] - } - return -} - -func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { - // RFC 4880, section 5.2.3.1 - var subLen uint32 - if len(contents) < 1 { - goto Truncated - } - subPacket = &OpaqueSubpacket{} - switch { - case contents[0] < 192: - subHeaderLen = 2 // 1 length byte, 1 subtype byte - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]) - contents = contents[1:] - case contents[0] < 255: - subHeaderLen = 3 // 2 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 - contents = contents[2:] - default: - subHeaderLen = 6 // 5 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[1])<<24 | - uint32(contents[2])<<16 | - uint32(contents[3])<<8 | - uint32(contents[4]) - contents = contents[5:] - } - if subLen > uint32(len(contents)) || subLen == 0 { - goto Truncated - } - subPacket.SubType = contents[0] - subPacket.Contents = contents[1:subLen] - return -Truncated: - err = errors.StructuralError("subpacket truncated") - return -} - -func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { - buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) - buf[n] = osp.SubType - if _, err = w.Write(buf[:n+1]); err != nil { - return - } - _, err = w.Write(osp.Contents) - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go deleted file mode 100644 index 5af64c54..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,551 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -package packet // import "golang.org/x/crypto/openpgp/packet" - -import ( - "bufio" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rsa" - "io" - "math/big" - - "golang.org/x/crypto/cast5" - "golang.org/x/crypto/openpgp/errors" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. -func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
-func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - lengthByte [1]byte -} - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - for len(p) > 0 { - for power := uint(14); power < 32; power-- { - l := 1 << power - if len(p) >= l { - w.lengthByte[0] = 224 + uint8(power) - _, err = w.w.Write(w.lengthByte[:]) - if err != nil { - return - } - var m int - m, err = w.w.Write(p[:l]) - n += m - if err != nil { - return - } - p = p[l:] - break - } - } - } - return -} - -func (w *partialLengthWriter) Close() error { - w.lengthByte[0] = 0 - _, err := w.w.Write(w.lengthByte[:]) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. 
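
readLength above is the decoder for RFC 4880's new-format lengths, including the power-of-two partial form handled by partialLengthReader. A stand-alone sketch of the same decoding (it assumes the caller supplied enough octets):

package main

import "fmt"

// decodeLength decodes an RFC 4880 4.2.2 new-format length.
// isPartial reports the 224..254 partial form, where the value is a
// power of two and further length octets follow later in the stream.
func decodeLength(b []byte) (length int64, isPartial bool, consumed int) {
	switch {
	case b[0] < 192:
		return int64(b[0]), false, 1
	case b[0] < 224:
		return int64(b[0]-192)<<8 + int64(b[1]) + 192, false, 2
	case b[0] < 255:
		return int64(1) << (b[0] & 0x1f), true, 1
	default:
		return int64(b[1])<<24 | int64(b[2])<<16 | int64(b[3])<<8 | int64(b[4]), false, 5
	}
}

func main() {
	fmt.Println(decodeLength([]byte{0xe2}))       // 4 true 1 (partial)
	fmt.Println(decodeLength([]byte{0xc5, 0xfb})) // 1723 false 2
}
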
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - var buf [6]byte - var n int - - buf[0] = 0x80 | 0x40 | byte(ptype) - if length < 192 { - buf[1] = byte(length) - n = 2 - } else if length < 8384 { - length -= 192 - buf[1] = 192 + byte(length>>8) - buf[2] = byte(length) - n = 3 - } else { - buf[1] = 255 - buf[2] = byte(length >> 24) - buf[3] = byte(length >> 16) - buf[4] = byte(length >> 8) - buf[5] = byte(length) - n = 6 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. -func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } -} - -// packetType represents the numeric ids of the different OpenPGP packet types. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 -) - -// peekVersion detects the version of a public key packet about to -// be read. A bufio.Reader at the original position of the io.Reader -// is returned. 
-func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { - bufr = bufio.NewReader(r) - var verBuf []byte - if verBuf, err = bufr.Peek(1); err != nil { - return - } - ver = verBuf[0] - return -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - var version byte - // Detect signature version - if contents, version, err = peekVersion(contents); err != nil { - return - } - if version < 4 { - p = new(SignatureV3) - } else { - p = new(Signature) - } - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - var version byte - if contents, version, err = peekVersion(contents); err != nil { - return - } - isSubkey := tag == packetTypePublicSubkey - if version < 4 { - p = &PublicKeyV3{IsSubkey: isSubkey} - } else { - p = &PublicKey{IsSubkey: isSubkey} - } - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - p = new(SymmetricallyEncrypted) - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: - se := new(SymmetricallyEncrypted) - se.MDC = true - p = se - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0 - SigTypeText = 1 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. - PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 - - // Deprecated in RFC 4880, Section 13.5. Use key flags instead. - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. 
-func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction uint8 - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case Cipher3DES: - return 24 - case CipherCAST5: - return cast5.KeySize - case CipherAES128: - return 16 - case CipherAES192: - return 24 - case CipherAES256: - return 32 - } - return 0 -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - switch cipher { - case Cipher3DES: - return des.BlockSize - case CipherCAST5: - return 8 - case CipherAES128, CipherAES192, CipherAES256: - return 16 - } - return 0 -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - switch cipher { - case Cipher3DES: - block, _ = des.NewTripleDESCipher(key) - case CipherCAST5: - block, _ = cast5.NewCipher(key) - case CipherAES128, CipherAES192, CipherAES256: - block, _ = aes.NewCipher(key) - } - return -} - -// readMPI reads a big integer from r. The bit length returned is the bit -// length that was specified in r. This is preserved so that the integer can be -// reserialized exactly. -func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { - var buf [2]byte - _, err = readFull(r, buf[0:]) - if err != nil { - return - } - bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - numBytes := (int(bitLength) + 7) / 8 - mpi = make([]byte, numBytes) - _, err = readFull(r, mpi) - // According to RFC 4880 3.2. we should check that the MPI has no leading - // zeroes (at least when not an encrypted MPI?), but this implementation - // does generate leading zeroes, so we keep accepting them. - return -} - -// writeMPI serializes a big integer to w. -func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { - // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. - // Implementations seem to be tolerant of them, and stripping them would - // make it complex to guarantee matching re-serialization. - _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) - if err == nil { - _, err = w.Write(mpiBytes) - } - return -} - -// writeBig serializes a *big.Int to w. -func writeBig(w io.Writer, i *big.Int) error { - return writeMPI(w, uint16(i.BitLen()), i.Bytes()) -} - -// padToKeySize left-pads a MPI with zeroes to match the length of the -// specified RSA public. -func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { - k := (pub.N.BitLen() + 7) / 8 - if len(b) >= k { - return b - } - bb := make([]byte, k) - copy(bb[len(bb)-len(b):], b) - return bb -} - -// CompressionAlgo Represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. 
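
readMPI and writeMPI above preserve the stated bit length so values re-serialize byte for byte. A sketch of the encoding side for a big.Int (the helper name is ours):

package main

import (
	"fmt"
	"math/big"
)

// writeMPIBytes encodes n as an OpenPGP MPI: a 2-octet big-endian
// bit count followed by the magnitude bytes (RFC 4880, section 3.2).
func writeMPIBytes(n *big.Int) []byte {
	bits := uint16(n.BitLen())
	return append([]byte{byte(bits >> 8), byte(bits)}, n.Bytes()...)
}

func main() {
	fmt.Printf("% x\n", writeMPIBytes(big.NewInt(511))) // 00 09 01 ff
}
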
-type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go deleted file mode 100644 index 6f8ec093..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,385 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "crypto/sha1" - "io" - "io/ioutil" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer. - sha1Checksum bool - iv []byte -} - -func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that -// implements RSA or ECDSA. -func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { - pk := new(PrivateKey) - // In general, the public Keys should be used as pointers. We still - // type-switch on the values, for backwards-compatibility. 
- switch pubkey := signer.Public().(type) { - case *rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) - case rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) - case *ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) - case ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) - default: - panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") - } - pk.PrivateKey = signer - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - - s2kType := buf[0] - - switch s2kType { - case 0: - pk.s2k = nil - pk.Encrypted = false - case 254, 255: - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - pk.Encrypted = true - pk.s2k, err = s2k.Parse(r) - if err != nil { - return - } - if s2kType == 254 { - pk.sha1Checksum = true - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - pk.encryptedData, err = ioutil.ReadAll(r) - if err != nil { - return - } - - if !pk.Encrypted { - return pk.parsePrivateKey(pk.encryptedData) - } - - return -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - // TODO(agl): support encrypted private keys - buf := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(buf) - if err != nil { - return - } - buf.WriteByte(0 /* no encryption */) - - privateKeyBuf := bytes.NewBuffer(nil) - - switch priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(privateKeyBuf, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(privateKeyBuf, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(privateKeyBuf, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(privateKeyBuf, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - if err != nil { - return - } - - ptype := packetTypePrivateKey - contents := buf.Bytes() - privateKeyBytes := privateKeyBuf.Bytes() - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) - if err != nil { - return - } - _, err = w.Write(contents) - if err != nil { - return - } - _, err = w.Write(privateKeyBytes) - if err != nil { - return - } - - checksum := mod64kHash(privateKeyBytes) - var checksumBytes [2]byte - checksumBytes[0] = byte(checksum >> 8) - checksumBytes[1] = byte(checksum) - _, err = w.Write(checksumBytes[:]) - - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - err := writeBig(w, priv.D) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[1]) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[0]) - if err != nil { - return err - } - return writeBig(w, priv.Precomputed.Qinv) -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) 
error { - return writeBig(w, priv.X) -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - return writeBig(w, priv.D) -} - -// Decrypt decrypts an encrypted private key using a passphrase. -func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if !pk.Encrypted { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - return pk.parsePrivateKey(data) -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - p, _, err := readMPI(buf) - if err != nil { - return - } - q, _, err := readMPI(buf) - if err != nil { - return - } - - rsaPriv.D = new(big.Int).SetBytes(d) - rsaPriv.Primes = make([]*big.Int, 2) - rsaPriv.Primes[0] = new(big.Int).SetBytes(p) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q) - if err := rsaPriv.Validate(); err != nil { - return err - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - dsaPriv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = dsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - priv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = priv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - - pk.PrivateKey = 
&ecdsa.PrivateKey{ - PublicKey: *ecdsaPub, - D: new(big.Int).SetBytes(d), - } - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go deleted file mode 100644 index fcd5f525..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -var ( - // NIST curve P-256 - oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} - // NIST curve P-384 - oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} - // NIST curve P-521 - oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} -) - -const maxOIDLength = 8 - -// ecdsaKey stores the algorithm-specific fields for ECDSA keys. -// as defined in RFC 6637, Section 9. -type ecdsaKey struct { - // oid contains the OID byte sequence identifying the elliptic curve used - oid []byte - // p contains the elliptic curve point that represents the public key - p parsedMPI -} - -// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. -func parseOID(r io.Reader) (oid []byte, err error) { - buf := make([]byte, maxOIDLength) - if _, err = readFull(r, buf[:1]); err != nil { - return - } - oidLen := buf[0] - if int(oidLen) > len(buf) { - err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) - return - } - oid = buf[:oidLen] - _, err = readFull(r, oid) - return -} - -func (f *ecdsaKey) parse(r io.Reader) (err error) { - if f.oid, err = parseOID(r); err != nil { - return err - } - f.p.bytes, f.p.bitLength, err = readMPI(r) - return -} - -func (f *ecdsaKey) serialize(w io.Writer) (err error) { - buf := make([]byte, maxOIDLength+1) - buf[0] = byte(len(f.oid)) - copy(buf[1:], f.oid) - if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { - return - } - return writeMPIs(w, f.p) -} - -func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { - var c elliptic.Curve - if bytes.Equal(f.oid, oidCurveP256) { - c = elliptic.P256() - } else if bytes.Equal(f.oid, oidCurveP384) { - c = elliptic.P384() - } else if bytes.Equal(f.oid, oidCurveP521) { - c = elliptic.P521() - } else { - return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) - } - x, y := elliptic.Unmarshal(c, f.p.bytes) - if x == nil { - return nil, errors.UnsupportedError("failed to parse EC point") - } - return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil -} - -func (f *ecdsaKey) byteLen() int { - return 1 + len(f.oid) + 2 + len(f.p.bytes) -} - -type kdfHashFunction byte -type kdfAlgorithm byte - -// ecdhKdf stores key derivation function parameters -// used for ECDH encryption. See RFC 6637, Section 9. 
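
newECDSA above recognizes exactly three curve OIDs. The mapping as a stand-alone sketch (the helper name is ours):

package main

import (
	"bytes"
	"crypto/elliptic"
	"fmt"
)

// curveForOID mirrors ecdsaKey.newECDSA: the RFC 6637 OID octets
// select one of the three NIST curves the package supported.
func curveForOID(oid []byte) (elliptic.Curve, bool) {
	switch {
	case bytes.Equal(oid, []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}):
		return elliptic.P256(), true
	case bytes.Equal(oid, []byte{0x2B, 0x81, 0x04, 0x00, 0x22}):
		return elliptic.P384(), true
	case bytes.Equal(oid, []byte{0x2B, 0x81, 0x04, 0x00, 0x23}):
		return elliptic.P521(), true
	}
	return nil, false
}

func main() {
	c, ok := curveForOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22})
	fmt.Println(ok, c.Params().Name) // true P-384
}
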
-type ecdhKdf struct { - KdfHash kdfHashFunction - KdfAlgo kdfAlgorithm -} - -func (f *ecdhKdf) parse(r io.Reader) (err error) { - buf := make([]byte, 1) - if _, err = readFull(r, buf); err != nil { - return - } - kdfLen := int(buf[0]) - if kdfLen < 3 { - return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - buf = make([]byte, kdfLen) - if _, err = readFull(r, buf); err != nil { - return - } - reserved := int(buf[0]) - f.KdfHash = kdfHashFunction(buf[1]) - f.KdfAlgo = kdfAlgorithm(buf[2]) - if reserved != 0x01 { - return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) - } - return -} - -func (f *ecdhKdf) serialize(w io.Writer) (err error) { - buf := make([]byte, 4) - // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. - buf[0] = byte(0x03) // Length of the following fields - buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now - buf[2] = byte(f.KdfHash) - buf[3] = byte(f.KdfAlgo) - _, err = w.Write(buf[:]) - return -} - -func (f *ecdhKdf) byteLen() int { - return 4 -} - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. -type PublicKey struct { - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey - Fingerprint [20]byte - KeyId uint64 - IsSubkey bool - - n, e, p, q, g, y parsedMPI - - // RFC 6637 fields - ec *ecdsaKey - ecdh *ecdhKdf -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. -type signingKey interface { - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -func fromBig(n *big.Int) parsedMPI { - return parsedMPI{ - bytes: n.Bytes(), - bitLength: uint16(n.BitLen()), - } -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. -func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: fromBig(pub.P), - q: fromBig(pub.Q), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: fromBig(pub.P), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - ec: new(ecdsaKey), - } - - switch pub.Curve { - case elliptic.P256(): - pk.ec.oid = oidCurveP256 - case elliptic.P384(): - pk.ec.oid = oidCurveP384 - case elliptic.P521(): - pk.ec.oid = oidCurveP521 - default: - panic("unknown elliptic curve") - } - - pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - - // The bit length is 3 (for the 0x04 specifying an uncompressed key) - // plus two field elements (for x and y), which are rounded up to the - // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 - fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 - pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoECDSA: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return err - } - pk.PublicKey, err = pk.ec.newECDSA() - case PubKeyAlgoECDH: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return - } - pk.ecdh = new(ecdhKdf) - if err = pk.ecdh.parse(r); err != nil { - return - } - // The ECDH key is stored in an ecdsa.PublicKey for convenience. - pk.PublicKey, err = pk.ec.newECDSA() - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKey) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := sha1.New() - pk.SerializeSignaturePrefix(fingerPrint) - pk.serializeWithoutHeaders(fingerPrint) - copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n.bytes, pk.n.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.e.bytes, pk.e.bitLength, err = readMPI(r) - if err != nil { - return - } - - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.bytes), - E: 0, - } - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. 
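
setFingerPrintAndKeyId above implements RFC 4880 section 12.2: SHA-1 over a 0x99 prefix, a two-octet length, and the serialized key body, with the key ID taken from the last eight fingerprint octets. A sketch over an arbitrary body (the input here is a toy, not a real key packet):

package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
)

// fingerprint computes a v4 fingerprint: SHA-1 over 0x99, a 2-octet
// big-endian length, and the public key body; the 64-bit key ID is
// the low 8 octets of the fingerprint.
func fingerprint(keyBody []byte) ([20]byte, uint64) {
	h := sha1.New()
	h.Write([]byte{0x99, byte(len(keyBody) >> 8), byte(len(keyBody))})
	h.Write(keyBody)
	var fp [20]byte
	copy(fp[:], h.Sum(nil))
	return fp, binary.BigEndian.Uint64(fp[12:20])
}

func main() {
	fp, keyID := fingerprint([]byte{0x04}) // toy body, not a real key
	fmt.Printf("%x %016x\n", fp, keyID)
}
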
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.q.bytes, pk.q.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.bytes) - dsa.Q = new(big.Int).SetBytes(pk.q.bytes) - dsa.G = new(big.Int).SetBytes(pk.g.bytes) - dsa.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.bytes) - elgamal.G = new(big.Int).SetBytes(pk.g.bytes) - elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = elgamal - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - case PubKeyAlgoDSA: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.q.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoElGamal: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoECDSA: - pLength += uint16(pk.ec.byteLen()) - case PubKeyAlgoECDH: - pLength += uint16(pk.ec.byteLen()) - pLength += uint16(pk.ecdh.byteLen()) - default: - panic("unknown public key algorithm") - } - pLength += 6 - h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - case PubKeyAlgoDSA: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.q.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoElGamal: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoECDSA: - length += pk.ec.byteLen() - case PubKeyAlgoECDH: - length += pk.ec.byteLen() - length += pk.ecdh.byteLen() - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. 
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [6]byte - buf[0] = 4 - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - buf[5] = byte(pk.PubKeyAlgo) - - _, err = w.Write(buf[:]) - if err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - case PubKeyAlgoDSA: - return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) - case PubKeyAlgoElGamal: - return writeMPIs(w, pk.p, pk.g, pk.y) - case PubKeyAlgoECDSA: - return pk.ec.serialize(w) - case PubKeyAlgoECDH: - if err = pk.ec.serialize(w); err != nil { - return - } - return pk.ecdh.serialize(w) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
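// [Editorial sketch - not part of the deleted file or the patch.] The DSA
// branches in VerifySignature above and VerifySignatureV3 below both shorten
// the digest before verifying: per FIPS 186-3, section 4.6, DSA consumes only
// the leftmost bits of the hash, up to the bit length of the subgroup order
// Q. Isolated as a helper:

package sketch

import "crypto/dsa"

func truncateForDSA(digest []byte, pub *dsa.PublicKey) []byte {
	subgroupSize := (pub.Q.BitLen() + 7) / 8 // bytes needed to represent Q
	if len(digest) > subgroupSize {
		return digest[:subgroupSize]
	}
	return digest
}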
-func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - case PubKeyAlgoDSA: - dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - default: - panic("shouldn't happen") - } -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - signed.SerializeSignaturePrefix(h) - signed.serializeWithoutHeaders(h) - return -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. - if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. 
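// [Editorial sketch - not part of the deleted file or the patch.] The RSA
// branches above bottom out in the standard library's PKCS #1 v1.5 routines.
// Outside any OpenPGP framing, the same sign/verify round trip looks like
// this (SHA-256 chosen arbitrarily for the demo):

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256([]byte("signed data"))
	sig, err := rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, digest[:])
	if err != nil {
		panic(err)
	}
	// A nil error here is what the deleted code maps to "signature valid".
	fmt.Println("verify:", rsa.VerifyPKCS1v15(&priv.PublicKey, crypto.SHA256, digest[:], sig))
}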
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// A parsedMPI is used to store the contents of a big integer, along with the -// bit length that was specified in the original input. This allows the MPI to -// be reserialized exactly. -type parsedMPI struct { - bytes []byte - bitLength uint16 -} - -// writeMPIs is a utility function for serializing several big integers to the -// given Writer. -func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { - for _, mpi := range mpis { - err = writeMPI(w, mpi.bitLength, mpi.bytes) - if err != nil { - return - } - } - return -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - case PubKeyAlgoDSA: - bitLength = pk.p.bitLength - case PubKeyAlgoElGamal: - bitLength = pk.p.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go deleted file mode 100644 index 5daf7b6c..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
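// [Editorial sketch - not part of the deleted files or the patch.] The
// parsedMPI type and writeMPIs helper above round-trip RFC 4880, section 3.2
// multiprecision integers: a two-octet big-endian bit count followed by the
// big-endian magnitude bytes. A self-contained encoder, with the hypothetical
// name encodeMPI:

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

func encodeMPI(n *big.Int) []byte {
	mag := n.Bytes()
	out := make([]byte, 2+len(mag))
	binary.BigEndian.PutUint16(out, uint16(n.BitLen()))
	copy(out[2:], mag)
	return out
}

func main() {
	fmt.Printf("% x\n", encodeMPI(big.NewInt(511))) // prints: 00 09 01 ff
}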
- -package packet - -import ( - "crypto" - "crypto/md5" - "crypto/rsa" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" -) - -// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and -// should not be used for signing or encrypting. They are supported here only for -// parsing version 3 key material and validating signatures. -// See RFC 4880, section 5.5.2. -type PublicKeyV3 struct { - CreationTime time.Time - DaysToExpire uint16 - PubKeyAlgo PublicKeyAlgorithm - PublicKey *rsa.PublicKey - Fingerprint [16]byte - KeyId uint64 - IsSubkey bool - - n, e parsedMPI -} - -// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. -// Included here for testing purposes only. RFC 4880, section 5.5.2: -// "an implementation MUST NOT generate a V3 key, but MAY accept it." -func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { - pk := &PublicKeyV3{ - CreationTime: creationTime, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKeyV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [8]byte - if _, err = readFull(r, buf[:]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKeyV3) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := md5.New() - fingerPrint.Write(pk.n.bytes) - fingerPrint.Write(pk.e.bytes) - fingerPrint.Sum(pk.Fingerprint[:0]) - pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { - if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { - return - } - if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { - return - } - - // RFC 4880 Section 12.2 requires the low 8 bytes of the - // modulus to form the key id. - if len(pk.n.bytes) < 8 { - return errors.StructuralError("v3 public key modulus is too short") - } - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. 
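// [Editorial sketch - not part of the deleted file or the patch.] For v3
// keys, setFingerPrintAndKeyId above derives the fingerprint as MD5 over the
// raw modulus and exponent bytes (with no length framing) and takes the key
// ID from the low 64 bits of the modulus. Neither value binds the creation
// time or algorithm, which is part of why v3 keys are deprecated:

package sketch

import (
	"crypto/md5"
	"encoding/binary"
)

func v3Identity(modulus, exponent []byte) (fingerprint [16]byte, keyID uint64) {
	h := md5.New()
	h.Write(modulus)
	h.Write(exponent)
	copy(fingerprint[:], h.Sum(nil))
	keyID = binary.BigEndian.Uint64(modulus[len(modulus)-8:]) // parseRSA enforces len >= 8
	return
}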
-func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - default: - panic("unknown public key algorithm") - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { - length := 8 // 8 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - if err = serializeHeader(w, packetType, length); err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [8]byte - // Version 3 - buf[0] = 3 - // Creation time - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - // Days to expire - buf[5] = byte(pk.DaysToExpire >> 8) - buf[6] = byte(pk.DaysToExpire) - // Public key algorithm - buf[7] = byte(pk.PubKeyAlgo) - - if _, err = w.Write(buf[:]); err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKeyV3) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - default: - // V3 public keys only support RSA. - panic("shouldn't happen") - } -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of signed. 
-func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// userIdSignatureV3Hash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { - if !hfn.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hfn.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - h.Write([]byte(id)) - - return -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKeyV3) KeyIdString() string { - return fmt.Sprintf("%X", pk.KeyId) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKeyV3) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go deleted file mode 100644 index 34bc7c61..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. -func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err - } - } - - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. 
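// [Editorial sketch - not part of the deleted file or the patch.] The Reader
// above is a bounded stack: compressed or encrypted packets push a nested
// source, Next pops on io.EOF, and maxReaders caps the depth against the
// "PGP quine" recursion attack (CVE-2013-4402). The same shape in isolation,
// with hypothetical names:

package sketch

import (
	"errors"
	"io"
)

const maxDepth = 32

type readerStack struct{ readers []io.Reader }

func (s *readerStack) push(r io.Reader) error {
	if len(s.readers) >= maxDepth {
		return errors.New("too many layers of packets")
	}
	s.readers = append(s.readers, r)
	return nil
}

func (s *readerStack) pop() { s.readers = s.readers[:len(s.readers)-1] } // caller pops on io.EOF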
-func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go deleted file mode 100644 index b2a24a53..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "encoding/asn1" - "encoding/binary" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. - KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage -) - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. - HashTag [2]byte - CreationTime time.Time - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI - ECDSASigR, ECDSASigS parsedMPI - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - IssuerKeyId *uint64 - IsPrimaryId *bool - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *uint8 - RevocationReasonText string - - // MDC is set if this signature has a feature packet that indicates - // support for MDC subpackets. - MDC bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. 
- EmbeddedSignature *Signature - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - l := 6 + hashedSubpacketsLength - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - copy(sig.HashSuffix[1:], buf[:5]) - hashedSubpackets := sig.HashSuffix[6:l] - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - // See RFC 4880, section 5.2.4 - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = uint8(l >> 24) - trailer[3] = uint8(l >> 16) - trailer[4] = uint8(l >> 8) - trailer[5] = uint8(l) - - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - } - case PubKeyAlgoECDSA: - sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. 
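// [Editorial sketch - not part of the deleted file or the patch.] The six
// trailer octets that parse appends to HashSuffix above (and that
// buildHashSuffix recreates when signing) are fixed by RFC 4880, section
// 5.2.4: version 4, 0xFF, then the length of the hashed prefix as a
// four-octet big-endian count:

package sketch

func v4Trailer(hashedLen int) [6]byte {
	return [6]byte{
		4, 0xff,
		byte(hashedLen >> 24), byte(hashedLen >> 16),
		byte(hashedLen >> 8), byte(hashedLen),
	}
}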
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - primaryUserIdSubpacket signatureSubpacketType = 25 - keyFlagsSubpacket signatureSubpacketType = 27 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. -func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - switch packetType { - case creationTimeSubpacket: - if !isHashed { - err = errors.StructuralError("signature creation time in non-hashed area") - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - if !isHashed { - return - } - sig.PreferredSymmetric = make([]byte, len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - // Issuer, section 5.2.3.5 - if 
len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - if !isHashed { - return - } - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if !isHashed { - return - } - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. In practice, the subpacket is used exclusively to - // indicate support for MDC-protected encryption. - sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. - if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. 
- if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. -func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired. -func (sig *Signature) KeyExpired(currentTime time.Time) bool { - if sig.KeyLifetimeSecs == nil { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. -func (sig *Signature) buildHashSuffix() (err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - - var ok bool - l := 6 + hashedSubpacketsLen - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - sig.HashSuffix[1] = uint8(sig.SigType) - sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) - sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) - sig.HashSuffix[5] = byte(hashedSubpacketsLen) - serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = byte(l >> 24) - trailer[3] = byte(l >> 16) - trailer[4] = byte(l >> 8) - trailer[5] = byte(l) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - err = sig.buildHashSuffix() - if err != nil { - return - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. 
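// [Editorial sketch - not part of the deleted file or the patch.]
// serializeSubpacketLength above writes the RFC 4880, section 5.2.3.1
// variable-length form that parseSignatureSubpacket decodes earlier in this
// file: one octet below 192 encodes itself, 192..254 opens a two-octet form
// covering 192..16319, and 255 introduces a four-octet big-endian count.
// The decoder, isolated:

package sketch

import "errors"

func parseSubpacketLength(b []byte) (length uint32, consumed int, err error) {
	switch {
	case len(b) == 0:
		return 0, 0, errors.New("truncated subpacket length")
	case b[0] < 192:
		return uint32(b[0]), 1, nil
	case b[0] < 255:
		if len(b) < 2 {
			return 0, 0, errors.New("truncated subpacket length")
		}
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, 2, nil
	default:
		if len(b) < 5 {
			return 0, 0, errors.New("truncated subpacket length")
		}
		return uint32(b[1])<<24 | uint32(b[2])<<16 | uint32(b[3])<<8 | uint32(b[4]), 5, nil
	}
}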
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - sig.outSubpackets = sig.buildSubpackets() - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - // supports both *rsa.PrivateKey and crypto.Signer - sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR.bytes = r.Bytes() - sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) - sig.DSASigS.bytes = s.Bytes() - sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) - } - case PubKeyAlgoECDSA: - var r, s *big.Int - if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { - // direct support, avoid asn1 wrapping/unwrapping - r, s, err = ecdsa.Sign(config.Random(), pk, digest) - } else { - var b []byte - b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - if err == nil { - r, s, err = unwrapECDSASig(b) - } - } - if err == nil { - sig.ECDSASigR = fromBig(r) - sig.ECDSASigS = fromBig(s) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA -// signature. -func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { - var ecsdaSig struct { - R, S *big.Int - } - _, err = asn1.Unmarshal(b, &ecsdaSig) - if err != nil { - return - } - return ecsdaSig.R, ecsdaSig.S, nil -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. 
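// [Editorial sketch - not part of the deleted file or the patch.] The ECDSA
// branch of Sign above prefers ecdsa.Sign directly so it can store r and s
// as two separate MPIs, falling back to a generic crypto.Signer and
// unwrapping the ASN.1 DER pair it returns. The direct path, outside OpenPGP
// (P-256 and SHA-256 chosen for the demo):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256([]byte("signed data"))
	r, s, err := ecdsa.Sign(rand.Reader, priv, digest[:])
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", ecdsa.Verify(&priv.PublicKey, digest[:], r, s))
}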
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = 2 + len(sig.RSASignature.bytes) - case PubKeyAlgoDSA: - sigLength = 2 + len(sig.DSASigR.bytes) - sigLength += 2 + len(sig.DSASigS.bytes) - case PubKeyAlgoECDSA: - sigLength = 2 + len(sig.ECDSASigR.bytes) - sigLength += 2 + len(sig.ECDSASigS.bytes) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - - _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) - if err != nil { - return - } - - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - case PubKeyAlgoECDSA: - err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. - subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) - } - - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. 
- - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - // The following subpackets may only appear in self-signatures - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go deleted file mode 100644 index 6edff889..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// SignatureV3 represents older version 3 signatures. These signatures are less secure -// than version 4 and should not be used to create new signatures. They are included -// here for backwards compatibility to read and validate with older key material. -// See RFC 4880, section 5.2.2. -type SignatureV3 struct { - SigType SignatureType - CreationTime time.Time - IssuerKeyId uint64 - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - HashTag [2]byte - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI -} - -func (sig *SignatureV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.2 - var buf [8]byte - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] != 5 { - err = errors.UnsupportedError( - "invalid hashed material length " + strconv.Itoa(int(buf[0]))) - return - } - - // Read hashed material: signature type + creation time - if _, err = readFull(r, buf[:5]); err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - t := binary.BigEndian.Uint32(buf[1:5]) - sig.CreationTime = time.Unix(int64(t), 0) - - // Eight-octet Key ID of signer. 
- if _, err = readFull(r, buf[:8]); err != nil { - return - } - sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) - - // Public-key and hash algorithm - if _, err = readFull(r, buf[:2]); err != nil { - return - } - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - var ok bool - if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - // Two-octet field holding left 16 bits of signed hash value. - if _, err = readFull(r, sig.HashTag[:2]); err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { - return - } - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - default: - panic("unreachable") - } - return -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. -func (sig *SignatureV3) Serialize(w io.Writer) (err error) { - buf := make([]byte, 8) - - // Write the sig type and creation time - buf[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) - if _, err = w.Write(buf[:5]); err != nil { - return - } - - // Write the issuer long key ID - binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) - if _, err = w.Write(buf[:8]); err != nil { - return - } - - // Write public key algorithm, hash ID, and hash value - buf[0] = byte(sig.PubKeyAlgo) - hashId, ok := s2k.HashToHashId(sig.Hash) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) - } - buf[1] = hashId - copy(buf[2:4], sig.HashTag[:]) - if _, err = w.Write(buf[:4]); err != nil { - return - } - - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - default: - panic("impossible") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go deleted file mode 100644 index 744c2d2c..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/cipher" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// This is the largest session key that we'll support. Since no 512-bit cipher -// has even been seriously used, this is comfortably large. -const maxSessionKeySizeInBytes = 64 - -// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC -// 4880, section 5.3. 
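// [Editorial sketch - not part of the deleted file or the patch.] The
// Decrypt method below applies the passphrase-derived key with CFB and an
// all-zero IV; the first recovered octet names the cipher for the following
// data packet. The core of that step, with AES standing in for whatever
// CipherFunction the packet actually specified:

package sketch

import (
	"crypto/aes"
	"crypto/cipher"
	"errors"
)

func decryptSessionKey(derivedKey, encrypted []byte) (cipherAlgo byte, sessionKey []byte, err error) {
	if len(encrypted) < 2 {
		return 0, nil, errors.New("encrypted session key too short")
	}
	block, err := aes.NewCipher(derivedKey) // derivedKey must be 16, 24 or 32 bytes
	if err != nil {
		return 0, nil, err
	}
	iv := make([]byte, block.BlockSize()) // RFC 4880: the IV here is all zeros
	plain := make([]byte, len(encrypted))
	cipher.NewCFBDecrypter(block, iv).XORKeyStream(plain, encrypted)
	return plain[0], plain[1:], nil
}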
-type SymmetricKeyEncrypted struct { - CipherFunc CipherFunction - s2k func(out, in []byte) - encryptedKey []byte -} - -const symmetricKeyEncryptedVersion = 4 - -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - // RFC 4880, section 5.3. - var buf [2]byte - if _, err := readFull(r, buf[:]); err != nil { - return err - } - if buf[0] != symmetricKeyEncryptedVersion { - return errors.UnsupportedError("SymmetricKeyEncrypted version") - } - ske.CipherFunc = CipherFunction(buf[1]) - - if ske.CipherFunc.KeySize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) - } - - var err error - ske.s2k, err = s2k.Parse(r) - if err != nil { - return err - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. - n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { - return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + - "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") - } - return plaintextKey, cipherFunc, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The -// packet contains a random session key, encrypted by a key derived from the -// given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - var buf [2]byte - buf[0] = symmetricKeyEncryptedVersion - buf[1] = byte(cipherFunc) - _, err = w.Write(buf[:]) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - sessionKey := make([]byte, keySize) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - - key = sessionKey - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go deleted file mode 100644 index 6126030e..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/cipher" - "crypto/sha1" - "crypto/subtle" - "golang.org/x/crypto/openpgp/errors" - "hash" - "io" - "strconv" -) - -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. - contents io.Reader - prefix []byte -} - -const symmetricallyEncryptedVersion = 1 - -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.MDC { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - if buf[0] != symmetricallyEncryptedVersion { - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.contents = r - return nil -} - -// Decrypt returns a ReadCloser, from which the decrypted contents of the -// packet can be read. An incorrect key can, with high probability, be detected -// immediately and this will result in a KeyIncorrect error being returned. -func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - keySize := c.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) - } - if len(key) != keySize { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") - } - - if se.prefix == nil { - se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.contents, se.prefix) - if err != nil { - return nil, err - } - } else if len(se.prefix) != c.blockSize()+2 { - return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") - } - - ocfbResync := OCFBResync - if se.MDC { - // MDC packets use a different form of OCFB mode. 
- ocfbResync = OCFBNoResync - } - - s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - if s == nil { - return nil, errors.ErrKeyIncorrect - } - - plaintext := cipher.StreamReader{S: s, R: se.contents} - - if se.MDC { - // MDC packets have an embedded hash that we need to check. - h := sha1.New() - h.Write(se.prefix) - return &seMDCReader{in: plaintext, h: h}, nil - } - - // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. - return seReader{plaintext}, nil -} - -// seReader wraps an io.Reader with a no-op Close method. -type seReader struct { - in io.Reader -} - -func (ser seReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seReader) Close() error { - return nil -} - -const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size - -// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold -// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous contents which is checked -// against the running hash. See RFC 4880, section 5.13. -type seMDCReader struct { - in io.Reader - h hash.Hash - trailer [mdcTrailerSize]byte - scratch [mdcTrailerSize]byte - trailerUsed int - error bool - eof bool -} - -func (ser *seMDCReader) Read(buf []byte) (n int, err error) { - if ser.error { - err = io.ErrUnexpectedEOF - return - } - if ser.eof { - err = io.EOF - return - } - - // If we haven't yet filled the trailer buffer then we must do that - // first. - for ser.trailerUsed < mdcTrailerSize { - n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) - ser.trailerUsed += n - if err == io.EOF { - if ser.trailerUsed != mdcTrailerSize { - n = 0 - err = io.ErrUnexpectedEOF - ser.error = true - return - } - ser.eof = true - n = 0 - return - } - - if err != nil { - n = 0 - return - } - } - - // If it's a short read then we read into a temporary buffer and shift - // the data into the caller's buffer. - if len(buf) <= mdcTrailerSize { - n, err = readFull(ser.in, ser.scratch[:len(buf)]) - copy(buf, ser.trailer[:n]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], ser.trailer[n:]) - copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) - if n < len(buf) { - ser.eof = true - err = io.EOF - } - return - } - - n, err = ser.in.Read(buf[mdcTrailerSize:]) - copy(buf, ser.trailer[:]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], buf[n:]) - - if err == io.EOF { - ser.eof = true - } - return -} - -// This is a new-format packet tag byte for a type 19 (MDC) packet. -const mdcPacketTagByte = byte(0x80) | 0x40 | 19 - -func (ser *seMDCReader) Close() error { - if ser.error { - return errors.SignatureError("error during reading") - } - - for !ser.eof { - // We haven't seen EOF so we need to read to the end - var buf [1024]byte - _, err := ser.Read(buf[:]) - if err == io.EOF { - break - } - if err != nil { - return errors.SignatureError("error during reading") - } - } - - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.SignatureError("MDC packet not found") - } - ser.h.Write(ser.trailer[:2]) - - final := ser.h.Sum(nil) - if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.SignatureError("hash mismatch") - } - return nil -} - -// An seMDCWriter writes through to an io.WriteCloser while maintains a running -// hash of the data written. On close, it emits an MDC packet containing the -// running hash. 
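// [Editorial sketch - not part of the deleted file or the patch.] Close above
// finishes the MDC check: the final 22 plaintext bytes must be a type-19 MDC
// packet - tag byte 0xD3 (new-format tag 19), length 20 - whose SHA-1 body
// equals the running hash of everything before it (including those two
// header octets), compared in constant time. Reduced to its essentials:

package sketch

import (
	"crypto/sha1"
	"crypto/subtle"
	"errors"
)

func checkMDC(finalHash, trailer []byte) error {
	if len(trailer) != 2+sha1.Size {
		return errors.New("MDC trailer has wrong length")
	}
	if trailer[0] != 0xd3 || trailer[1] != sha1.Size {
		return errors.New("MDC packet not found")
	}
	if subtle.ConstantTimeCompare(finalHash, trailer[2:]) != 1 {
		return errors.New("hash mismatch")
	}
	return nil
}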
-type seMDCWriter struct { - w io.WriteCloser - h hash.Hash -} - -func (w *seMDCWriter) Write(buf []byte) (n int, err error) { - w.h.Write(buf) - return w.w.Write(buf) -} - -func (w *seMDCWriter) Close() (err error) { - var buf [mdcTrailerSize]byte - - buf[0] = mdcPacketTagByte - buf[1] = sha1.Size - w.h.Write(buf[:2]) - digest := w.h.Sum(nil) - copy(buf[2:], digest) - - _, err = w.w.Write(buf[:]) - if err != nil { - return - } - return w.w.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") - } - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) - if err != nil { - return - } - - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) - if err != nil { - return - } - - block := c.new(key) - blockSize := block.BlockSize() - iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) - if err != nil { - return - } - s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) - _, err = ciphertext.Write(prefix) - if err != nil { - return - } - plaintext := cipher.StreamWriter{S: s, W: ciphertext} - - h := sha1.New() - h.Write(iv) - h.Write(iv[blockSize-2:]) - contents = &seMDCWriter{w: plaintext, h: h} - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go deleted file mode 100644 index d19ffbc7..00000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "image" - "image/jpeg" - "io" - "io/ioutil" -) - -const UserAttrImageSubpacket = 1 - -// UserAttribute is capable of storing other types of data about a user -// beyond name, email and a text comment. In practice, user attributes are typically used -// to store a signed thumbnail photo JPEG image of the user. -// See RFC 4880, section 5.12. -type UserAttribute struct { - Contents []*OpaqueSubpacket -} - -// NewUserAttributePhoto creates a user attribute packet -// containing the given images. -func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { - uat = new(UserAttribute) - for _, photo := range photos { - var buf bytes.Buffer - // RFC 4880, Section 5.12.1. - data := []byte{ - 0x10, 0x00, // Little-endian image header length (16 bytes) - 0x01, // Image header version 1 - 0x01, // JPEG - 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
-		0, 0, 0, 0,
-		0, 0, 0, 0}
-	if _, err = buf.Write(data); err != nil {
-		return
-	}
-	if err = jpeg.Encode(&buf, photo, nil); err != nil {
-		return
-	}
-	uat.Contents = append(uat.Contents, &OpaqueSubpacket{
-		SubType:  UserAttrImageSubpacket,
-		Contents: buf.Bytes()})
-	}
-	return
-}
-
-// NewUserAttribute creates a new user attribute packet containing the given subpackets.
-func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
-	return &UserAttribute{Contents: contents}
-}
-
-func (uat *UserAttribute) parse(r io.Reader) (err error) {
-	// RFC 4880, section 5.12
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return
-	}
-	uat.Contents, err = OpaqueSubpackets(b)
-	return
-}
-
-// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
-// header.
-func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
-	var buf bytes.Buffer
-	for _, sp := range uat.Contents {
-		sp.Serialize(&buf)
-	}
-	if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
-		return err
-	}
-	_, err = w.Write(buf.Bytes())
-	return
-}
-
-// ImageData returns zero or more byte slices, each containing
-// JPEG File Interchange Format (JFIF), for each photo in the
-// user attribute packet.
-func (uat *UserAttribute) ImageData() (imageData [][]byte) {
-	for _, sp := range uat.Contents {
-		if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
-			imageData = append(imageData, sp.Contents[16:])
-		}
-	}
-	return
-}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/golang.org/x/crypto/openpgp/packet/userid.go
deleted file mode 100644
index d6bea7d4..00000000
--- a/vendor/golang.org/x/crypto/openpgp/packet/userid.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
-	"io"
-	"io/ioutil"
-	"strings"
-)
-
-// UserId contains text that is intended to represent the name and email
-// address of the key holder. See RFC 4880, section 5.11. By convention, this
-// takes the form "Full Name (Comment) <email@example.com>"
-type UserId struct {
-	Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
-
-	Name, Comment, Email string
-}
-
-func hasInvalidCharacters(s string) bool {
-	for _, c := range s {
-		switch c {
-		case '(', ')', '<', '>', 0:
-			return true
-		}
-	}
-	return false
-}
-
-// NewUserId returns a UserId or nil if any of the arguments contain invalid
-// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
-func NewUserId(name, comment, email string) *UserId {
-	// RFC 4880 doesn't deal with the structure of userid strings; the
-	// name, comment and email form is just a convention. However, there's
-	// no convention about escaping the metacharacters and GPG just refuses
-	// to create user ids where, say, the name contains a '('. We mirror
-	// this behaviour.
-
-	if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
-		return nil
-	}
-
-	uid := new(UserId)
-	uid.Name, uid.Comment, uid.Email = name, comment, email
-	uid.Id = name
-	if len(comment) > 0 {
-		if len(uid.Id) > 0 {
-			uid.Id += " "
-		}
-		uid.Id += "("
-		uid.Id += comment
-		uid.Id += ")"
-	}
-	if len(email) > 0 {
-		if len(uid.Id) > 0 {
-			uid.Id += " "
-		}
-		uid.Id += "<"
-		uid.Id += email
-		uid.Id += ">"
-	}
-	return uid
-}
-
-func (uid *UserId) parse(r io.Reader) (err error) {
-	// RFC 4880, section 5.11
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return
-	}
-	uid.Id = string(b)
-	uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
-	return
-}
-
-// Serialize marshals uid to w in the form of an OpenPGP packet, including
-// header.
-func (uid *UserId) Serialize(w io.Writer) error {
-	err := serializeHeader(w, packetTypeUserId, len(uid.Id))
-	if err != nil {
-		return err
-	}
-	_, err = w.Write([]byte(uid.Id))
-	return err
-}
-
-// parseUserId extracts the name, comment and email from a user id string that
-// is formatted as "Full Name (Comment) <email@example.com>".
-func parseUserId(id string) (name, comment, email string) {
-	var n, c, e struct {
-		start, end int
-	}
-	var state int
-
-	for offset, rune := range id {
-		switch state {
-		case 0:
-			// Entering name
-			n.start = offset
-			state = 1
-			fallthrough
-		case 1:
-			// In name
-			if rune == '(' {
-				state = 2
-				n.end = offset
-			} else if rune == '<' {
-				state = 5
-				n.end = offset
-			}
-		case 2:
-			// Entering comment
-			c.start = offset
-			state = 3
-			fallthrough
-		case 3:
-			// In comment
-			if rune == ')' {
-				state = 4
-				c.end = offset
-			}
-		case 4:
-			// Between comment and email
-			if rune == '<' {
-				state = 5
-			}
-		case 5:
-			// Entering email
-			e.start = offset
-			state = 6
-			fallthrough
-		case 6:
-			// In email
-			if rune == '>' {
-				state = 7
-				e.end = offset
-			}
-		default:
-			// After email
-		}
-	}
-	switch state {
-	case 1:
-		// ended in the name
-		n.end = len(id)
-	case 3:
-		// ended in comment
-		c.end = len(id)
-	case 6:
-		// ended in email
-		e.end = len(id)
-	}
-
-	name = strings.TrimSpace(id[n.start:n.end])
-	comment = strings.TrimSpace(id[c.start:c.end])
-	email = strings.TrimSpace(id[e.start:e.end])
-	return
-}
diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go
deleted file mode 100644
index 6ec664f4..00000000
--- a/vendor/golang.org/x/crypto/openpgp/read.go
+++ /dev/null
@@ -1,442 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package openpgp implements high level operations on OpenPGP messages.
-package openpgp // import "golang.org/x/crypto/openpgp"
-
-import (
-	"crypto"
-	_ "crypto/sha256"
-	"hash"
-	"io"
-	"strconv"
-
-	"golang.org/x/crypto/openpgp/armor"
-	"golang.org/x/crypto/openpgp/errors"
-	"golang.org/x/crypto/openpgp/packet"
-)
-
-// SignatureType is the armor type for a PGP signature.
-var SignatureType = "PGP SIGNATURE"
-
-// readArmored reads an armored block with the given type.
-func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
-	block, err := armor.Decode(r)
-	if err != nil {
-		return
-	}
-
-	if block.Type != expectedType {
-		return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
-	}
-
-	return block.Body, nil
-}
-
-// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
-// signed message.
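A short sketch of the two user-identity constructors removed above, using the exported packet API (names and the address are illustrative):

    package main

    import (
        "fmt"
        "image"

        "golang.org/x/crypto/openpgp/packet"
    )

    func main() {
        // User ids follow the "Full Name (Comment) <email@example.com>" convention.
        uid := packet.NewUserId("Alice Example", "work", "alice@example.com")
        fmt.Println(uid.Id) // Alice Example (work) <alice@example.com>

        // Metacharacters are rejected rather than escaped, mirroring GPG.
        fmt.Println(packet.NewUserId("Alice (", "", "") == nil) // true

        // A user attribute carries JPEG photos; ImageData strips the 16-byte
        // image header from each subpacket.
        img := image.NewRGBA(image.Rect(0, 0, 8, 8))
        uat, err := packet.NewUserAttributePhoto(img)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(uat.ImageData()[0]) > 0) // true: raw JFIF bytes
    }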
-type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can be trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - SignatureError error // nil if the signature is good. - Signature *packet.Signature // the signature packet itself, if v4 (default) - SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. -func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - var se *packet.SymmetricallyEncrypted - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. The decryption key is either - // encrypted to a public key, or with a passphrase. This loop - // collects these packets. -ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. 
- md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: - break - default: - continue - } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - case *packet.SymmetricallyEncrypted: - se = p - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. - if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. -FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - } - if len(pk.encryptedKey.Key) == 0 { - continue - } - decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - if err == nil { - decrypted, err = se.Decrypt(cipherFunc, key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - return readSignedMessage(packets, md, keyring) -} - -// readSignedMessage reads a possibly signed message if mdin is non-zero then -// that structure is updated and returned. Otherwise a fresh MessageDetails is -// used. 
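The decryption loop above hands off to readSignedMessage, and the resulting MessageDetails carries the contract documented earlier: nothing in it can be trusted until UnverifiedBody has been drained. A sketch of that consumption pattern, assuming the keyring is an openpgp.EntityList:

    package sketch

    import (
        "io"
        "io/ioutil"

        "golang.org/x/crypto/openpgp"
    )

    // readAndVerify drains UnverifiedBody before looking at the verdict;
    // reading to EOF is what triggers the MDC check and, for signed
    // messages, verification of the trailing signature packet.
    func readAndVerify(r io.Reader, keyring openpgp.EntityList, prompt openpgp.PromptFunction) ([]byte, error) {
        md, err := openpgp.ReadMessage(r, keyring, prompt, nil)
        if err != nil {
            return nil, err
        }
        body, err := ioutil.ReadAll(md.UnverifiedBody)
        if err != nil {
            return nil, err
        }
        if md.IsSigned && md.SignatureError != nil {
            return nil, md.SignatureError
        }
        return body, nil
    }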
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if !p.IsLast { - return nil, errors.UnsupportedError("nested signatures") - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md = nil - return - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.SignedBy != nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature. The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. -func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if !hashId.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) - } - h := hashId.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (n int, err error) { - n, err = cr.md.LiteralData.Body.Read(buf) - if err == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - return -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. 
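hashForSignature's SigTypeText branch wraps the hash so line endings are canonicalised to CRLF before hashing. A sketch showing that LF and CRLF inputs produce the same canonical digest, assuming the package's exported NewCanonicalTextHash helper:

    package main

    import (
        "bytes"
        "crypto/sha256"
        "fmt"

        "golang.org/x/crypto/openpgp"
    )

    func main() {
        a := openpgp.NewCanonicalTextHash(sha256.New())
        a.Write([]byte("hello\nworld\n"))

        b := openpgp.NewCanonicalTextHash(sha256.New())
        b.Write([]byte("hello\r\nworld\r\n"))

        fmt.Println(bytes.Equal(a.Sum(nil), b.Sum(nil))) // true
    }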
-type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails -} - -func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { - n, err = scr.md.LiteralData.Body.Read(buf) - scr.wrappedHash.Write(buf[:n]) - if err == io.EOF { - var p packet.Packet - p, scr.md.SignatureError = scr.packets.Next() - if scr.md.SignatureError != nil { - return - } - - var ok bool - if scr.md.Signature, ok = p.(*packet.Signature); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) - } else { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") - return - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. - if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - } - return -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the signer if the signature is valid. If the signer isn't known, -// ErrUnknownIssuer is returned. -func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - packets := packet.NewReader(signature) - for { - p, err = packets.Next() - if err == io.EOF { - return nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, err - } - - switch sig := p.(type) { - case *packet.Signature: - if sig.IssuerKeyId == nil { - return nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - case *packet.SignatureV3: - issuerKeyId = sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - default: - return nil, errors.StructuralError("non signature packet found") - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, err - } - - for _, key := range keys { - switch sig := p.(type) { - case *packet.Signature: - err = key.PublicKey.VerifySignature(h, sig) - case *packet.SignatureV3: - err = key.PublicKey.VerifySignatureV3(h, sig) - default: - panic("unreachable") - } - - if err == nil { - return key.Entity, nil - } - } - - return nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. -func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - - return CheckDetachedSignature(keyring, signed, body) -} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go deleted file mode 100644 index 4b9a44ca..00000000 --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
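A sketch of the detached-verification entry point above; the keyring is assumed to be loaded already and the file paths are hypothetical:

    package sketch

    import (
        "os"

        "golang.org/x/crypto/openpgp"
    )

    // verifyDetached checks an armored detached signature at sigPath against
    // the file at path, returning the signing entity on success.
    func verifyDetached(keyring openpgp.EntityList, path, sigPath string) (*openpgp.Entity, error) {
        signed, err := os.Open(path)
        if err != nil {
            return nil, err
        }
        defer signed.Close()

        sig, err := os.Open(sigPath)
        if err != nil {
            return nil, err
        }
        defer sig.Close()

        return openpgp.CheckArmoredDetachedSignature(keyring, signed, sig)
    }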
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package s2k implements the various OpenPGP string-to-key transforms as
-// specified in RFC 4880 section 3.7.1.
-package s2k // import "golang.org/x/crypto/openpgp/s2k"
-
-import (
-	"crypto"
-	"hash"
-	"io"
-	"strconv"
-
-	"golang.org/x/crypto/openpgp/errors"
-)
-
-// Config collects configuration parameters for s2k key-stretching
-// transformations. A nil *Config is valid and results in all default
-// values. Currently, Config is used only by the Serialize function in
-// this package.
-type Config struct {
-	// Hash is the default hash function to be used. If
-	// nil, SHA1 is used.
-	Hash crypto.Hash
-	// S2KCount is only used for symmetric encryption. It
-	// determines the strength of the passphrase stretching when
-	// the said passphrase is hashed to produce a key. S2KCount
-	// should be between 1024 and 65011712, inclusive. If Config
-	// is nil or S2KCount is 0, the value 65536 is used. Not all
-	// values in the above range can be represented. S2KCount will
-	// be rounded up to the next representable value if it cannot
-	// be encoded exactly. When set, it is strongly encouraged to
-	// use a value that is at least 65536. See RFC 4880 Section
-	// 3.7.1.3.
-	S2KCount int
-}
-
-func (c *Config) hash() crypto.Hash {
-	if c == nil || uint(c.Hash) == 0 {
-		// SHA1 is the historical default in this package.
-		return crypto.SHA1
-	}
-
-	return c.Hash
-}
-
-func (c *Config) encodedCount() uint8 {
-	if c == nil || c.S2KCount == 0 {
-		return 96 // The common case. Corresponding to 65536
-	}
-
-	i := c.S2KCount
-	switch {
-	// Behave like GPG. Should we make 65536 the lowest value used?
-	case i < 1024:
-		i = 1024
-	case i > 65011712:
-		i = 65011712
-	}
-
-	return encodeCount(i)
-}
-
-// encodeCount converts an iterative "count" in the range 1024 to
-// 65011712, inclusive, to an encoded count. The return value is the
-// octet that is actually stored in the GPG file. encodeCount panics
-// if i is not in the above range (encodedCount above takes care to
-// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
-func encodeCount(i int) uint8 {
-	if i < 1024 || i > 65011712 {
-		panic("count arg i outside the required range")
-	}
-
-	for encoded := 0; encoded < 256; encoded++ {
-		count := decodeCount(uint8(encoded))
-		if count >= i {
-			return uint8(encoded)
-		}
-	}
-
-	return 255
-}
-
-// decodeCount returns the s2k mode 3 iterative "count" corresponding to
-// the encoded octet c.
-func decodeCount(c uint8) int {
-	return (16 + int(c&15)) << (uint32(c>>4) + 6)
-}
-
-// Simple writes to out the result of computing the Simple S2K function (RFC
-// 4880, section 3.7.1.1) using the given hash and input passphrase.
-func Simple(out []byte, h hash.Hash, in []byte) {
-	Salted(out, h, in, nil)
-}
-
-var zero [1]byte
-
-// Salted writes to out the result of computing the Salted S2K function (RFC
-// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
-func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
-	done := 0
-	var digest []byte
-
-	for i := 0; done < len(out); i++ {
-		h.Reset()
-		for j := 0; j < i; j++ {
-			h.Write(zero[:])
-		}
-		h.Write(salt)
-		h.Write(in)
-		digest = h.Sum(digest[:0])
-		n := copy(out[done:], digest)
-		done += n
-	}
-}
-
-// Iterated writes to out the result of computing the Iterated and Salted S2K
-// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
-// salt and iteration count.
-func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. -func Parse(r io.Reader) (f func(out, in []byte), err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - hash, ok := HashIdToHash(buf[1]) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) - } - if !hash.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) - } - h := hash.New() - - switch buf[0] { - case 0: - f := func(out, in []byte) { - Simple(out, h, in) - } - return f, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return - } - f := func(out, in []byte) { - Salted(out, h, in, buf[:8]) - } - return f, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return - } - count := decodeCount(buf[8]) - f := func(out, in []byte) { - Iterated(out, h, in, buf[:8], count) - } - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. -func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - var buf [11]byte - buf[0] = 3 /* iterated and salted */ - buf[1], _ = HashToHashId(c.hash()) - salt := buf[2:10] - if _, err := io.ReadFull(rand, salt); err != nil { - return err - } - encodedCount := c.encodedCount() - count := decodeCount(encodedCount) - buf[10] = encodedCount - if _, err := w.Write(buf[:]); err != nil { - return err - } - - Iterated(key, c.hash().New(), passphrase, salt, count) - return nil -} - -// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. -var hashToHashIdMapping = []struct { - id byte - hash crypto.Hash - name string -}{ - {1, crypto.MD5, "MD5"}, - {2, crypto.SHA1, "SHA1"}, - {3, crypto.RIPEMD160, "RIPEMD160"}, - {8, crypto.SHA256, "SHA256"}, - {9, crypto.SHA384, "SHA384"}, - {10, crypto.SHA512, "SHA512"}, - {11, crypto.SHA224, "SHA224"}, -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.hash, true - } - } - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id. 
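Serialize and Parse above are inverses over the 11-byte descriptor, and the default encoded count octet 96 decodes to (16+0)<<(6+6) = 65536 iterations. A round-trip sketch against the exported s2k API (the blank crypto/sha1 import registers the default hash):

    package main

    import (
        "bytes"
        "crypto/rand"
        _ "crypto/sha1" // registers the default S2K hash
        "fmt"

        "golang.org/x/crypto/openpgp/s2k"
    )

    func main() {
        passphrase := []byte("passphrase")

        // Serialize writes the iterated+salted descriptor to desc and
        // stretches the passphrase into key.
        var desc bytes.Buffer
        key := make([]byte, 16)
        if err := s2k.Serialize(&desc, key, rand.Reader, passphrase, nil); err != nil {
            panic(err)
        }

        // Parse reads the descriptor back and returns the same transform.
        f, err := s2k.Parse(&desc)
        if err != nil {
            panic(err)
        }
        key2 := make([]byte, 16)
        f(key2, passphrase)
        fmt.Println(bytes.Equal(key, key2)) // true
    }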
-func HashIdToString(id byte) (name string, ok bool) {
-	for _, m := range hashToHashIdMapping {
-		if m.id == id {
-			return m.name, true
-		}
-	}
-
-	return "", false
-}
-
-// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
-func HashToHashId(h crypto.Hash) (id byte, ok bool) {
-	for _, m := range hashToHashIdMapping {
-		if m.hash == h {
-			return m.id, true
-		}
-	}
-	return 0, false
-}
diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go
deleted file mode 100644
index 4ee71784..00000000
--- a/vendor/golang.org/x/crypto/openpgp/write.go
+++ /dev/null
@@ -1,418 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
-	"crypto"
-	"hash"
-	"io"
-	"strconv"
-	"time"
-
-	"golang.org/x/crypto/openpgp/armor"
-	"golang.org/x/crypto/openpgp/errors"
-	"golang.org/x/crypto/openpgp/packet"
-	"golang.org/x/crypto/openpgp/s2k"
-)
-
-// DetachSign signs message with the private key from signer (which must
-// already have been decrypted) and writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
-	return detachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// ArmoredDetachSign signs message with the private key from signer (which
-// must already have been decrypted) and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
-	return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// DetachSignText signs message (after canonicalising the line endings) with
-// the private key from signer (which must already have been decrypted) and
-// writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
-	return detachSign(w, signer, message, packet.SigTypeText, config)
-}
-
-// ArmoredDetachSignText signs message (after canonicalising the line endings)
-// with the private key from signer (which must already have been decrypted)
-// and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sig.IssuerKeyId = &signer.PrivateKey.KeyId - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - io.Copy(wrappedHash, message) - - err = sig.Sign(h, signer.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. - FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. - ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. -// If config is nil, sensible defaults will be used. -func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } - - literaldata := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literaldata, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. 
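A sketch of the detached-signing helpers above; the signer entity is assumed to be loaded with its private key already decrypted:

    package sketch

    import (
        "bytes"
        "io"

        "golang.org/x/crypto/openpgp"
    )

    // signDetached writes an armored detached signature for msg to out in
    // binary mode; ArmoredDetachSignText is the line-ending-canonicalising variant.
    func signDetached(out io.Writer, signer *openpgp.Entity, msg []byte) error {
        return openpgp.ArmoredDetachSign(out, signer, bytes.NewReader(msg), nil)
    }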
-func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// writeAndSign writes the data as a payload package and, optionally, signs -// it. hints contains optional information, that is also encrypted, -// that aids the recipients in processing the message. The resulting -// WriteCloser must be closed after the contents of the file have been -// written. If config is nil, sensible defaults will be used. -func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.signingKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. - if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: packet.SigTypeBinary, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(payload); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := payload - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. - w = noOpCloser{w} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil - } - return literalData, nil -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. 
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if len(to) == 0 { - return nil, errors.InvalidArgumentError("no encryption recipient provided") - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - // In the event that a recipient doesn't specify any supported ciphers - // or hash functions, these are the ones that we assume that every - // implementation supports. - defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] - defaultHashes := candidateHashes[len(candidateHashes)-1:] - - encryptKeys := make([]Key, len(to)) - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].encryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") - } - - sig := to[i].primaryIdentity().SelfSignature - - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - } - - if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - // If the cipher specified by config is a candidate, we'll use that. - configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) - if err != nil { - return - } - - return writeAndSign(payload, candidateHashes, signed, hints, config) -} - -// Sign signs a message. The resulting WriteCloser must be closed after the -// contents of the file have been written. hints contains optional information -// that aids the recipients in processing the message. -// If config is nil, sensible defaults will be used. -func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { - if signed == nil { - return nil, errors.InvalidArgumentError("no signer provided") - } - - // These are the possible hash functions that we'll use for the signature. 
- candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - defaultHashes := candidateHashes[len(candidateHashes)-1:] - preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config) -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. -type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - h hash.Hash - signer *packet.PrivateKey - config *packet.Config -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.h.Write(data) - return s.literalData.Write(data) -} - -func (s signatureWriter) Close() error { - sig := &packet.Signature{ - SigType: packet.SigTypeBinary, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - } - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go deleted file mode 100644 index 3d6f516a..00000000 --- a/vendor/golang.org/x/net/internal/socks/client.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
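A sketch of how Encrypt above is driven. recipient and signer are assumed to be already-loaded *openpgp.Entity values; the error from Close matters because closing flushes the literal data and, when signing, the trailing signature packet:

    package sketch

    import (
        "io"

        "golang.org/x/crypto/openpgp"
    )

    // encryptTo encrypts msg to recipient and, when signer is non-nil, signs it.
    func encryptTo(out io.Writer, recipient, signer *openpgp.Entity, msg []byte) error {
        w, err := openpgp.Encrypt(out, []*openpgp.Entity{recipient}, signer, nil, nil)
        if err != nil {
            return err
        }
        if _, err := w.Write(msg); err != nil {
            w.Close()
            return err
        }
        return w.Close()
    }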
- -package socks - -import ( - "context" - "errors" - "io" - "net" - "strconv" - "time" -) - -var ( - noDeadline = time.Time{} - aLongTimeAgo = time.Unix(1, 0) -) - -func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { - host, port, err := splitHostPort(address) - if err != nil { - return nil, err - } - if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { - c.SetDeadline(deadline) - defer c.SetDeadline(noDeadline) - } - if ctx != context.Background() { - errCh := make(chan error, 1) - done := make(chan struct{}) - defer func() { - close(done) - if ctxErr == nil { - ctxErr = <-errCh - } - }() - go func() { - select { - case <-ctx.Done(): - c.SetDeadline(aLongTimeAgo) - errCh <- ctx.Err() - case <-done: - errCh <- nil - } - }() - } - - b := make([]byte, 0, 6+len(host)) // the size here is just an estimate - b = append(b, Version5) - if len(d.AuthMethods) == 0 || d.Authenticate == nil { - b = append(b, 1, byte(AuthMethodNotRequired)) - } else { - ams := d.AuthMethods - if len(ams) > 255 { - return nil, errors.New("too many authentication methods") - } - b = append(b, byte(len(ams))) - for _, am := range ams { - b = append(b, byte(am)) - } - } - if _, ctxErr = c.Write(b); ctxErr != nil { - return - } - - if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { - return - } - if b[0] != Version5 { - return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) - } - am := AuthMethod(b[1]) - if am == AuthMethodNoAcceptableMethods { - return nil, errors.New("no acceptable authentication methods") - } - if d.Authenticate != nil { - if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { - return - } - } - - b = b[:0] - b = append(b, Version5, byte(d.cmd), 0) - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - b = append(b, AddrTypeIPv4) - b = append(b, ip4...) - } else if ip6 := ip.To16(); ip6 != nil { - b = append(b, AddrTypeIPv6) - b = append(b, ip6...) - } else { - return nil, errors.New("unknown address type") - } - } else { - if len(host) > 255 { - return nil, errors.New("FQDN too long") - } - b = append(b, AddrTypeFQDN) - b = append(b, byte(len(host))) - b = append(b, host...) 
- } - b = append(b, byte(port>>8), byte(port)) - if _, ctxErr = c.Write(b); ctxErr != nil { - return - } - - if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { - return - } - if b[0] != Version5 { - return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) - } - if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { - return nil, errors.New("unknown error " + cmdErr.String()) - } - if b[2] != 0 { - return nil, errors.New("non-zero reserved field") - } - l := 2 - var a Addr - switch b[3] { - case AddrTypeIPv4: - l += net.IPv4len - a.IP = make(net.IP, net.IPv4len) - case AddrTypeIPv6: - l += net.IPv6len - a.IP = make(net.IP, net.IPv6len) - case AddrTypeFQDN: - if _, err := io.ReadFull(c, b[:1]); err != nil { - return nil, err - } - l += int(b[0]) - default: - return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) - } - if cap(b) < l { - b = make([]byte, l) - } else { - b = b[:l] - } - if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { - return - } - if a.IP != nil { - copy(a.IP, b) - } else { - a.Name = string(b[:len(b)-2]) - } - a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) - return &a, nil -} - -func splitHostPort(address string) (string, int, error) { - host, port, err := net.SplitHostPort(address) - if err != nil { - return "", 0, err - } - portnum, err := strconv.Atoi(port) - if err != nil { - return "", 0, err - } - if 1 > portnum || portnum > 0xffff { - return "", 0, errors.New("port number out of range " + port) - } - return host, portnum, nil -} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go deleted file mode 100644 index 97db2340..00000000 --- a/vendor/golang.org/x/net/internal/socks/socks.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package socks provides a SOCKS version 5 client implementation. -// -// SOCKS protocol version 5 is defined in RFC 1928. -// Username/Password authentication for SOCKS version 5 is defined in -// RFC 1929. -package socks - -import ( - "context" - "errors" - "io" - "net" - "strconv" -) - -// A Command represents a SOCKS command. -type Command int - -func (cmd Command) String() string { - switch cmd { - case CmdConnect: - return "socks connect" - case cmdBind: - return "socks bind" - default: - return "socks " + strconv.Itoa(int(cmd)) - } -} - -// An AuthMethod represents a SOCKS authentication method. -type AuthMethod int - -// A Reply represents a SOCKS command reply code. -type Reply int - -func (code Reply) String() string { - switch code { - case StatusSucceeded: - return "succeeded" - case 0x01: - return "general SOCKS server failure" - case 0x02: - return "connection not allowed by ruleset" - case 0x03: - return "network unreachable" - case 0x04: - return "host unreachable" - case 0x05: - return "connection refused" - case 0x06: - return "TTL expired" - case 0x07: - return "command not supported" - case 0x08: - return "address type not supported" - default: - return "unknown code: " + strconv.Itoa(int(code)) - } -} - -// Wire protocol constants. 
-const ( - Version5 = 0x05 - - AddrTypeIPv4 = 0x01 - AddrTypeFQDN = 0x03 - AddrTypeIPv6 = 0x04 - - CmdConnect Command = 0x01 // establishes an active-open forward proxy connection - cmdBind Command = 0x02 // establishes a passive-open forward proxy connection - - AuthMethodNotRequired AuthMethod = 0x00 // no authentication required - AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password - AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods - - StatusSucceeded Reply = 0x00 -) - -// An Addr represents a SOCKS-specific address. -// Either Name or IP is used exclusively. -type Addr struct { - Name string // fully-qualified domain name - IP net.IP - Port int -} - -func (a *Addr) Network() string { return "socks" } - -func (a *Addr) String() string { - if a == nil { - return "" - } - port := strconv.Itoa(a.Port) - if a.IP == nil { - return net.JoinHostPort(a.Name, port) - } - return net.JoinHostPort(a.IP.String(), port) -} - -// A Conn represents a forward proxy connection. -type Conn struct { - net.Conn - - boundAddr net.Addr -} - -// BoundAddr returns the address assigned by the proxy server for -// connecting to the command target address from the proxy server. -func (c *Conn) BoundAddr() net.Addr { - if c == nil { - return nil - } - return c.boundAddr -} - -// A Dialer holds SOCKS-specific options. -type Dialer struct { - cmd Command // either CmdConnect or cmdBind - proxyNetwork string // network between a proxy server and a client - proxyAddress string // proxy server address - - // ProxyDial specifies the optional dial function for - // establishing the transport connection. - ProxyDial func(context.Context, string, string) (net.Conn, error) - - // AuthMethods specifies the list of request authentication - // methods. - // If empty, SOCKS client requests only AuthMethodNotRequired. - AuthMethods []AuthMethod - - // Authenticate specifies the optional authentication - // function. It must be non-nil when AuthMethods is not empty. - // It must return an error when the authentication is failed. - Authenticate func(context.Context, io.ReadWriter, AuthMethod) error -} - -// DialContext connects to the provided address on the provided -// network. -// -// The returned error value may be a net.OpError. When the Op field of -// net.OpError contains "socks", the Source field contains a proxy -// server address and the Addr field contains a command target -// address. -// -// See func Dial of the net package of standard library for a -// description of the network and address parameters. 
-func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if ctx == nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} - } - var err error - var c net.Conn - if d.ProxyDial != nil { - c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) - } else { - var dd net.Dialer - c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) - } - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - a, err := d.connect(ctx, c, address) - if err != nil { - c.Close() - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - return &Conn{Conn: c, boundAddr: a}, nil -} - -// DialWithConn initiates a connection from SOCKS server to the target -// network and address using the connection c that is already -// connected to the SOCKS server. -// -// It returns the connection's local address assigned by the SOCKS -// server. -func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if ctx == nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} - } - a, err := d.connect(ctx, c, address) - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - return a, nil -} - -// Dial connects to the provided address on the provided network. -// -// Unlike DialContext, it returns a raw transport connection instead -// of a forward proxy connection. -// -// Deprecated: Use DialContext or DialWithConn instead. 
-func (d *Dialer) Dial(network, address string) (net.Conn, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - var err error - var c net.Conn - if d.ProxyDial != nil { - c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) - } else { - c, err = net.Dial(d.proxyNetwork, d.proxyAddress) - } - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { - c.Close() - return nil, err - } - return c, nil -} - -func (d *Dialer) validateTarget(network, address string) error { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return errors.New("network not implemented") - } - switch d.cmd { - case CmdConnect, cmdBind: - default: - return errors.New("command not implemented") - } - return nil -} - -func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { - for i, s := range []string{d.proxyAddress, address} { - host, port, err := splitHostPort(s) - if err != nil { - return nil, nil, err - } - a := &Addr{Port: port} - a.IP = net.ParseIP(host) - if a.IP == nil { - a.Name = host - } - if i == 0 { - proxy = a - } else { - dst = a - } - } - return -} - -// NewDialer returns a new Dialer that dials through the provided -// proxy server's network and address. -func NewDialer(network, address string) *Dialer { - return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} -} - -const ( - authUsernamePasswordVersion = 0x01 - authStatusSucceeded = 0x00 -) - -// UsernamePassword are the credentials for the username/password -// authentication method. -type UsernamePassword struct { - Username string - Password string -} - -// Authenticate authenticates a pair of username and password with the -// proxy server. -func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { - switch auth { - case AuthMethodNotRequired: - return nil - case AuthMethodUsernamePassword: - if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { - return errors.New("invalid username/password") - } - b := []byte{authUsernamePasswordVersion} - b = append(b, byte(len(up.Username))) - b = append(b, up.Username...) - b = append(b, byte(len(up.Password))) - b = append(b, up.Password...) - // TODO(mikio): handle IO deadlines and cancelation if - // necessary - if _, err := rw.Write(b); err != nil { - return err - } - if _, err := io.ReadFull(rw, b[:2]); err != nil { - return err - } - if b[0] != authUsernamePasswordVersion { - return errors.New("invalid username/password version") - } - if b[1] != authStatusSucceeded { - return errors.New("username/password authentication failed") - } - return nil - } - return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) -} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go deleted file mode 100644 index 811c2e4e..00000000 --- a/vendor/golang.org/x/net/proxy/dial.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
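This internal package is not importable by outside code; callers reach it through golang.org/x/net/proxy. A sketch with a hypothetical proxy address and credentials:

    package main

    import (
        "fmt"

        "golang.org/x/net/proxy"
    )

    func main() {
        // Negotiates username/password auth, then issues a CONNECT for the
        // target through the proxy.
        d, err := proxy.SOCKS5("tcp", "127.0.0.1:1080",
            &proxy.Auth{User: "user", Password: "secret"}, proxy.Direct)
        if err != nil {
            panic(err)
        }
        conn, err := d.Dial("tcp", "example.com:443")
        if err != nil {
            panic(err)
        }
        defer conn.Close()
        fmt.Println("connected via", conn.RemoteAddr())
    }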
- -package proxy - -import ( - "context" - "net" -) - -// A ContextDialer dials using a context. -type ContextDialer interface { - DialContext(ctx context.Context, network, address string) (net.Conn, error) -} - -// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. -// -// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. -// -// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer -// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. -// -// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. -func Dial(ctx context.Context, network, address string) (net.Conn, error) { - d := FromEnvironment() - if xd, ok := d.(ContextDialer); ok { - return xd.DialContext(ctx, network, address) - } - return dialContext(ctx, d, network, address) -} - -// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout -// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. -func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { - var ( - conn net.Conn - done = make(chan struct{}, 1) - err error - ) - go func() { - conn, err = d.Dial(network, address) - close(done) - if conn != nil && ctx.Err() != nil { - conn.Close() - } - }() - select { - case <-ctx.Done(): - err = ctx.Err() - case <-done: - } - return conn, err -} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go deleted file mode 100644 index 3d66bdef..00000000 --- a/vendor/golang.org/x/net/proxy/direct.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" -) - -type direct struct{} - -// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. -var Direct = direct{} - -var ( - _ Dialer = Direct - _ ContextDialer = Direct -) - -// Dial directly invokes net.Dial with the supplied parameters. -func (direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. -func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { - var d net.Dialer - return d.DialContext(ctx, network, addr) -} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go deleted file mode 100644 index 573fe79e..00000000 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" - "strings" -) - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. -type PerHost struct { - def, bypass Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. 
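The goroutine-leak warning on dialContext above only bites for forward dialers that predate DialContext; a custom dialer that also implements ContextDialer keeps proxy.Dial on the non-leaking path. A sketch, with the wrapper type being purely illustrative:

```go
package proxyutil // an illustrative helper package

import (
	"context"
	"net"
)

// forwardDialer implements both Dial and DialContext, so it satisfies
// proxy.ContextDialer and proxy.Dial never falls back to the
// goroutine-spawning dialContext shim.
type forwardDialer struct{ d net.Dialer }

func (f forwardDialer) Dial(network, addr string) (net.Conn, error) {
	return f.d.Dial(network, addr)
}

func (f forwardDialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
	return f.d.DialContext(ctx, network, addr)
}
```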
-func NewPerHost(defaultDialer, bypass Dialer) *PerHost { - return &PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -// DialContext connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - d := p.dialerForRequest(host) - if x, ok := d.(ContextDialer); ok { - return x.DialContext(ctx, network, addr) - } - return dialContext(ctx, d, network, addr) -} - -func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. -func (p *PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. 
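Taken together, the PerHost methods above implement NO_PROXY-style routing; a brief usage sketch (socksDialer stands in for a previously constructed proxy.Dialer):

```go
// Everything dials through socksDialer except loopback, 10.0.0.0/8,
// and the *.internal.example.com zone, which bypass to a direct dial.
ph := proxy.NewPerHost(socksDialer, proxy.Direct)
ph.AddFromString("localhost,10.0.0.0/8,*.internal.example.com")

direct, err := ph.Dial("tcp", "db.internal.example.com:5432") // matches zone: direct
if err != nil {
	log.Fatal(err)
}
defer direct.Close()

proxied, err := ph.Dial("tcp", "example.org:443") // no match: via socksDialer
if err != nil {
	log.Fatal(err)
}
defer proxied.Close()
```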
-func (p *PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go deleted file mode 100644 index 9ff4b9a7..00000000 --- a/vendor/golang.org/x/net/proxy/proxy.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proxy provides support for a variety of protocols to proxy network -// data. -package proxy // import "golang.org/x/net/proxy" - -import ( - "errors" - "net" - "net/url" - "os" - "sync" -) - -// A Dialer is a means to establish a connection. -// Custom dialers should also implement ContextDialer. -type Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy-related -// variables in the environment and makes underlying connections -// directly. -func FromEnvironment() Dialer { - return FromEnvironmentUsing(Direct) -} - -// FromEnvironmentUsing returns the dialer specify by the proxy-related -// variables in the environment and makes underlying connections -// using the provided forwarding Dialer (for instance, a *net.Dialer -// with desired configuration). -func FromEnvironmentUsing(forward Dialer) Dialer { - allProxy := allProxyEnv.Get() - if len(allProxy) == 0 { - return forward - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return forward - } - proxy, err := FromURL(proxyURL, forward) - if err != nil { - return forward - } - - noProxy := noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := NewPerHost(proxy, forward) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { - if proxySchemes == nil { - proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) - } - proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. -func FromURL(u *url.URL, forward Dialer) (Dialer, error) { - var auth *Auth - if u.User != nil { - auth = new(Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5", "socks5h": - addr := u.Hostname() - port := u.Port() - if port == "" { - port = "1080" - } - return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. 
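The registry consulted in the lookup that follows is populated through RegisterDialerType; a sketch of plugging in a custom scheme (the socks4 parser and its constructor are hypothetical):

```go
// Register a hypothetical "socks4" scheme so that FromURL (and hence
// FromEnvironment with ALL_PROXY=socks4://...) can construct it.
proxy.RegisterDialerType("socks4", func(u *url.URL, forward proxy.Dialer) (proxy.Dialer, error) {
	return newSocks4Dialer(u.Host, forward), nil // hypothetical constructor
})

d, err := proxy.FromURL(&url.URL{Scheme: "socks4", Host: "127.0.0.1:1080"}, proxy.Direct)
if err != nil {
	log.Fatal(err)
}
_ = d // ready to use like any other proxy.Dialer
```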
- if proxySchemes != nil { - if f, ok := proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - allProxyEnv = &envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - noProxyEnv = &envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type envOnce struct { - names []string - once sync.Once - val string -} - -func (e *envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// reset is used by tests -func (e *envOnce) reset() { - e.once = sync.Once{} - e.val = "" -} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go deleted file mode 100644 index c91651f9..00000000 --- a/vendor/golang.org/x/net/proxy/socks5.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" - - "golang.org/x/net/internal/socks" -) - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given -// address with an optional username and password. -// See RFC 1928 and RFC 1929. -func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { - d := socks.NewDialer(network, address) - if forward != nil { - if f, ok := forward.(ContextDialer); ok { - d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { - return f.DialContext(ctx, network, address) - } - } else { - d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { - return dialContext(ctx, forward, network, address) - } - } - } - if auth != nil { - up := socks.UsernamePassword{ - Username: auth.User, - Password: auth.Password, - } - d.AuthMethods = []socks.AuthMethod{ - socks.AuthMethodNotRequired, - socks.AuthMethodUsernamePassword, - } - d.Authenticate = up.Authenticate - } - return d, nil -} diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd7..00000000 --- a/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e9..00000000 --- a/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/sync/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go deleted file mode 100644 index 9857fe53..00000000 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. 
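The errgroup package deleted below is small enough to summarize with one usage sketch: the first failing task cancels the shared context, and Wait reports that first error (URLs are placeholders):

```go
g, ctx := errgroup.WithContext(context.Background())
for _, u := range []string{"https://a.example", "https://b.example"} {
	u := u // capture the loop variable for the closure
	g.Go(func() error {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
		if err != nil {
			return err
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err // the first error cancels ctx for the other fetches
		}
		return resp.Body.Close()
	})
}
if err := g.Wait(); err != nil {
	log.Fatal(err) // first non-nil error returned by any task
}
```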
-package errgroup - -import ( - "context" - "sync" -) - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid and does not cancel on error. -type Group struct { - cancel func() - - wg sync.WaitGroup - - errOnce sync.Once - err error -} - -// WithContext returns a new Group and an associated Context derived from ctx. -// -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. -func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) - return &Group{cancel: cancel}, ctx -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. -func (g *Group) Wait() error { - g.wg.Wait() - if g.cancel != nil { - g.cancel() - } - return g.err -} - -// Go calls the given function in a new goroutine. -// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. -func (g *Group) Go(f func() error) { - g.wg.Add(1) - - go func() { - defer g.wg.Done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel() - } - }) - } - }() -} diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md new file mode 100644 index 00000000..04b6371a --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -0,0 +1,18 @@ +# Reflection + +Package reflection implements server reflection service. + +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: +```go +import "google.golang.org/grpc/reflection" + +s := grpc.NewServer() +pb.RegisterYourOwnServer(s, &server{}) + +// Register reflection service on gRPC server. +reflection.Register(s) + +s.Serve(lis) +``` diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go new file mode 100644 index 00000000..0a12ad22 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -0,0 +1,750 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_reflection_v1alpha/reflection.proto + +package grpc_reflection_v1alpha + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. 
The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + // + // Types that are valid to be assigned to MessageRequest: + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest{} } +func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) } +func (*ServerReflectionRequest) ProtoMessage() {} +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_42a8ac412db3cb03, []int{0} +} + +func (m *ServerReflectionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerReflectionRequest.Unmarshal(m, b) +} +func (m *ServerReflectionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerReflectionRequest.Marshal(b, m, deterministic) +} +func (m *ServerReflectionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerReflectionRequest.Merge(m, src) +} +func (m *ServerReflectionRequest) XXX_Size() int { + return xxx_messageInfo_ServerReflectionRequest.Size(m) +} +func (m *ServerReflectionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ServerReflectionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerReflectionRequest proto.InternalMessageInfo + +func (m *ServerReflectionRequest) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (m 
*ServerReflectionRequest) GetFileByFilename() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (m *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (m *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (m *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (m *ServerReflectionRequest) GetListServices() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ServerReflectionRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + // Fully-qualified type name. The format should be <package>.<type> + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} } +func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionRequest) ProtoMessage() {} +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_42a8ac412db3cb03, []int{1} +} + +func (m *ExtensionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRequest.Unmarshal(m, b) +} +func (m *ExtensionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRequest.Marshal(b, m, deterministic) +} +func (m *ExtensionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRequest.Merge(m, src) +} +func (m *ExtensionRequest) XXX_Size() int { + return xxx_messageInfo_ExtensionRequest.Size(m) +} +func (m *ExtensionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRequest proto.InternalMessageInfo + +func (m *ExtensionRequest) GetContainingType() string { + if m != nil { + return m.ContainingType + } + return "" +} + +func (m *ExtensionRequest) GetExtensionNumber() int32 { + if m != nil { + return m.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method.
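The getters above unwrap the Go representation of the proto oneof; the corresponding construction side looks like this (a sketch, with rpb aliasing this generated package as serverreflection.go does below):

```go
// Ask for the file descriptor that defines a fully-qualified symbol.
req := &rpb.ServerReflectionRequest{
	Host: "localhost:50051", // illustrative
	MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{
		FileContainingSymbol: "grpc.reflection.v1alpha.ServerReflection",
	},
}
```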
+type ServerReflectionResponse struct { + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the + // message_request in the request. + // + // Types that are valid to be assigned to MessageResponse: + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionResponse{} } +func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) } +func (*ServerReflectionResponse) ProtoMessage() {} +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_42a8ac412db3cb03, []int{2} +} + +func (m *ServerReflectionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerReflectionResponse.Unmarshal(m, b) +} +func (m *ServerReflectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerReflectionResponse.Marshal(b, m, deterministic) +} +func (m *ServerReflectionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerReflectionResponse.Merge(m, src) +} +func (m *ServerReflectionResponse) XXX_Size() int { + return xxx_messageInfo_ServerReflectionResponse.Size(m) +} +func (m *ServerReflectionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ServerReflectionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerReflectionResponse proto.InternalMessageInfo + +func (m *ServerReflectionResponse) GetValidHost() string { + if m != nil { + return m.ValidHost + } + return "" +} + +func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if m != nil { + return m.OriginalRequest + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func 
(*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ServerReflectionResponse) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} } +func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorResponse) ProtoMessage() {} +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_42a8ac412db3cb03, []int{3} +} + +func (m *FileDescriptorResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorResponse.Unmarshal(m, b) +} +func (m *FileDescriptorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorResponse.Marshal(b, m, deterministic) +} +func (m *FileDescriptorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorResponse.Merge(m, src) +} +func (m *FileDescriptorResponse) XXX_Size() int { + return xxx_messageInfo_FileDescriptorResponse.Size(m) +} +func (m *FileDescriptorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorResponse proto.InternalMessageInfo + +func (m *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if m != nil { + return m.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. 
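Before the extension-number response type that follows, a sketch of consuming the FileDescriptorResponse getter above: unlike the gzipped registry entries handled later by decodeFileDesc, these byte slices are plain serialized FileDescriptorProtos (resp is an assumed *rpb.ServerReflectionResponse, dpb the descriptor package imported by serverreflection.go):

```go
for _, raw := range resp.GetFileDescriptorResponse().GetFileDescriptorProto() {
	fd := new(dpb.FileDescriptorProto)
	if err := proto.Unmarshal(raw, fd); err != nil {
		log.Fatal(err)
	}
	fmt.Println(fd.GetName()) // e.g. "grpc_reflection_v1alpha/reflection.proto"
}
```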
+type ExtensionNumberResponse struct { + // Full name of the base type, including the package name. The format + // is <package>.<type> + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} } +func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionNumberResponse) ProtoMessage() {} +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_42a8ac412db3cb03, []int{4} +} + +func (m *ExtensionNumberResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionNumberResponse.Unmarshal(m, b) +} +func (m *ExtensionNumberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionNumberResponse.Marshal(b, m, deterministic) +} +func (m *ExtensionNumberResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionNumberResponse.Merge(m, src) +} +func (m *ExtensionNumberResponse) XXX_Size() int { + return xxx_messageInfo_ExtensionNumberResponse.Size(m) +} +func (m *ExtensionNumberResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionNumberResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionNumberResponse proto.InternalMessageInfo + +func (m *ExtensionNumberResponse) GetBaseTypeName() string { + if m != nil { + return m.BaseTypeName + } + return "" +} + +func (m *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if m != nil { + return m.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} } +func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceResponse) ProtoMessage() {} +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_42a8ac412db3cb03, []int{5} +} + +func (m *ListServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListServiceResponse.Unmarshal(m, b) +} +func (m *ListServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListServiceResponse.Marshal(b, m, deterministic) +} +func (m *ListServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListServiceResponse.Merge(m, src) +} +func (m *ListServiceResponse) XXX_Size() int { + return xxx_messageInfo_ListServiceResponse.Size(m) +} +func (m *ListServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListServiceResponse proto.InternalMessageInfo + +func (m *ListServiceResponse) GetService() []*ServiceResponse { + if m != nil { + return m.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request.
+type ServiceResponse struct { + // Full name of a registered service, including its package name. The format + // is <package>.<service> + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceResponse) Reset() { *m = ServiceResponse{} } +func (m *ServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ServiceResponse) ProtoMessage() {} +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_42a8ac412db3cb03, []int{6} +} + +func (m *ServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceResponse.Unmarshal(m, b) +} +func (m *ServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceResponse.Marshal(b, m, deterministic) +} +func (m *ServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceResponse.Merge(m, src) +} +func (m *ServiceResponse) XXX_Size() int { + return xxx_messageInfo_ServiceResponse.Size(m) +} +func (m *ServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceResponse proto.InternalMessageInfo + +func (m *ServiceResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ErrorResponse) Reset() { *m = ErrorResponse{} } +func (m *ErrorResponse) String() string { return proto.CompactTextString(m) } +func (*ErrorResponse) ProtoMessage() {} +func (*ErrorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_42a8ac412db3cb03, []int{7} +} + +func (m *ErrorResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ErrorResponse.Unmarshal(m, b) +} +func (m *ErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ErrorResponse.Marshal(b, m, deterministic) +} +func (m *ErrorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ErrorResponse.Merge(m, src) +} +func (m *ErrorResponse) XXX_Size() int { + return xxx_messageInfo_ErrorResponse.Size(m) +} +func (m *ErrorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ErrorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ErrorResponse proto.InternalMessageInfo + +func (m *ErrorResponse) GetErrorCode() int32 { + if m != nil { + return m.ErrorCode + } + return 0 +} + +func (m *ErrorResponse) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +func init() { + proto.RegisterType((*ServerReflectionRequest)(nil), "grpc.reflection.v1alpha.ServerReflectionRequest") + proto.RegisterType((*ExtensionRequest)(nil), "grpc.reflection.v1alpha.ExtensionRequest") + proto.RegisterType((*ServerReflectionResponse)(nil), "grpc.reflection.v1alpha.ServerReflectionResponse") + proto.RegisterType((*FileDescriptorResponse)(nil), "grpc.reflection.v1alpha.FileDescriptorResponse") + proto.RegisterType((*ExtensionNumberResponse)(nil),
"grpc.reflection.v1alpha.ExtensionNumberResponse") + proto.RegisterType((*ListServiceResponse)(nil), "grpc.reflection.v1alpha.ListServiceResponse") + proto.RegisterType((*ServiceResponse)(nil), "grpc.reflection.v1alpha.ServiceResponse") + proto.RegisterType((*ErrorResponse)(nil), "grpc.reflection.v1alpha.ErrorResponse") +} + +func init() { + proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor_42a8ac412db3cb03) +} + +var fileDescriptor_42a8ac412db3cb03 = []byte{ + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40, + 0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75, + 0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e, + 0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 0x4b, 0x3e, + 0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7, + 0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7, + 0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e, + 0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94, + 0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43, + 0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36, + 0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d, + 0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3, + 0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd, + 0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 0x8a, 0xde, + 0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 0x1f, 0x67, 0xfe, 0xcd, + 0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc, + 0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8, + 0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f, + 0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33, + 0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb, + 0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58, + 0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a, + 0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce, + 0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e, + 0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25, + 0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d, + 0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8, + 0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62, + 0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77, + 0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97, + 0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 
0x2b, 0xa5, 0xfe, 0xf0, + 0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39, + 0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2, + 0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad, + 0xd9, 0x26, 0xe9, 0xc2, 0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29, + 0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0, + 0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73, + 0x3e, 0x42, 0xf2, 0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d, + 0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6, + 0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5, + 0xf4, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc *grpc.ClientConn +} + +func NewServerReflectionClient(cc *grpc.ClientConn) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &_ServerReflection_serviceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) + if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer can be embedded to have forward compatible implementations. 
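A sketch of driving the bidirectional stream defined above from the client side to list services (conn is an assumed, already-dialed *grpc.ClientConn):

```go
client := rpb.NewServerReflectionClient(conn)
stream, err := client.ServerReflectionInfo(context.Background())
if err != nil {
	log.Fatal(err)
}
if err := stream.Send(&rpb.ServerReflectionRequest{
	MessageRequest: &rpb.ServerReflectionRequest_ListServices{ListServices: ""},
}); err != nil {
	log.Fatal(err)
}
resp, err := stream.Recv()
if err != nil {
	log.Fatal(err)
}
for _, svc := range resp.GetListServicesResponse().GetService() {
	fmt.Println(svc.GetName()) // fully-qualified service names
}
```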
+type UnimplementedServerReflectionServer struct { +} + +func (*UnimplementedServerReflectionServer) ServerReflectionInfo(srv ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) { + s.RegisterService(&_ServerReflection_serviceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _ServerReflection_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_reflection_v1alpha/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto new file mode 100644 index 00000000..99b00df0 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto @@ -0,0 +1,136 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection + +syntax = "proto3"; + +package grpc.reflection.v1alpha; + +service ServerReflection { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + rpc ServerReflectionInfo(stream ServerReflectionRequest) + returns (stream ServerReflectionResponse); +} + +// The message sent by the client when calling ServerReflectionInfo method. +message ServerReflectionRequest { + string host = 1; + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + oneof message_request { + // Find a proto file by the file name. + string file_by_filename = 3; + + // Find the proto file that declares the given fully-qualified symbol name. 
+ // This field should be a fully-qualified symbol name + // (e.g. <package>.<service>[.<method>] or <package>.<type>). + string file_containing_symbol = 4; + + // Find the proto file which defines an extension extending the given + // message type with the given field number. + ExtensionRequest file_containing_extension = 5; + + // Finds the tag numbers used by all known extensions of extendee_type, and + // appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // <package>.<type> + string all_extension_numbers_of_type = 6; + + // List the full names of registered services. The content will not be + // checked. + string list_services = 7; + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +message ExtensionRequest { + // Fully-qualified type name. The format should be <package>.<type> + string containing_type = 1; + int32 extension_number = 2; +} + +// The message sent by the server to answer ServerReflectionInfo method. +message ServerReflectionResponse { + string valid_host = 1; + ServerReflectionRequest original_request = 2; + // The server sets one of the following fields according to the + // message_request in the request. + oneof message_response { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse file_descriptor_response = 4; + + // This message is used to answer all_extension_numbers_of_type requests. + ExtensionNumberResponse all_extension_numbers_response = 5; + + // This message is used to answer list_services requests. + ListServiceResponse list_services_response = 6; + + // This message is used when an error occurs. + ErrorResponse error_response = 7; + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +message FileDescriptorResponse { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + repeated bytes file_descriptor_proto = 1; +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +message ExtensionNumberResponse { + // Full name of the base type, including the package name. The format + // is <package>.<type> + string base_type_name = 1; + repeated int32 extension_number = 2; +} + +// A list of ServiceResponse sent by the server answering list_services request. +message ListServiceResponse { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + repeated ServiceResponse service = 1; +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request.
+message ServiceResponse { + // Full name of a registered service, including its package name. The format + // is <package>.<service> + string name = 1; +} + +// The error code and error message sent by the server when an error occurs. +message ErrorResponse { + // This field uses the error codes defined in grpc::StatusCode. + int32 error_code = 1; + string error_message = 2; +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 00000000..dd22a2da --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,454 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_reflection_v1alpha/reflection.proto + +/* +Package reflection implements server reflection service. + +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. + reflection.Register(s) + + s.Serve(lis) + +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "reflect" + "sort" + "sync" + + "github.com/golang/protobuf/proto" + dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + "google.golang.org/grpc/status" +) + +type serverReflectionServer struct { + s *grpc.Server + + initSymbols sync.Once + serviceNames []string + symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files +} + +// Register registers the server reflection service on the given gRPC server. +func Register(s *grpc.Server) { + rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ + s: s, + }) +} + +// protoMessage is used for type assertion on proto messages. +// Generated proto message implements function Descriptor(), but Descriptor() +// is not part of interface proto.Message. This interface is needed to +// call Descriptor().
+type protoMessage interface { + Descriptor() ([]byte, []int) +} + +func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { + s.initSymbols.Do(func() { + serviceInfo := s.s.GetServiceInfo() + + s.symbols = map[string]*dpb.FileDescriptorProto{} + s.serviceNames = make([]string, 0, len(serviceInfo)) + processed := map[string]struct{}{} + for svc, info := range serviceInfo { + s.serviceNames = append(s.serviceNames, svc) + fdenc, ok := parseMetadata(info.Metadata) + if !ok { + continue + } + fd, err := decodeFileDesc(fdenc) + if err != nil { + continue + } + s.processFile(fd, processed) + } + sort.Strings(s.serviceNames) + }) + + return s.serviceNames, s.symbols +} + +func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { + filename := fd.GetName() + if _, ok := processed[filename]; ok { + return + } + processed[filename] = struct{}{} + + prefix := fd.GetPackage() + + for _, msg := range fd.MessageType { + s.processMessage(fd, prefix, msg) + } + for _, en := range fd.EnumType { + s.processEnum(fd, prefix, en) + } + for _, ext := range fd.Extension { + s.processField(fd, prefix, ext) + } + for _, svc := range fd.Service { + svcName := fqn(prefix, svc.GetName()) + s.symbols[svcName] = fd + for _, meth := range svc.Method { + name := fqn(svcName, meth.GetName()) + s.symbols[name] = fd + } + } + + for _, dep := range fd.Dependency { + fdenc := proto.FileDescriptor(dep) + fdDep, err := decodeFileDesc(fdenc) + if err != nil { + continue + } + s.processFile(fdDep, processed) + } +} + +func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { + msgName := fqn(prefix, msg.GetName()) + s.symbols[msgName] = fd + + for _, nested := range msg.NestedType { + s.processMessage(fd, msgName, nested) + } + for _, en := range msg.EnumType { + s.processEnum(fd, msgName, en) + } + for _, ext := range msg.Extension { + s.processField(fd, msgName, ext) + } + for _, fld := range msg.Field { + s.processField(fd, msgName, fld) + } + for _, oneof := range msg.OneofDecl { + oneofName := fqn(msgName, oneof.GetName()) + s.symbols[oneofName] = fd + } +} + +func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { + enName := fqn(prefix, en.GetName()) + s.symbols[enName] = fd + + for _, val := range en.Value { + valName := fqn(enName, val.GetName()) + s.symbols[valName] = fd + } +} + +func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { + fldName := fqn(prefix, fld.GetName()) + s.symbols[fldName] = fd +} + +func fqn(prefix, name string) string { + if prefix == "" { + return name + } + return prefix + "." + name +} + +// fileDescForType gets the file descriptor for the given type. +// The given type should be a proto message. +func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + enc, _ := m.Descriptor() + + return decodeFileDesc(enc) +} + +// decodeFileDesc does decompression and unmarshalling on the given +// file descriptor byte slice. 
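+// The input is expected to be the gzip-compressed serialized
+// FileDescriptorProto that protoc-gen-go registers via proto.RegisterFile.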
+func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { + raw, err := decompress(enc) + if err != nil { + return nil, fmt.Errorf("failed to decompress enc: %v", err) + } + + fd := new(dpb.FileDescriptorProto) + if err := proto.Unmarshal(raw, fd); err != nil { + return nil, fmt.Errorf("bad descriptor: %v", err) + } + return fd, nil +} + +// decompress does gzip decompression. +func decompress(b []byte) ([]byte, error) { + r, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + } + out, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + } + return out, nil +} + +func typeForName(name string) (reflect.Type, error) { + pt := proto.MessageType(name) + if pt == nil { + return nil, fmt.Errorf("unknown type: %q", name) + } + st := pt.Elem() + + return st, nil +} + +func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + + var extDesc *proto.ExtensionDesc + for id, desc := range proto.RegisteredExtensions(m) { + if id == ext { + extDesc = desc + break + } + } + + if extDesc == nil { + return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) + } + + return decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) +} + +func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + + exts := proto.RegisteredExtensions(m) + out := make([]int32, 0, len(exts)) + for id := range exts { + out = append(out, id) + } + return out, nil +} + +// fileDescEncodingByFilename finds the file descriptor for given filename, +// does marshalling on it and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingByFilename(name string) ([]byte, error) { + enc := proto.FileDescriptor(name) + if enc == nil { + return nil, fmt.Errorf("unknown file: %v", name) + } + fd, err := decodeFileDesc(enc) + if err != nil { + return nil, err + } + return proto.Marshal(fd) +} + +// parseMetadata finds the file descriptor bytes specified meta. +// For SupportPackageIsVersion4, m is the name of the proto file, we +// call proto.FileDescriptor to get the byte slice. +// For SupportPackageIsVersion3, m is a byte slice itself. +func parseMetadata(meta interface{}) ([]byte, bool) { + // Check if meta is the file name. + if fileNameForMeta, ok := meta.(string); ok { + return proto.FileDescriptor(fileNameForMeta), true + } + + // Check if meta is the byte slice. + if enc, ok := meta.([]byte); ok { + return enc, true + } + + return nil, false +} + +// fileDescEncodingContainingSymbol finds the file descriptor containing the given symbol, +// does marshalling on it and returns the marshalled result. +// The given symbol can be a type, a service or a method. +func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ([]byte, error) { + _, symbols := s.getSymbols() + fd := symbols[name] + if fd == nil { + // Check if it's a type name that was not present in the + // transitive dependencies of the registered services. 
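+		// Falling back to the global proto registry covers message types that
+		// are linked into the binary but not reachable from any registered
+		// service's file descriptor.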
+ if st, err := typeForName(name); err == nil { + fd, err = s.fileDescForType(st) + if err != nil { + return nil, err + } + } + } + + if fd == nil { + return nil, fmt.Errorf("unknown symbol: %v", name) + } + + return proto.Marshal(fd) +} + +// fileDescEncodingContainingExtension finds the file descriptor containing given extension, +// does marshalling on it and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32) ([]byte, error) { + st, err := typeForName(typeName) + if err != nil { + return nil, err + } + fd, err := fileDescContainingExtension(st, extNum) + if err != nil { + return nil, err + } + return proto.Marshal(fd) +} + +// allExtensionNumbersForTypeName returns all extension numbers for the given type. +func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { + st, err := typeForName(name) + if err != nil { + return nil, err + } + extNums, err := s.allExtensionNumbersForType(st) + if err != nil { + return nil, err + } + return extNums, nil +} + +// ServerReflectionInfo is the reflection service handler. +func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &rpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *rpb.ServerReflectionRequest_FileByFilename: + b, err := s.fileDescEncodingByFilename(req.FileByFilename) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.fileDescEncodingContainingExtension(typeName, extNum) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = 
&rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *rpb.ServerReflectionRequest_ListServices: + svcNames, _ := s.getSymbols() + serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) + for i, n := range svcNames { + serviceResponses[i] = &rpb.ServiceResponse{ + Name: n, + } + } + out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &rpb.ListServiceResponse{ + Service: serviceResponses, + }, + } + default: + return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go deleted file mode 100644 index 168cdb85..00000000 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ /dev/null @@ -1,308 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package bufconn provides a net.Conn implemented by a buffer and related -// dialing and listening functionality. -package bufconn - -import ( - "fmt" - "io" - "net" - "sync" - "time" -) - -// Listener implements a net.Listener that creates local, buffered net.Conns -// via its Accept and Dial method. -type Listener struct { - mu sync.Mutex - sz int - ch chan net.Conn - done chan struct{} -} - -// Implementation of net.Error providing timeout -type netErrorTimeout struct { - error -} - -func (e netErrorTimeout) Timeout() bool { return true } -func (e netErrorTimeout) Temporary() bool { return false } - -var errClosed = fmt.Errorf("closed") -var errTimeout net.Error = netErrorTimeout{error: fmt.Errorf("i/o timeout")} - -// Listen returns a Listener that can only be contacted by its own Dialers and -// creates buffered connections between the two. -func Listen(sz int) *Listener { - return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})} -} - -// Accept blocks until Dial is called, then returns a net.Conn for the server -// half of the connection. -func (l *Listener) Accept() (net.Conn, error) { - select { - case <-l.done: - return nil, errClosed - case c := <-l.ch: - return c, nil - } -} - -// Close stops the listener. -func (l *Listener) Close() error { - l.mu.Lock() - defer l.mu.Unlock() - select { - case <-l.done: - // Already closed. - break - default: - close(l.done) - } - return nil -} - -// Addr reports the address of the listener. -func (l *Listener) Addr() net.Addr { return addr{} } - -// Dial creates an in-memory full-duplex network connection, unblocks Accept by -// providing it the server half of the connection, and returns the client half -// of the connection. 
-func (l *Listener) Dial() (net.Conn, error) { - p1, p2 := newPipe(l.sz), newPipe(l.sz) - select { - case <-l.done: - return nil, errClosed - case l.ch <- &conn{p1, p2}: - return &conn{p2, p1}, nil - } -} - -type pipe struct { - mu sync.Mutex - - // buf contains the data in the pipe. It is a ring buffer of fixed capacity, - // with r and w pointing to the offset to read and write, respsectively. - // - // Data is read between [r, w) and written to [w, r), wrapping around the end - // of the slice if necessary. - // - // The buffer is empty if r == len(buf), otherwise if r == w, it is full. - // - // w and r are always in the range [0, cap(buf)) and [0, len(buf)]. - buf []byte - w, r int - - wwait sync.Cond - rwait sync.Cond - - // Indicate that a write/read timeout has occurred - wtimedout bool - rtimedout bool - - wtimer *time.Timer - rtimer *time.Timer - - closed bool - writeClosed bool -} - -func newPipe(sz int) *pipe { - p := &pipe{buf: make([]byte, 0, sz)} - p.wwait.L = &p.mu - p.rwait.L = &p.mu - - p.wtimer = time.AfterFunc(0, func() {}) - p.rtimer = time.AfterFunc(0, func() {}) - return p -} - -func (p *pipe) empty() bool { - return p.r == len(p.buf) -} - -func (p *pipe) full() bool { - return p.r < len(p.buf) && p.r == p.w -} - -func (p *pipe) Read(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - // Block until p has data. - for { - if p.closed { - return 0, io.ErrClosedPipe - } - if !p.empty() { - break - } - if p.writeClosed { - return 0, io.EOF - } - if p.rtimedout { - return 0, errTimeout - } - - p.rwait.Wait() - } - wasFull := p.full() - - n = copy(b, p.buf[p.r:len(p.buf)]) - p.r += n - if p.r == cap(p.buf) { - p.r = 0 - p.buf = p.buf[:p.w] - } - - // Signal a blocked writer, if any - if wasFull { - p.wwait.Signal() - } - - return n, nil -} - -func (p *pipe) Write(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.closed { - return 0, io.ErrClosedPipe - } - for len(b) > 0 { - // Block until p is not full. - for { - if p.closed || p.writeClosed { - return 0, io.ErrClosedPipe - } - if !p.full() { - break - } - if p.wtimedout { - return 0, errTimeout - } - - p.wwait.Wait() - } - wasEmpty := p.empty() - - end := cap(p.buf) - if p.w < p.r { - end = p.r - } - x := copy(p.buf[p.w:end], b) - b = b[x:] - n += x - p.w += x - if p.w > len(p.buf) { - p.buf = p.buf[:p.w] - } - if p.w == cap(p.buf) { - p.w = 0 - } - - // Signal a blocked reader, if any. - if wasEmpty { - p.rwait.Signal() - } - } - return n, nil -} - -func (p *pipe) Close() error { - p.mu.Lock() - defer p.mu.Unlock() - p.closed = true - // Signal all blocked readers and writers to return an error. - p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -func (p *pipe) closeWrite() error { - p.mu.Lock() - defer p.mu.Unlock() - p.writeClosed = true - // Signal all blocked readers and writers to return an error. 
- p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -type conn struct { - io.Reader - io.Writer -} - -func (c *conn) Close() error { - err1 := c.Reader.(*pipe).Close() - err2 := c.Writer.(*pipe).closeWrite() - if err1 != nil { - return err1 - } - return err2 -} - -func (c *conn) SetDeadline(t time.Time) error { - c.SetReadDeadline(t) - c.SetWriteDeadline(t) - return nil -} - -func (c *conn) SetReadDeadline(t time.Time) error { - p := c.Reader.(*pipe) - p.mu.Lock() - defer p.mu.Unlock() - p.rtimer.Stop() - p.rtimedout = false - if !t.IsZero() { - p.rtimer = time.AfterFunc(time.Until(t), func() { - p.mu.Lock() - defer p.mu.Unlock() - p.rtimedout = true - p.rwait.Broadcast() - }) - } - return nil -} - -func (c *conn) SetWriteDeadline(t time.Time) error { - p := c.Writer.(*pipe) - p.mu.Lock() - defer p.mu.Unlock() - p.wtimer.Stop() - p.wtimedout = false - if !t.IsZero() { - p.wtimer = time.AfterFunc(time.Until(t), func() { - p.mu.Lock() - defer p.mu.Unlock() - p.wtimedout = true - p.wwait.Broadcast() - }) - } - return nil -} - -func (*conn) LocalAddr() net.Addr { return addr{} } -func (*conn) RemoteAddr() net.Addr { return addr{} } - -type addr struct{} - -func (addr) Network() string { return "bufconn" } -func (addr) String() string { return "bufconn" } diff --git a/vendor/modules.txt b/vendor/modules.txt index 869501d1..e944accc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -6,20 +6,19 @@ cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version cloud.google.com/go/storage +# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 +github.com/Azure/go-ansiterm +github.com/Azure/go-ansiterm/winterm # github.com/Microsoft/go-winio v0.4.14 github.com/Microsoft/go-winio github.com/Microsoft/go-winio/pkg/guid -# github.com/Microsoft/hcsshim v0.8.6 -github.com/Microsoft/hcsshim/osversion +# github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 +github.com/Nvveen/Gotty # github.com/agext/levenshtein v1.2.2 github.com/agext/levenshtein -# github.com/apparentlymart/go-cidr v1.0.1 -github.com/apparentlymart/go-cidr/cidr # github.com/apparentlymart/go-textseg v1.0.0 github.com/apparentlymart/go-textseg/textseg -# github.com/armon/go-radix v1.0.0 -github.com/armon/go-radix -# github.com/aws/aws-sdk-go v1.25.3 +# github.com/aws/aws-sdk-go v1.15.78 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/awsutil @@ -29,7 +28,6 @@ github.com/aws/aws-sdk-go/aws/corehandlers github.com/aws/aws-sdk-go/aws/credentials github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds github.com/aws/aws-sdk-go/aws/credentials/endpointcreds -github.com/aws/aws-sdk-go/aws/credentials/processcreds github.com/aws/aws-sdk-go/aws/credentials/stscreds github.com/aws/aws-sdk-go/aws/csm github.com/aws/aws-sdk-go/aws/defaults @@ -41,14 +39,12 @@ github.com/aws/aws-sdk-go/aws/signer/v4 github.com/aws/aws-sdk-go/internal/ini github.com/aws/aws-sdk-go/internal/s3err github.com/aws/aws-sdk-go/internal/sdkio -github.com/aws/aws-sdk-go/internal/sdkmath github.com/aws/aws-sdk-go/internal/sdkrand github.com/aws/aws-sdk-go/internal/sdkuri github.com/aws/aws-sdk-go/internal/shareddefaults github.com/aws/aws-sdk-go/private/protocol github.com/aws/aws-sdk-go/private/protocol/eventstream 
github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi -github.com/aws/aws-sdk-go/private/protocol/json/jsonutil github.com/aws/aws-sdk-go/private/protocol/query github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest @@ -56,82 +52,49 @@ github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil github.com/aws/aws-sdk-go/service/s3 github.com/aws/aws-sdk-go/service/sts -github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d github.com/bgentry/go-netrc/netrc -# github.com/bgentry/speakeasy v0.1.0 -github.com/bgentry/speakeasy # github.com/cenkalti/backoff v2.2.1+incompatible -github.com/cenkalti/backoff +## explicit +# github.com/cenkalti/backoff/v3 v3.0.0 +github.com/cenkalti/backoff/v3 # github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c -github.com/containerd/continuity/fs +## explicit github.com/containerd/continuity/pathdriver -github.com/containerd/continuity/syscallx -github.com/containerd/continuity/sysx # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible -github.com/docker/distribution/digestset -github.com/docker/distribution/reference -github.com/docker/distribution/registry/api/errcode -# github.com/docker/docker v0.7.3-0.20190506211059-b20a14b54661 -github.com/docker/docker/api -github.com/docker/docker/api/types -github.com/docker/docker/api/types/blkiodev -github.com/docker/docker/api/types/container -github.com/docker/docker/api/types/events -github.com/docker/docker/api/types/filters -github.com/docker/docker/api/types/image -github.com/docker/docker/api/types/mount -github.com/docker/docker/api/types/network -github.com/docker/docker/api/types/registry -github.com/docker/docker/api/types/strslice -github.com/docker/docker/api/types/swarm -github.com/docker/docker/api/types/swarm/runtime -github.com/docker/docker/api/types/time -github.com/docker/docker/api/types/versions -github.com/docker/docker/api/types/volume -github.com/docker/docker/client -github.com/docker/docker/errdefs -github.com/docker/docker/pkg/archive -github.com/docker/docker/pkg/fileutils -github.com/docker/docker/pkg/idtools -github.com/docker/docker/pkg/ioutils -github.com/docker/docker/pkg/longpath -github.com/docker/docker/pkg/mount -github.com/docker/docker/pkg/pools -github.com/docker/docker/pkg/system # github.com/docker/go-connections v0.4.0 github.com/docker/go-connections/nat -github.com/docker/go-connections/sockets -github.com/docker/go-connections/tlsconfig # github.com/docker/go-units v0.4.0 github.com/docker/go-units -# github.com/fatih/color v1.7.0 -github.com/fatih/color -# github.com/gogo/protobuf v1.2.0 -github.com/gogo/protobuf/proto # github.com/golang/protobuf v1.3.4 github.com/golang/protobuf/proto github.com/golang/protobuf/protoc-gen-go/descriptor github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/empty 
github.com/golang/protobuf/ptypes/timestamp # github.com/google/go-cmp v0.3.1 +## explicit github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/uuid v1.1.1 -github.com/google/uuid # github.com/googleapis/gax-go/v2 v2.0.5 github.com/googleapis/gax-go/v2 # github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap # github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-cleanhttp +# github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 +github.com/hashicorp/go-cty/cty +github.com/hashicorp/go-cty/cty/convert +github.com/hashicorp/go-cty/cty/gocty +github.com/hashicorp/go-cty/cty/json +github.com/hashicorp/go-cty/cty/msgpack +github.com/hashicorp/go-cty/cty/set # github.com/hashicorp/go-getter v1.4.0 github.com/hashicorp/go-getter github.com/hashicorp/go-getter/helper/url @@ -139,7 +102,7 @@ github.com/hashicorp/go-getter/helper/url github.com/hashicorp/go-hclog # github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-plugin v1.0.1 +# github.com/hashicorp/go-plugin v1.2.0 github.com/hashicorp/go-plugin github.com/hashicorp/go-plugin/internal/plugin # github.com/hashicorp/go-safetemp v1.0.0 @@ -150,104 +113,49 @@ github.com/hashicorp/go-uuid github.com/hashicorp/go-version # github.com/hashicorp/golang-lru v0.5.1 github.com/hashicorp/golang-lru/simplelru -# github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f -github.com/hashicorp/hcl -github.com/hashicorp/hcl/hcl/ast -github.com/hashicorp/hcl/hcl/parser -github.com/hashicorp/hcl/hcl/scanner -github.com/hashicorp/hcl/hcl/strconv -github.com/hashicorp/hcl/hcl/token -github.com/hashicorp/hcl/json/parser -github.com/hashicorp/hcl/json/scanner -github.com/hashicorp/hcl/json/token # github.com/hashicorp/hcl/v2 v2.0.0 github.com/hashicorp/hcl/v2 -github.com/hashicorp/hcl/v2/ext/dynblock -github.com/hashicorp/hcl/v2/ext/typeexpr -github.com/hashicorp/hcl/v2/gohcl -github.com/hashicorp/hcl/v2/hcldec -github.com/hashicorp/hcl/v2/hcled -github.com/hashicorp/hcl/v2/hclparse github.com/hashicorp/hcl/v2/hclsyntax -github.com/hashicorp/hcl/v2/hclwrite -github.com/hashicorp/hcl/v2/json # github.com/hashicorp/logutils v1.0.0 github.com/hashicorp/logutils -# github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 -github.com/hashicorp/terraform-config-inspect/tfconfig # github.com/hashicorp/terraform-json v0.4.0 github.com/hashicorp/terraform-json -# github.com/hashicorp/terraform-plugin-sdk v1.9.0 -github.com/hashicorp/terraform-plugin-sdk/acctest -github.com/hashicorp/terraform-plugin-sdk/helper/customdiff -github.com/hashicorp/terraform-plugin-sdk/helper/hashcode -github.com/hashicorp/terraform-plugin-sdk/helper/logging -github.com/hashicorp/terraform-plugin-sdk/helper/resource -github.com/hashicorp/terraform-plugin-sdk/helper/schema -github.com/hashicorp/terraform-plugin-sdk/httpclient -github.com/hashicorp/terraform-plugin-sdk/internal/addrs -github.com/hashicorp/terraform-plugin-sdk/internal/command/format 
-github.com/hashicorp/terraform-plugin-sdk/internal/configs -github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload -github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema -github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim -github.com/hashicorp/terraform-plugin-sdk/internal/dag -github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig -github.com/hashicorp/terraform-plugin-sdk/internal/flatmap -github.com/hashicorp/terraform-plugin-sdk/internal/helper/config -github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean -github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin -github.com/hashicorp/terraform-plugin-sdk/internal/httpclient -github.com/hashicorp/terraform-plugin-sdk/internal/initwd -github.com/hashicorp/terraform-plugin-sdk/internal/lang -github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr -github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs -github.com/hashicorp/terraform-plugin-sdk/internal/modsdir -github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps -github.com/hashicorp/terraform-plugin-sdk/internal/plans -github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange -github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert -github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery -github.com/hashicorp/terraform-plugin-sdk/internal/providers -github.com/hashicorp/terraform-plugin-sdk/internal/provisioners -github.com/hashicorp/terraform-plugin-sdk/internal/registry -github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc -github.com/hashicorp/terraform-plugin-sdk/internal/registry/response -github.com/hashicorp/terraform-plugin-sdk/internal/states -github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile -github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags -github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5 -github.com/hashicorp/terraform-plugin-sdk/internal/version -github.com/hashicorp/terraform-plugin-sdk/meta -github.com/hashicorp/terraform-plugin-sdk/plugin -github.com/hashicorp/terraform-plugin-sdk/terraform -# github.com/hashicorp/terraform-plugin-test v1.2.0 +# github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.1 +## explicit +github.com/hashicorp/terraform-plugin-sdk/v2/acctest +github.com/hashicorp/terraform-plugin-sdk/v2/diag +github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff +github.com/hashicorp/terraform-plugin-sdk/v2/helper/hashcode +github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging +github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource +github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema +github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs +github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema +github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim +github.com/hashicorp/terraform-plugin-sdk/v2/internal/diagutils +github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin +github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange +github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert +github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags +github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5 
+github.com/hashicorp/terraform-plugin-sdk/v2/plugin +github.com/hashicorp/terraform-plugin-sdk/v2/terraform +# github.com/hashicorp/terraform-plugin-test v1.3.0 github.com/hashicorp/terraform-plugin-test -# github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 -github.com/hashicorp/terraform-svchost -github.com/hashicorp/terraform-svchost/auth -github.com/hashicorp/terraform-svchost/disco # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d github.com/hashicorp/yamux -# github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200414193849-b443faf152d2 +# github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200605105101-6a0dcd01d169 +## explicit github.com/iwarapter/pingaccess-sdk-go/pingaccess -# github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af +# github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 github.com/jmespath/go-jmespath # github.com/konsorten/go-windows-terminal-sequences v1.0.2 github.com/konsorten/go-windows-terminal-sequences -# github.com/mattn/go-colorable v0.1.1 -github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.12 -github.com/mattn/go-isatty -# github.com/mitchellh/cli v1.0.0 -github.com/mitchellh/cli -# github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db -github.com/mitchellh/colorstring # github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-homedir -# github.com/mitchellh/go-testing-interface v1.0.0 +# github.com/mitchellh/go-testing-interface v1.0.4 github.com/mitchellh/go-testing-interface # github.com/mitchellh/go-wordwrap v1.0.0 github.com/mitchellh/go-wordwrap @@ -262,31 +170,50 @@ github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.1 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runc v1.0.0-rc6 -github.com/opencontainers/runc/libcontainer/system +# github.com/opencontainers/runc v1.0.0-rc9 github.com/opencontainers/runc/libcontainer/user +# github.com/ory/dockertest/v3 v3.6.0 +## explicit +github.com/ory/dockertest/v3 +github.com/ory/dockertest/v3/docker +github.com/ory/dockertest/v3/docker/opts +github.com/ory/dockertest/v3/docker/pkg/archive +github.com/ory/dockertest/v3/docker/pkg/fileutils +github.com/ory/dockertest/v3/docker/pkg/homedir +github.com/ory/dockertest/v3/docker/pkg/idtools +github.com/ory/dockertest/v3/docker/pkg/ioutils +github.com/ory/dockertest/v3/docker/pkg/jsonmessage +github.com/ory/dockertest/v3/docker/pkg/longpath +github.com/ory/dockertest/v3/docker/pkg/mount +github.com/ory/dockertest/v3/docker/pkg/pools +github.com/ory/dockertest/v3/docker/pkg/stdcopy +github.com/ory/dockertest/v3/docker/pkg/system +github.com/ory/dockertest/v3/docker/pkg/term +github.com/ory/dockertest/v3/docker/pkg/term/windows +github.com/ory/dockertest/v3/docker/types +github.com/ory/dockertest/v3/docker/types/blkiodev +github.com/ory/dockertest/v3/docker/types/container +github.com/ory/dockertest/v3/docker/types/filters +github.com/ory/dockertest/v3/docker/types/mount +github.com/ory/dockertest/v3/docker/types/network 
+github.com/ory/dockertest/v3/docker/types/registry +github.com/ory/dockertest/v3/docker/types/strslice +github.com/ory/dockertest/v3/docker/types/versions # github.com/pkg/errors v0.9.1 +## explicit github.com/pkg/errors -# github.com/posener/complete v1.2.1 -github.com/posener/complete -github.com/posener/complete/cmd -github.com/posener/complete/cmd/install -github.com/posener/complete/match # github.com/sirupsen/logrus v1.5.0 +## explicit github.com/sirupsen/logrus -# github.com/spf13/afero v1.2.2 -github.com/spf13/afero -github.com/spf13/afero/mem -# github.com/testcontainers/testcontainers-go v0.4.0 -github.com/testcontainers/testcontainers-go -github.com/testcontainers/testcontainers-go/wait # github.com/tidwall/gjson v1.3.2 +## explicit github.com/tidwall/gjson # github.com/tidwall/match v1.0.1 github.com/tidwall/match # github.com/tidwall/pretty v1.0.0 github.com/tidwall/pretty # github.com/tidwall/sjson v1.0.4 +## explicit github.com/tidwall/sjson # github.com/ulikunitz/xz v0.5.5 github.com/ulikunitz/xz @@ -303,10 +230,7 @@ github.com/zclconf/go-cty/cty/function github.com/zclconf/go-cty/cty/function/stdlib github.com/zclconf/go-cty/cty/gocty github.com/zclconf/go-cty/cty/json -github.com/zclconf/go-cty/cty/msgpack github.com/zclconf/go-cty/cty/set -# github.com/zclconf/go-cty-yaml v1.0.1 -github.com/zclconf/go-cty-yaml # go.opencensus.io v0.22.0 go.opencensus.io go.opencensus.io/internal @@ -324,26 +248,15 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 -golang.org/x/crypto/bcrypt -golang.org/x/crypto/blowfish -golang.org/x/crypto/cast5 -golang.org/x/crypto/openpgp -golang.org/x/crypto/openpgp/armor -golang.org/x/crypto/openpgp/elgamal -golang.org/x/crypto/openpgp/errors -golang.org/x/crypto/openpgp/packet -golang.org/x/crypto/openpgp/s2k # golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e +## explicit golang.org/x/net/context golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna -golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries -golang.org/x/net/proxy golang.org/x/net/trace # golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/oauth2 @@ -351,9 +264,8 @@ golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20190423024810-112230192c58 -golang.org/x/sync/errgroup # golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa +## explicit golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.3.2 @@ -426,9 +338,12 @@ google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/naming google.golang.org/grpc/peer +google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -google.golang.org/grpc/test/bufconn +# gopkg.in/yaml.v2 v2.2.8 +## explicit From 0ba7b48b6d0586dbdf0bf80898370d4ddc5910e7 Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Sat, 6 Jun 2020 17:13:25 +0100 Subject: [PATCH 02/23] cleanup the tests for the third party service --- 
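The updated steps pin every default attribute with resource.TestCheckResourceAttr
instead of only asserting that the resource exists. A minimal sketch of the
repeated check block as a reusable helper (checkThirdPartyServiceDefaults is a
hypothetical name, not part of this patch):

    // checkThirdPartyServiceDefaults bundles the default-value assertions so
    // each test step can reuse them alongside its step-specific checks.
    func checkThirdPartyServiceDefaults(resourceName string) resource.TestCheckFunc {
        return resource.ComposeTestCheckFunc(
            testAccCheckPingAccessThirdPartyServiceExists(resourceName),
            resource.TestCheckResourceAttr(resourceName, "secure", "false"),
            resource.TestCheckResourceAttr(resourceName, "trusted_certificate_group_id", "0"),
            resource.TestCheckResourceAttr(resourceName, "max_connections", "-1"),
            resource.TestCheckResourceAttr(resourceName, "use_proxy", "false"),
        )
    }
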
...rce_pingaccess_third_party_service_test.go | 32 ++++++++++++++++--- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/pingaccess/resource_pingaccess_third_party_service_test.go b/pingaccess/resource_pingaccess_third_party_service_test.go index cde341f5..01a7079d 100644 --- a/pingaccess/resource_pingaccess_third_party_service_test.go +++ b/pingaccess/resource_pingaccess_third_party_service_test.go @@ -12,6 +12,8 @@ import ( ) func TestAccPingAccessThirdPartyService(t *testing.T) { + resourceName := "pingaccess_third_party_service.demo_tps" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -20,13 +22,33 @@ func TestAccPingAccessThirdPartyService(t *testing.T) { { Config: testAccPingAccessThirdPartyServiceConfig("demo service", "localhost:1234"), Check: resource.ComposeTestCheckFunc( - testAccCheckPingAccessThirdPartyServiceExists("pingaccess_third_party_service.demo_tps"), + testAccCheckPingAccessThirdPartyServiceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", "demo service"), + resource.TestCheckResourceAttr(resourceName, "secure", "false"), + resource.TestCheckResourceAttr(resourceName, "trusted_certificate_group_id", "0"), + resource.TestCheckResourceAttr(resourceName, "max_connections", "-1"), + resource.TestCheckResourceAttr(resourceName, "skip_hostname_verification", "false"), + resource.TestCheckNoResourceAttr(resourceName, "expected_hostname"), + resource.TestCheckResourceAttr(resourceName, "availability_profile_id", "1"), + resource.TestCheckResourceAttr(resourceName, "load_balancing_strategy_id", "0"), + resource.TestCheckResourceAttr(resourceName, "use_proxy", "false"), + resource.TestCheckNoResourceAttr(resourceName, "host_value"), ), }, { Config: testAccPingAccessThirdPartyServiceConfig("demo service", "localhost:1235"), Check: resource.ComposeTestCheckFunc( - testAccCheckPingAccessThirdPartyServiceExists("pingaccess_third_party_service.demo_tps"), + testAccCheckPingAccessThirdPartyServiceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", "demo service"), + resource.TestCheckResourceAttr(resourceName, "secure", "false"), + resource.TestCheckResourceAttr(resourceName, "trusted_certificate_group_id", "0"), + resource.TestCheckResourceAttr(resourceName, "max_connections", "-1"), + resource.TestCheckResourceAttr(resourceName, "skip_hostname_verification", "false"), + resource.TestCheckNoResourceAttr(resourceName, "expected_hostname"), + resource.TestCheckResourceAttr(resourceName, "availability_profile_id", "1"), + resource.TestCheckResourceAttr(resourceName, "load_balancing_strategy_id", "0"), + resource.TestCheckResourceAttr(resourceName, "use_proxy", "false"), + resource.TestCheckNoResourceAttr(resourceName, "host_value"), ), }, }, @@ -55,7 +77,7 @@ func testAccCheckPingAccessThirdPartyServiceExists(n string) resource.TestCheckF } if rs.Primary.ID == "" || rs.Primary.ID == "0" { - return fmt.Errorf("No third party service ID is set") + return fmt.Errorf("no third party service ID is set") } conn := testAccProvider.Meta().(*pa.Client).ThirdPartyServices @@ -64,11 +86,11 @@ func testAccCheckPingAccessThirdPartyServiceExists(n string) resource.TestCheckF }) if err != nil { - return fmt.Errorf("Error: ThirdPartyService (%s) not found", n) + return fmt.Errorf("error: ThirdPartyService (%s) not found", n) } if *result.Name != rs.Primary.Attributes["name"] { - return fmt.Errorf("Error: ThirdPartyService response (%s) didnt match state (%s)", 
-			*result.Name, rs.Primary.Attributes["name"])
+		return fmt.Errorf("error: ThirdPartyService response (%s) didn't match state (%s)", *result.Name, rs.Primary.Attributes["name"])
 	}
 
 	return nil

From 568e26fe291ab8525a91de51caabf0f3fc5dab9c Mon Sep 17 00:00:00 2001
From: Iain Adams
Date: Tue, 9 Jun 2020 13:06:47 +0100
Subject: [PATCH 03/23] WIP switching to context aware functions and use of
 diag.Diagnostics

---
 Makefile | 2 +-
 docs/resources/pingaccess_websession.md | 1 +
 func-tests/main.tf | 45 ++
 go.mod | 1 +
 pingaccess/config.go | 35 +-
 .../data_source_pingaccess_acme_default.go | 15 +-
 .../data_source_pingaccess_certificate.go | 13 +-
 ...data_source_pingaccess_certificate_test.go | 2 +-
 pingaccess/data_source_pingaccess_keypair.go | 13 +-
 .../data_source_pingaccess_keypair_test.go | 2 +-
 ...ingaccess_pingfederate_runtime_metadata.go | 91 ++-
 ...cess_pingfederate_runtime_metadata_test.go | 2 +-
 ...e_pingaccess_trusted_certificate_groups.go | 27 +-
 ...gaccess_trusted_certificate_groups_test.go | 7 +-
 pingaccess/diff_suppress_funcs.go | 3 +-
 pingaccess/provider.go | 22 +-
 pingaccess/provider_test.go | 49 +-
 ...ource_pingaccess_access_token_validator.go | 54 +-
 pingaccess/resource_pingaccess_acme_server.go | 34 +-
 .../resource_pingaccess_acme_server_test.go | 5 +-
 pingaccess/resource_pingaccess_application.go | 93 +--
 ...esource_pingaccess_application_resource.go | 207 +++---
 ...ce_pingaccess_application_resource_test.go | 8 +-
 .../resource_pingaccess_application_test.go | 9 +-
 ...source_pingaccess_auth_token_management.go | 50 +-
 .../resource_pingaccess_authn_req_list.go | 39 +-
 ...resource_pingaccess_authn_req_list_test.go | 5 +-
 pingaccess/resource_pingaccess_certificate.go | 103 ++-
 .../resource_pingaccess_certificate_test.go | 2 +-
 .../resource_pingaccess_engine_listener.go | 69 +-
 ...esource_pingaccess_engine_listener_test.go | 5 +-
 .../resource_pingaccess_hsm_provider.go | 45 +-
 .../resource_pingaccess_hsm_provider_test.go | 16 +-
 ...gaccess_http_config_request_host_source.go | 52 +-
 ...ss_http_config_request_host_source_test.go | 3 +-
 .../resource_pingaccess_https_listener.go | 71 ++-
 ...resource_pingaccess_https_listener_test.go | 21 +-
 .../resource_pingaccess_identity_mapping.go | 85 +--
 ...source_pingaccess_identity_mapping_test.go | 151 ++++-
 pingaccess/resource_pingaccess_keypair.go | 129 ++--
 .../resource_pingaccess_keypair_test.go | 2 +-
 .../resource_pingaccess_oauth_server.go | 77 +--
 .../resource_pingaccess_oauth_server_test.go | 2 +-
 .../resource_pingaccess_pingfederate_admin.go | 92 ++-
 ...urce_pingaccess_pingfederate_admin_test.go | 7 +-
 .../resource_pingaccess_pingfederate_oauth.go | 83 +--
 ...urce_pingaccess_pingfederate_oauth_test.go | 3 +-
 ...esource_pingaccess_pingfederate_runtime.go | 75 +--
 ...ce_pingaccess_pingfederate_runtime_test.go | 5 +-
 pingaccess/resource_pingaccess_rule.go | 101 +--
 pingaccess/resource_pingaccess_rule_test.go | 42 +-
 pingaccess/resource_pingaccess_rulesets.go | 117 ++--
 .../resource_pingaccess_rulesets_test.go | 55 +-
 pingaccess/resource_pingaccess_site.go | 162 ++---
 .../resource_pingaccess_site_authenticator.go | 55 +-
 ...urce_pingaccess_site_authenticator_test.go | 102 ++-
 pingaccess/resource_pingaccess_site_test.go | 82 ++-
 ...resource_pingaccess_third_party_service.go | 78 +--
 ...rce_pingaccess_third_party_service_test.go | 3 +
 ...e_pingaccess_trusted_certificate_groups.go | 90 ++-
 ...gaccess_trusted_certificate_groups_test.go | 6 -
 pingaccess/resource_pingaccess_virtualhost.go | 84 ++-
 .../resource_pingaccess_virtualhost_test.go | 43 +-
pingaccess/resource_pingaccess_websession.go | 177 +++--- .../resource_pingaccess_websession_test.go | 4 +- pingaccess/structures.go | 116 +++- pingaccess/validators.go | 83 +-- pingaccess/validators_test.go | 591 ++++++++---------- 68 files changed, 1943 insertions(+), 1980 deletions(-) diff --git a/Makefile b/Makefile index 21f27f13..ecac97c8 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ func-init: @cd func-tests && terraform init func-plan: - @cd func-tests && terraform plan + @cd func-tests && TF_LOG=TRACE TF_LOG_PATH=./terraform.log terraform plan func-apply: @cd func-tests && TF_LOG=TRACE TF_LOG_PATH=./terraform.log terraform apply -auto-approve diff --git a/docs/resources/pingaccess_websession.md b/docs/resources/pingaccess_websession.md index e606a141..1600c90d 100644 --- a/docs/resources/pingaccess_websession.md +++ b/docs/resources/pingaccess_websession.md @@ -36,6 +36,7 @@ The following arguments are supported: - [`name`](#name) - The name of the web session. - [`oidc_login_type`](#oidc_login_type) - ['Code' or 'POST' or 'x_post']: The web session token type. +- [`pkce_challenge_type`](#pkce_challenge_type) - ['SHA256' or 'OFF']: Specify the code_challenge_method to use for PKCE during the Code login flow. OFF signifies to not use PKCE. - [`pfsession_state_cache_in_seconds`](#pfsession_state_cache_in_seconds) - Specify the number of seconds to cache PingFederate Session State information. diff --git a/func-tests/main.tf b/func-tests/main.tf index 4de5e4d1..f4b4d949 100644 --- a/func-tests/main.tf +++ b/func-tests/main.tf @@ -95,3 +95,48 @@ resource "pingaccess_acme_server" "acc_test" { name = "foo" url = "https://host.docker.internal:14000/dir" } + +resource "pingaccess_identity_mapping" "demo_identity_mapping" { + class_name = "com.pingidentity.pa.identitymappings.JwtIdentityMapping" + name = "demo_identity_mapping" + + configuration = < 0 { if err := d.Set("acme_accounts", flattenLinkViewList(input.AcmeAccounts)); err != nil { - return err + diags = append(diags, diag.FromErr(err)) } } - return nil + return diags } func resourcePingAccessAcmeServerReadData(d *schema.ResourceData) *pingaccess.AcmeServerView { diff --git a/pingaccess/resource_pingaccess_acme_server_test.go b/pingaccess/resource_pingaccess_acme_server_test.go index 32ab5933..fc44ff47 100644 --- a/pingaccess/resource_pingaccess_acme_server_test.go +++ b/pingaccess/resource_pingaccess_acme_server_test.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -58,8 +57,8 @@ func testAccCheckPingAccessAcmeServerExists(n string) resource.TestCheckFunc { return fmt.Errorf("No AcmeServer ID is set") } - conn := testAccProvider.Meta().(*pingaccess.Client).Acme - result, _, err := conn.GetAcmeServerCommand(&pingaccess.GetAcmeServerCommandInput{ + conn := testAccProvider.Meta().(*pa.Client).Acme + result, _, err := conn.GetAcmeServerCommand(&pa.GetAcmeServerCommandInput{ AcmeServerId: rs.Primary.ID, }) diff --git a/pingaccess/resource_pingaccess_application.go b/pingaccess/resource_pingaccess_application.go index 5ced95c2..cd98787c 100644 --- a/pingaccess/resource_pingaccess_application.go +++ b/pingaccess/resource_pingaccess_application.go @@ -1,22 +1,24 @@ package pingaccess import ( + "context" "fmt" "log" "strconv" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessApplication() *schema.Resource { return &schema.Resource{ - Create: resourcePingAccessApplicationCreate, - Read: resourcePingAccessApplicationRead, - Update: resourcePingAccessApplicationUpdate, - Delete: resourcePingAccessApplicationDelete, + CreateContext: resourcePingAccessApplicationCreate, + ReadContext: resourcePingAccessApplicationRead, + UpdateContext: resourcePingAccessApplicationUpdate, + DeleteContext: resourcePingAccessApplicationDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: resourcePingAccessApplicationSchema(), @@ -122,33 +124,33 @@ func resourcePingAccessApplicationSchema() map[string]*schema.Schema { } } -func resourcePingAccessApplicationCreate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessApplicationCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Applications input := &pa.AddApplicationCommandInput{ Body: *resourcePingAccessApplicationReadData(d), } result, _, err := svc.AddApplicationCommand(input) if err != nil { - return fmt.Errorf("Error creating application: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create Application: %s", err))} } d.SetId(result.Id.String()) return resourcePingAccessApplicationReadResult(d, &input.Body) } -func resourcePingAccessApplicationRead(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessApplicationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Applications input := &pa.GetApplicationCommandInput{ Id: d.Id(), } result, _, err := svc.GetApplicationCommand(input) if err != nil { - return fmt.Errorf("Error reading application: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read Application: %s", err))} } return resourcePingAccessApplicationReadResult(d, result) } -func resourcePingAccessApplicationUpdate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessApplicationUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Applications input := pa.UpdateApplicationCommandInput{ Body: *resourcePingAccessApplicationReadData(d), @@ -156,12 +158,12 @@ func resourcePingAccessApplicationUpdate(d *schema.ResourceData, m interface{}) } result, _, err := svc.UpdateApplicationCommand(&input) if err != nil { - return fmt.Errorf("Error updating application: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update Application: %s", err))} } return resourcePingAccessApplicationReadResult(d, result) } -func resourcePingAccessApplicationDelete(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessApplicationDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Printf("[INFO] resourcePingAccessApplicationDelete") svc := m.(*pa.Client).Applications @@ -173,54 +175,55 @@ func resourcePingAccessApplicationDelete(d *schema.ResourceData, m interface{}) log.Printf("[INFO] DeleteApplicationCommandInput: %s", input.Id) _, _, err := svc.DeleteApplicationCommand(input) if err != nil { - return fmt.Errorf("Error deleting virtualhost: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete 
Application: %s", err))} } log.Println("[DEBUG] End - resourcePingAccessSiteDelete") return nil } -func resourcePingAccessApplicationReadResult(d *schema.ResourceData, rv *pa.ApplicationView) (err error) { - setResourceDataInt(d, "access_validator_id", rv.AccessValidatorId) - setResourceDataInt(d, "agent_id", rv.AgentId) - setResourceDataString(d, "application_type", rv.ApplicationType) - setResourceDataBool(d, "case_sensitive_path", rv.CaseSensitivePath) - setResourceDataString(d, "context_root", rv.ContextRoot) - setResourceDataString(d, "default_auth_type", rv.DefaultAuthType) - setResourceDataString(d, "description", rv.Description) - setResourceDataString(d, "destination", rv.Destination) - setResourceDataBool(d, "enabled", rv.Enabled) - setResourceDataString(d, "name", rv.Name) - setResourceDataString(d, "realm", rv.Realm) - setResourceDataBool(d, "require_https", rv.RequireHTTPS) +func resourcePingAccessApplicationReadResult(d *schema.ResourceData, rv *pa.ApplicationView) diag.Diagnostics { + var diags diag.Diagnostics + setResourceDataIntWithDiagnostic(d, "access_validator_id", rv.AccessValidatorId, &diags) + setResourceDataIntWithDiagnostic(d, "agent_id", rv.AgentId, &diags) + setResourceDataStringWithDiagnostic(d, "application_type", rv.ApplicationType, &diags) + setResourceDataBoolWithDiagnostic(d, "case_sensitive_path", rv.CaseSensitivePath, &diags) + setResourceDataStringWithDiagnostic(d, "context_root", rv.ContextRoot, &diags) + setResourceDataStringWithDiagnostic(d, "default_auth_type", rv.DefaultAuthType, &diags) + setResourceDataStringWithDiagnostic(d, "description", rv.Description, &diags) + setResourceDataStringWithDiagnostic(d, "destination", rv.Destination, &diags) + setResourceDataBoolWithDiagnostic(d, "enabled", rv.Enabled, &diags) + setResourceDataStringWithDiagnostic(d, "name", rv.Name, &diags) + setResourceDataStringWithDiagnostic(d, "realm", rv.Realm, &diags) + setResourceDataBoolWithDiagnostic(d, "require_https", rv.RequireHTTPS, &diags) siteID := strconv.Itoa(*rv.SiteId) - setResourceDataString(d, "site_id", &siteID) - setResourceDataBool(d, "spa_support_enabled", rv.SpaSupportEnabled) + setResourceDataStringWithDiagnostic(d, "site_id", &siteID, &diags) + setResourceDataBoolWithDiagnostic(d, "spa_support_enabled", rv.SpaSupportEnabled, &diags) if rv.VirtualHostIds != nil { - vhs := []string{} + var vhs []string for _, vh := range *rv.VirtualHostIds { vhs = append(vhs, strconv.Itoa(*vh)) } if err := d.Set("virtual_host_ids", vhs); err != nil { - return err + diags = append(diags, diag.FromErr(err)) } } if rv.WebSessionId != nil { - d.Set("web_session_id", strconv.Itoa(*rv.WebSessionId)) + setResourceDataStringWithDiagnostic(d, "web_session_id", String(strconv.Itoa(*rv.WebSessionId)), &diags) } if rv.IdentityMappingIds != nil && (*rv.IdentityMappingIds["Web"] != 0 || *rv.IdentityMappingIds["API"] != 0) { - if err = d.Set("identity_mapping_ids", flattenIdentityMappingIds(rv.IdentityMappingIds)); err != nil { - return err + if err := d.Set("identity_mapping_ids", flattenIdentityMappingIds(rv.IdentityMappingIds)); err != nil { + diags = append(diags, diag.FromErr(err)) } } if rv.Policy != nil && (len(*rv.Policy["API"]) > 0 || len(*rv.Policy["Web"]) > 0) { if err := d.Set("policy", flattenPolicy(rv.Policy)); err != nil { - return err + diags = append(diags, diag.FromErr(err)) } } - return nil + return diags } func resourcePingAccessApplicationReadData(d *schema.ResourceData) *pa.ApplicationView { @@ -243,40 +246,40 @@ func resourcePingAccessApplicationReadData(d 
*schema.ResourceData) *pa.Applicati VirtualHostIds: &vhIds, } - if _, ok := d.GetOkExists("access_validator_id"); ok { + if _, ok := d.GetOk("access_validator_id"); ok { application.AccessValidatorId = Int(d.Get("access_validator_id").(int)) } - if _, ok := d.GetOkExists("case_sensitive_path"); ok { + if _, ok := d.GetOk("case_sensitive_path"); ok { application.CaseSensitivePath = Bool(d.Get("case_sensitive_path").(bool)) } - if _, ok := d.GetOkExists("description"); ok { + if _, ok := d.GetOk("description"); ok { application.Description = String(d.Get("description").(string)) } - if v, ok := d.GetOkExists("destination"); ok { + if v, ok := d.GetOk("destination"); ok { application.Destination = String(v.(string)) } - if _, ok := d.GetOkExists("enabled"); ok { + if _, ok := d.GetOk("enabled"); ok { application.Enabled = Bool(d.Get("enabled").(bool)) } - if _, ok := d.GetOkExists("realm"); ok { + if _, ok := d.GetOk("realm"); ok { application.Realm = String(d.Get("realm").(string)) } - if _, ok := d.GetOkExists("require_https"); ok { + if _, ok := d.GetOk("require_https"); ok { application.RequireHTTPS = Bool(d.Get("require_https").(bool)) } - if _, ok := d.GetOkExists("web_session_id"); ok { + if _, ok := d.GetOk("web_session_id"); ok { webID, _ := strconv.Atoi(d.Get("web_session_id").(string)) application.WebSessionId = Int(webID) } - if val, ok := d.GetOkExists("identity_mapping_ids"); ok { + if val, ok := d.GetOk("identity_mapping_ids"); ok { if len(val.([]interface{})) > 0 { application.IdentityMappingIds = make(map[string]*int) idMapping := val.([]interface{})[0].(map[string]interface{}) @@ -291,7 +294,7 @@ func resourcePingAccessApplicationReadData(d *schema.ResourceData) *pa.Applicati } } - if val, ok := d.GetOkExists("policy"); ok { + if val, ok := d.GetOk("policy"); ok { application.Policy = expandPolicy(val.([]interface{})) } diff --git a/pingaccess/resource_pingaccess_application_resource.go b/pingaccess/resource_pingaccess_application_resource.go index d1a15472..2619fe3c 100644 --- a/pingaccess/resource_pingaccess_application_resource.go +++ b/pingaccess/resource_pingaccess_application_resource.go @@ -1,23 +1,24 @@ package pingaccess import ( + "context" "fmt" "strconv" "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessApplicationResource() *schema.Resource { return &schema.Resource{ - Create: resourcePingAccessApplicationResourceCreate, - Read: resourcePingAccessApplicationResourceRead, - Update: resourcePingAccessApplicationResourceUpdate, - Delete: resourcePingAccessApplicationResourceDelete, + CreateContext: resourcePingAccessApplicationResourceCreate, + ReadContext: resourcePingAccessApplicationResourceRead, + UpdateContext: resourcePingAccessApplicationResourceUpdate, + DeleteContext: resourcePingAccessApplicationResourceDelete, Importer: &schema.ResourceImporter{ - State: resourcePingAccessApplicationResourceImport, + StateContext: resourcePingAccessApplicationResourceImport, }, Schema: resourcePingAccessApplicationResourceSchema(), @@ -26,39 +27,39 @@ func resourcePingAccessApplicationResource() *schema.Resource { func resourcePingAccessApplicationResourceSchema() map[string]*schema.Schema { return map[string]*schema.Schema{ - anonymous: &schema.Schema{ + "anonymous": { Type: schema.TypeBool, Optional: true, }, - applicationID: &schema.Schema{ + 
"application_id": { Type: schema.TypeString, Required: true, }, - auditLevel: &schema.Schema{ + "audit_level": { Type: schema.TypeString, Optional: true, }, - defaultAuthTypeOverride: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateWebOrAPI, + "default_auth_type_override": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: validateWebOrAPI, }, - enabled: &schema.Schema{ + "enabled": { Type: schema.TypeBool, Optional: true, }, - methods: &schema.Schema{ + "methods": { Type: schema.TypeSet, Required: true, Elem: &schema.Schema{ Type: schema.TypeString, }, }, - name: &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, }, - pathPatterns: &schema.Schema{ + "path_patterns": { Type: schema.TypeSet, Optional: true, Elem: &schema.Resource{ @@ -74,7 +75,7 @@ func resourcePingAccessApplicationResourceSchema() map[string]*schema.Schema { }, }, }, - pathPrefixes: &schema.Schema{ + "path_prefixes": { Type: schema.TypeSet, Optional: true, Deprecated: "To be removed in a future release; please use 'path_patterns' instead", @@ -83,69 +84,71 @@ func resourcePingAccessApplicationResourceSchema() map[string]*schema.Schema { }, }, "policy": applicationPolicySchema(), - rootResource: &schema.Schema{ + "root_resource": { Type: schema.TypeBool, Optional: true, }, - "unprotected": &schema.Schema{ + "unprotected": { Type: schema.TypeBool, Optional: true, }, } } -func resourcePingAccessApplicationResourceCreate(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).Applications +func resourcePingAccessApplicationResourceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + svc := m.(*pa.Client).Applications applicationID := d.Get("application_id").(string) if d.Get("root_resource").(bool) { //Root Resources are created automatically, if one exists we will import it instead of failing to create. 
- input := pingaccess.GetApplicationResourcesCommandInput{ + input := pa.GetApplicationResourcesCommandInput{ Id: applicationID, Name: "Root Resource", } result, _, err := svc.GetApplicationResourcesCommand(&input) if err != nil { - return fmt.Errorf("Error creating application resource: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create ApplicationResource: %s", err))} } rv := result.Items[0] d.SetId(rv.Id.String()) - d.Set("application_id", strconv.Itoa(*rv.ApplicationId)) - return resourcePingAccessApplicationResourceUpdate(d, m) + setResourceDataStringWithDiagnostic(d, "application_id", String(strconv.Itoa(*rv.ApplicationId)), &diags) + return resourcePingAccessApplicationResourceUpdate(ctx, d, m) } - input := pingaccess.AddApplicationResourceCommandInput{ + input := pa.AddApplicationResourceCommandInput{ Id: applicationID, Body: *resourcePingAccessApplicationResourceReadData(d), } result, _, err := svc.AddApplicationResourceCommand(&input) if err != nil { - return fmt.Errorf("Error creating application resource: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create ApplicationResource: %s", err))} } d.SetId(result.Id.String()) return resourcePingAccessApplicationResourceReadResult(d, result) } -func resourcePingAccessApplicationResourceRead(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).Applications - input := &pingaccess.GetApplicationResourceCommandInput{ +func resourcePingAccessApplicationResourceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).Applications + input := &pa.GetApplicationResourceCommandInput{ ApplicationId: d.Get("application_id").(string), ResourceId: d.Id(), } result, _, err := svc.GetApplicationResourceCommand(input) if err != nil { - return fmt.Errorf("Error reading application resource: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read ApplicationResource: %s", err))} } return resourcePingAccessApplicationResourceReadResult(d, result) } -func resourcePingAccessApplicationResourceUpdate(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).Applications - input := pingaccess.UpdateApplicationResourceCommandInput{ +func resourcePingAccessApplicationResourceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).Applications + input := pa.UpdateApplicationResourceCommandInput{ ApplicationId: d.Get("application_id").(string), ResourceId: d.Id(), Body: *resourcePingAccessApplicationResourceReadData(d), @@ -153,153 +156,115 @@ func resourcePingAccessApplicationResourceUpdate(d *schema.ResourceData, m inter result, _, err := svc.UpdateApplicationResourceCommand(&input) if err != nil { - return fmt.Errorf("Error updating application: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update ApplicationResource: %s", err))} } return resourcePingAccessApplicationResourceReadResult(d, result) } -func resourcePingAccessApplicationResourceDelete(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).Applications +func resourcePingAccessApplicationResourceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).Applications - if d.Get(rootResource).(bool) { + if d.Get("root_resource").(bool) { return nil } - input := &pingaccess.DeleteApplicationResourceCommandInput{ + input := &pa.DeleteApplicationResourceCommandInput{ ResourceId: d.Id(),
- ApplicationId: d.Get(applicationID).(string), + ApplicationId: d.Get("application_id").(string), } _, err := svc.DeleteApplicationResourceCommand(input) if err != nil { - return fmt.Errorf("Error deleting application resource: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete ApplicationResource: %s", err))} } return nil } -func resourcePingAccessApplicationResourceImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { +func resourcePingAccessApplicationResourceImport(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { idParts := strings.SplitN(d.Id(), "/", 2) if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { return nil, fmt.Errorf("unexpected format of ID (%q), expected <application_id>/<resource_id>", d.Id()) } - d.Set("application_id", idParts[0]) + _ = d.Set("application_id", idParts[0]) d.SetId(idParts[1]) return []*schema.ResourceData{d}, nil } -func resourcePingAccessApplicationResourceReadResult(d *schema.ResourceData, rv *pingaccess.ResourceView) error { - setResourceDataBool(d, anonymous, rv.Anonymous) - // if err := d.Set(anonymous, *rv.Anonymous); err != nil { - // return err - // } - // anonymous := d.Get("anonymous").(bool) - // setResourceDataString(d, applicationId, *rv.ApplicationId) - if rv.ApplicationId != nil { - if err := d.Set(applicationID, strconv.Itoa(*rv.ApplicationId)); err != nil { - return err - } +func resourcePingAccessApplicationResourceReadResult(d *schema.ResourceData, rv *pa.ResourceView) diag.Diagnostics { + var diags diag.Diagnostics + setResourceDataBoolWithDiagnostic(d, "anonymous", rv.Anonymous, &diags) + setResourceDataStringWithDiagnostic(d, "application_id", String(strconv.Itoa(*rv.ApplicationId)), &diags) + setResourceDataStringWithDiagnostic(d, "audit_level", rv.AuditLevel, &diags) + setResourceDataStringWithDiagnostic(d, "default_auth_type_override", rv.DefaultAuthTypeOverride, &diags) + setResourceDataBoolWithDiagnostic(d, "enabled", rv.Enabled, &diags) + if err := d.Set("methods", *rv.Methods); err != nil { + diags = append(diags, diag.FromErr(err)) } - // return nil - // if err := d.Set(applicationID, *rv.ApplicationId); err != nil { - // return err - // } - // application_id := d.Get("application_id").(string) - setResourceDataString(d, auditLevel, rv.AuditLevel) - // if err := d.Set(auditLevel, *rv.AuditLevel); err != nil { - // return err - // } - // audit_level := d.Get("audit_level").(string) - setResourceDataString(d, defaultAuthTypeOverride, rv.DefaultAuthTypeOverride) - // if err := d.Set(defaultAuthTypeOverride, *rv.DefaultAuthTypeOverride); err != nil { - // return err - // } - // default_auth_type_override := d.Get("default_auth_type_override").(string) - setResourceDataBool(d, enabled, rv.Enabled) - // if err := d.Set(enabled, *rv.Enabled); err != nil { - // return err - // } - // enabled := d.Get("enabled").(bool) - // methods := d.Get("methods").([]*string) - if err := d.Set(methods, *rv.Methods); err != nil { - return err + if err := d.Set("name", *rv.Name); err != nil { + diags = append(diags, diag.FromErr(err)) } - // methods := expandStringList(d.Get("methods").(*schema.Set).List()) - if err := d.Set(name, *rv.Name); err != nil { - return err - } - // name := d.Get("name").(string) - // path_prefixes := d.Get("path_prefixes").([]*string) if rv.PathPrefixes != nil { - if err := d.Set(pathPrefixes, *rv.PathPrefixes); err != nil { - return err + if err := d.Set("path_prefixes", *rv.PathPrefixes); err != nil { + diags = append(diags,
diag.FromErr(err)) } } - - // if err := d.Set("path_patterns", *rv.PathPrefixes); err != nil { - // return err - // } - // path_prefixes := expandStringList(d.Get("path_prefixes").(*schema.Set).List()) - // if err := d.Set("policy", rv.Policy); err != nil { - // return err - // } - // policy := d.Get("policy").(map[string]interface{}) - setResourceDataBool(d, "root_resource", rv.RootResource) - setResourceDataBool(d, "unprotected", rv.Unprotected) + setResourceDataBoolWithDiagnostic(d, "root_resource", rv.RootResource, &diags) + setResourceDataBoolWithDiagnostic(d, "unprotected", rv.Unprotected, &diags) if rv.Policy != nil && (len(*rv.Policy["Web"]) != 0 || len(*rv.Policy["API"]) != 0) { if err := d.Set("policy", flattenPolicy(rv.Policy)); err != nil { - return err + diags = append(diags, diag.FromErr(err)) } } - return nil + return diags } func resourcePingAccessApplicationResourceReadData(d *schema.ResourceData) *pa.ResourceView { - methods := expandStringList(d.Get(methods).(*schema.Set).List()) + methods := expandStringList(d.Get("methods").(*schema.Set).List()) resource := &pa.ResourceView{ Name: String(d.Get("name").(string)), Methods: &methods, } - if _, ok := d.GetOkExists("anonymous"); ok { - resource.Anonymous = Bool(d.Get("anonymous").(bool)) + if v, ok := d.GetOkExists("anonymous"); ok { + resource.Anonymous = Bool(v.(bool)) } - if _, ok := d.GetOkExists("application_id"); ok { - applicationID, _ := strconv.Atoi(d.Get("application_id").(string)) + if v, ok := d.GetOk("application_id"); ok { + applicationID, _ := strconv.Atoi(v.(string)) resource.ApplicationId = Int(applicationID) } - if _, ok := d.GetOkExists("audit_level"); ok { - resource.AuditLevel = String(d.Get("audit_level").(string)) + if v, ok := d.GetOk("audit_level"); ok { + resource.AuditLevel = String(v.(string)) } - if _, ok := d.GetOkExists("default_auth_type_override"); ok { - resource.DefaultAuthTypeOverride = String(d.Get("default_auth_type_override").(string)) + if v, ok := d.GetOk("default_auth_type_override"); ok { + resource.DefaultAuthTypeOverride = String(v.(string)) } - if _, ok := d.GetOkExists("enabled"); ok { - resource.Enabled = Bool(d.Get("enabled").(bool)) + if v, ok := d.GetOkExists("enabled"); ok { + resource.Enabled = Bool(v.(bool)) } - if _, ok := d.GetOkExists("root_resource"); ok { - resource.RootResource = Bool(d.Get("root_resource").(bool)) + if v, ok := d.GetOkExists("root_resource"); ok { + resource.RootResource = Bool(v.(bool)) } - if _, ok := d.GetOkExists("unprotected"); ok { - resource.Unprotected = Bool(d.Get("unprotected").(bool)) + if v, ok := d.GetOkExists("unprotected"); ok { + resource.Unprotected = Bool(v.(bool)) } - if _, ok := d.GetOkExists("path_prefixes"); ok { - pathPrefixes := expandStringList(d.Get("path_prefixes").(*schema.Set).List()) + if v, ok := d.GetOk("path_prefixes"); ok { + pathPrefixes := expandStringList(v.(*schema.Set).List()) resource.PathPrefixes = &pathPrefixes } - if _, ok := d.GetOk("path_patterns"); ok { - pathPatterns := d.Get("path_patterns").(*schema.Set).List() + if v, ok := d.GetOk("path_patterns"); ok { + pathPatterns := v.(*schema.Set).List() for _, raw := range pathPatterns { l := raw.(map[string]interface{}) - p := &pingaccess.PathPatternView{ + p := &pa.PathPatternView{ Pattern: String(l["pattern"].(string)), Type: String(l["type"].(string)), } @@ -307,8 +272,8 @@ func resourcePingAccessApplicationResourceReadData(d *schema.ResourceData) *pa.R } } - if val, ok := d.GetOkExists("policy"); ok { - resource.Policy = 
expandPolicy(val.([]interface{})) + if v, ok := d.GetOk("policy"); ok { + resource.Policy = expandPolicy(v.([]interface{})) } diff --git a/pingaccess/resource_pingaccess_application_resource_test.go b/pingaccess/resource_pingaccess_application_resource_test.go index 6fe9fd39..556a4b66 100644 --- a/pingaccess/resource_pingaccess_application_resource_test.go +++ b/pingaccess/resource_pingaccess_application_resource_test.go @@ -306,17 +306,17 @@ func Test_resourcePingAccessApplicationResourceReadData(t *testing.T) { // }, PathPrefixes: &[]*string{String("false")}, Policy: map[string]*[]*pa.PolicyItem{ - "Web": &[]*pa.PolicyItem{ - &pa.PolicyItem{ + "Web": { + { Id: json.Number("1"), Type: String("Rule"), }, - &pa.PolicyItem{ + { Id: json.Number("2"), Type: String("RuleSet"), }, }, - "API": &[]*pa.PolicyItem{}, + "API": {}, }, RootResource: Bool(false), Unprotected: Bool(false), diff --git a/pingaccess/resource_pingaccess_application_test.go b/pingaccess/resource_pingaccess_application_test.go index 98745422..e8fba0e7 100644 --- a/pingaccess/resource_pingaccess_application_test.go +++ b/pingaccess/resource_pingaccess_application_test.go @@ -10,12 +10,11 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessApplication(t *testing.T) { - var out pingaccess.ApplicationView + var out pa.ApplicationView resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -170,7 +169,7 @@ func testAccPingAccessApplicationConfig(name, context, appType string) string { `, name, context, appType, appType, os.Getenv("PINGFEDERATE_TEST_IP")) } -func testAccCheckPingAccessApplicationExists(n string, c int64, out *pingaccess.ApplicationView) resource.TestCheckFunc { +func testAccCheckPingAccessApplicationExists(n string, c int64, out *pa.ApplicationView) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -184,8 +183,8 @@ func testAccCheckPingAccessApplicationExists(n string, c int64, out *pingaccess. 
return fmt.Errorf("No application ID is set") } - conn := testAccProvider.Meta().(*pingaccess.Client).Applications - result, _, err := conn.GetApplicationCommand(&pingaccess.GetApplicationCommandInput{ + conn := testAccProvider.Meta().(*pa.Client).Applications + result, _, err := conn.GetApplicationCommand(&pa.GetApplicationCommandInput{ Id: rs.Primary.ID, }) diff --git a/pingaccess/resource_pingaccess_auth_token_management.go b/pingaccess/resource_pingaccess_auth_token_management.go index 028fde46..0ac01998 100644 --- a/pingaccess/resource_pingaccess_auth_token_management.go +++ b/pingaccess/resource_pingaccess_auth_token_management.go @@ -1,21 +1,23 @@ package pingaccess import ( + "context" "fmt" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourcePingAccessAuthTokenManagement() *schema.Resource { return &schema.Resource{ - Create: resourcePingAccessAuthTokenManagementCreate, - Read: resourcePingAccessAuthTokenManagementRead, - Update: resourcePingAccessAuthTokenManagementUpdate, - Delete: resourcePingAccessAuthTokenManagementDelete, + CreateContext: resourcePingAccessAuthTokenManagementCreate, + ReadContext: resourcePingAccessAuthTokenManagementRead, + UpdateContext: resourcePingAccessAuthTokenManagementUpdate, + DeleteContext: resourcePingAccessAuthTokenManagementDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: resourcePingAccessAuthTokenManagementSchema(), @@ -24,25 +26,25 @@ func resourcePingAccessAuthTokenManagement() *schema.Resource { func resourcePingAccessAuthTokenManagementSchema() map[string]*schema.Schema { return map[string]*schema.Schema{ - "issuer": &schema.Schema{ + "issuer": { Type: schema.TypeString, Optional: true, Default: "PingAccessAuthToken", Description: "The issuer value to include in auth tokens. PingAccess inserts this value as the iss claim within the auth tokens.", }, - "key_roll_enabled": &schema.Schema{ + "key_roll_enabled": { Type: schema.TypeBool, Optional: true, Default: true, Description: "This field is true if key rollover is enabled. When false, PingAccess will not rollover keys at the configured interval.", }, - "key_roll_period_in_hours": &schema.Schema{ + "key_roll_period_in_hours": { Type: schema.TypeInt, Optional: true, Default: 24, Description: "The interval (in hours) at which PingAccess will roll the keys. 
Key rollover updates keys at regular intervals to ensure the security of signed auth tokens.", }, - "signing_algorithm": &schema.Schema{ + "signing_algorithm": { Type: schema.TypeString, Optional: true, Default: "P-256", @@ -51,50 +53,52 @@ func resourcePingAccessAuthTokenManagementSchema() map[string]*schema.Schema { } } -func resourcePingAccessAuthTokenManagementCreate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessAuthTokenManagementCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { d.SetId("auth_token_management") - return resourcePingAccessAuthTokenManagementUpdate(d, m) + return resourcePingAccessAuthTokenManagementUpdate(ctx, d, m) } -func resourcePingAccessAuthTokenManagementRead(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessAuthTokenManagementRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).AuthTokenManagements result, _, err := svc.GetAuthTokenManagementCommand() if err != nil { - return fmt.Errorf("Error reading auth token management settings: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read AuthTokenManagement: %s", err))} } return resourcePingAccessAuthTokenManagementReadResult(d, result) } -func resourcePingAccessAuthTokenManagementUpdate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessAuthTokenManagementUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).AuthTokenManagements input := pa.UpdateAuthTokenManagementCommandInput{ Body: *resourcePingAccessAuthTokenManagementReadData(d), } result, _, err := svc.UpdateAuthTokenManagementCommand(&input) if err != nil { - return fmt.Errorf("Error updating auth token management settings: %s", err.Error()) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update AuthTokenManagement: %s", err))} } d.SetId("auth_token_management") return resourcePingAccessAuthTokenManagementReadResult(d, result) } -func resourcePingAccessAuthTokenManagementDelete(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessAuthTokenManagementDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).AuthTokenManagements _, err := svc.DeleteAuthTokenManagementCommand() if err != nil { - return fmt.Errorf("Error resetting auth token management: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete AuthTokenManagement: %s", err))} + } return nil } -func resourcePingAccessAuthTokenManagementReadResult(d *schema.ResourceData, input *pa.AuthTokenManagementView) (err error) { - setResourceDataString(d, "issuer", input.Issuer) - setResourceDataBool(d, "key_roll_enabled", input.KeyRollEnabled) - setResourceDataInt(d, "key_roll_period_in_hours", input.KeyRollPeriodInHours) - setResourceDataString(d, "signing_algorithm", input.SigningAlgorithm) - return nil +func resourcePingAccessAuthTokenManagementReadResult(d *schema.ResourceData, input *pa.AuthTokenManagementView) diag.Diagnostics { + var diags diag.Diagnostics + setResourceDataStringWithDiagnostic(d, "issuer", input.Issuer, &diags) + setResourceDataBoolWithDiagnostic(d, "key_roll_enabled", input.KeyRollEnabled, &diags) + setResourceDataIntWithDiagnostic(d, "key_roll_period_in_hours", input.KeyRollPeriodInHours, &diags) + setResourceDataStringWithDiagnostic(d, "signing_algorithm", input.SigningAlgorithm, &diags) + return diags } func 
resourcePingAccessAuthTokenManagementReadData(d *schema.ResourceData) *pa.AuthTokenManagementView { diff --git a/pingaccess/resource_pingaccess_authn_req_list.go b/pingaccess/resource_pingaccess_authn_req_list.go index 698ffe60..9e24e6eb 100644 --- a/pingaccess/resource_pingaccess_authn_req_list.go +++ b/pingaccess/resource_pingaccess_authn_req_list.go @@ -1,20 +1,22 @@ package pingaccess import ( + "context" "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessAuthnReqList() *schema.Resource { return &schema.Resource{ - Create: resourcePingAccessAuthnReqListCreate, - Read: resourcePingAccessAuthnReqListRead, - Update: resourcePingAccessAuthnReqListUpdate, - Delete: resourcePingAccessAuthnReqListDelete, + CreateContext: resourcePingAccessAuthnReqListCreate, + ReadContext: resourcePingAccessAuthnReqListRead, + UpdateContext: resourcePingAccessAuthnReqListUpdate, + DeleteContext: resourcePingAccessAuthnReqListDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: resourcePingAccessAuthnReqListSchema(), @@ -23,7 +25,7 @@ func resourcePingAccessAuthnReqList() *schema.Resource { func resourcePingAccessAuthnReqListSchema() map[string]*schema.Schema { return map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, }, @@ -31,7 +33,7 @@ func resourcePingAccessAuthnReqListSchema() map[string]*schema.Schema { } } -func resourcePingAccessAuthnReqListCreate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessAuthnReqListCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AuthnReqLists input := pingaccess.AddAuthnReqListCommandInput{ Body: *resourcePingAccessAuthnReqListReadData(d), @@ -39,26 +41,26 @@ func resourcePingAccessAuthnReqListCreate(d *schema.ResourceData, m interface{}) result, _, err := svc.AddAuthnReqListCommand(&input) if err != nil { - return fmt.Errorf("Error creating AuthnReqList: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create AuthnReqList: %s", err))} } d.SetId(result.Id.String()) return resourcePingAccessAuthnReqListReadResult(d, &input.Body) } -func resourcePingAccessAuthnReqListRead(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessAuthnReqListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AuthnReqLists input := &pingaccess.GetAuthnReqListCommandInput{ Id: d.Id(), } result, _, err := svc.GetAuthnReqListCommand(input) if err != nil { - return fmt.Errorf("Error reading AuthnReqList: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read AuthnReqList: %s", err))} } return resourcePingAccessAuthnReqListReadResult(d, result) } -func resourcePingAccessAuthnReqListUpdate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessAuthnReqListUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AuthnReqLists input := pingaccess.UpdateAuthnReqListCommandInput{ Body: *resourcePingAccessAuthnReqListReadData(d), @@ -67,12 +69,12 @@ func resourcePingAccessAuthnReqListUpdate(d *schema.ResourceData, m interface{}) result, _, err := svc.UpdateAuthnReqListCommand(&input) if err != nil { - return 
fmt.Errorf("Error updating AuthnReqList: %s", err.Error()) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update AuthnReqList: %s", err))} } return resourcePingAccessAuthnReqListReadResult(d, result) } -func resourcePingAccessAuthnReqListDelete(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessAuthnReqListDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AuthnReqLists input := &pingaccess.DeleteAuthnReqListCommandInput{ Id: d.Id(), @@ -80,17 +82,18 @@ func resourcePingAccessAuthnReqListDelete(d *schema.ResourceData, m interface{}) _, err := svc.DeleteAuthnReqListCommand(input) if err != nil { - return fmt.Errorf("Error deleting AuthnReqList: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete AuthnReqList: %s", err))} } return nil } -func resourcePingAccessAuthnReqListReadResult(d *schema.ResourceData, input *pingaccess.AuthnReqListView) error { - setResourceDataString(d, "name", input.Name) +func resourcePingAccessAuthnReqListReadResult(d *schema.ResourceData, input *pingaccess.AuthnReqListView) diag.Diagnostics { + var diags diag.Diagnostics + setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) if err := d.Set("authn_reqs", input.AuthnReqs); err != nil { - return err + diags = append(diags, diag.FromErr(err)) } - return nil + return diags } func resourcePingAccessAuthnReqListReadData(d *schema.ResourceData) *pingaccess.AuthnReqListView { diff --git a/pingaccess/resource_pingaccess_authn_req_list_test.go b/pingaccess/resource_pingaccess_authn_req_list_test.go index af96c0bc..cdfbbbd3 100644 --- a/pingaccess/resource_pingaccess_authn_req_list_test.go +++ b/pingaccess/resource_pingaccess_authn_req_list_test.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -60,8 +59,8 @@ func testAccCheckPingAccessAuthnReqListExists(n string) resource.TestCheckFunc { return fmt.Errorf("No AuthnReqList ID is set") } - conn := testAccProvider.Meta().(*pingaccess.Client).AuthnReqLists - result, _, err := conn.GetAuthnReqListCommand(&pingaccess.GetAuthnReqListCommandInput{ + conn := testAccProvider.Meta().(*pa.Client).AuthnReqLists + result, _, err := conn.GetAuthnReqListCommand(&pa.GetAuthnReqListCommandInput{ Id: rs.Primary.ID, }) diff --git a/pingaccess/resource_pingaccess_certificate.go b/pingaccess/resource_pingaccess_certificate.go index 65bd7b63..6ed1a2ac 100644 --- a/pingaccess/resource_pingaccess_certificate.go +++ b/pingaccess/resource_pingaccess_certificate.go @@ -1,20 +1,22 @@ package pingaccess import ( + "context" "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessCertificate() *schema.Resource { return &schema.Resource{ - Create: resourcePingAccessCertificateCreate, - Read: resourcePingAccessCertificateRead, - Update: resourcePingAccessCertificateUpdate, - Delete: resourcePingAccessCertificateDelete, + CreateContext: resourcePingAccessCertificateCreate, + ReadContext: resourcePingAccessCertificateRead, + UpdateContext: resourcePingAccessCertificateUpdate, + DeleteContext: 
resourcePingAccessCertificateDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: resourcePingAccessCertificateSchema(), @@ -23,59 +25,58 @@ func resourcePingAccessCertificate() *schema.Resource { func resourcePingAccessCertificateSchema() map[string]*schema.Schema { return map[string]*schema.Schema{ - "alias": &schema.Schema{ + "alias": { Type: schema.TypeString, Required: true, }, - "file_data": &schema.Schema{ + "file_data": { Type: schema.TypeString, Required: true, }, - "expires": &schema.Schema{ + "expires": { Type: schema.TypeInt, Computed: true, }, - "issuer_dn": &schema.Schema{ + "issuer_dn": { Type: schema.TypeString, Computed: true, }, - "md5sum": &schema.Schema{ + "md5sum": { Type: schema.TypeString, Computed: true, }, - "serial_number": &schema.Schema{ + "serial_number": { Type: schema.TypeString, Computed: true, }, - "sha1sum": &schema.Schema{ + "sha1sum": { Type: schema.TypeString, Computed: true, }, - "signature_algorithm": &schema.Schema{ + "signature_algorithm": { Type: schema.TypeString, Computed: true, }, - "status": &schema.Schema{ + "status": { Type: schema.TypeString, Computed: true, }, - // "subject_alternative_names": setOfString(), - "subject_cn": &schema.Schema{ + "subject_cn": { Type: schema.TypeString, Computed: true, }, - "subject_dn": &schema.Schema{ + "subject_dn": { Type: schema.TypeString, Computed: true, }, - "valid_from": &schema.Schema{ + "valid_from": { Type: schema.TypeInt, Computed: true, }, } } -func resourcePingAccessCertificateCreate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessCertificateCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { input := pa.ImportTrustedCertInput{ Body: pa.X509FileImportDocView{ Alias: String(d.Get("alias").(string)), @@ -87,26 +88,26 @@ func resourcePingAccessCertificateCreate(d *schema.ResourceData, m interface{}) result, _, err := svc.ImportTrustedCert(&input) if err != nil { - return fmt.Errorf("Error creating Certificate: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create Certificate: %s", err))} } d.SetId(result.Id.String()) return resourcePingAccessCertificateReadResult(d, result) } -func resourcePingAccessCertificateRead(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessCertificateRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Certificates input := &pa.GetTrustedCertInput{ Id: d.Id(), } result, _, err := svc.GetTrustedCert(input) if err != nil { - return fmt.Errorf("Error reading Certificate: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read Certificate: %s", err))} } return resourcePingAccessCertificateReadResult(d, result) } -func resourcePingAccessCertificateUpdate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessCertificateUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { input := pa.UpdateTrustedCertInput{ Body: pa.X509FileImportDocView{ Alias: String(d.Get("alias").(string)), @@ -119,57 +120,33 @@ func resourcePingAccessCertificateUpdate(d *schema.ResourceData, m interface{}) result, _, err := svc.UpdateTrustedCert(&input) if err != nil { - return fmt.Errorf("Error creating Certificate: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update Certificate: %s", err))} } d.SetId(result.Id.String()) return resourcePingAccessCertificateReadResult(d, 
result) } -func resourcePingAccessCertificateDelete(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessCertificateDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Certificates _, err := svc.DeleteTrustedCertCommand(&pa.DeleteTrustedCertCommandInput{Id: d.Id()}) if err != nil { - return fmt.Errorf("Error deleting virtualhost: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete Certificate: %s", err))} } return nil } -func resourcePingAccessCertificateReadResult(d *schema.ResourceData, rv *pa.TrustedCertView) error { - if err := d.Set("alias", rv.Alias); err != nil { - return err - } - if err := d.Set("expires", rv.Expires); err != nil { - return err - } - if err := d.Set("issuer_dn", rv.IssuerDn); err != nil { - return err - } - if err := d.Set("md5sum", rv.Md5sum); err != nil { - return err - } - if err := d.Set("serial_number", rv.SerialNumber); err != nil { - return err - } - if err := d.Set("sha1sum", rv.Sha1sum); err != nil { - return err - } - if err := d.Set("signature_algorithm", rv.SignatureAlgorithm); err != nil { - return err - } - if err := d.Set("status", rv.Status); err != nil { - return err - } - if err := d.Set("subject_dn", rv.SubjectDn); err != nil { - return err - } - if err := d.Set("valid_from", rv.ValidFrom); err != nil { - return err - } - // "subject_alternative_names": setOfString(), - // "subject_cn": &schema.Schema{ - // Type: schema.TypeString, - // Computed: true, - // }, - return nil +func resourcePingAccessCertificateReadResult(d *schema.ResourceData, rv *pa.TrustedCertView) diag.Diagnostics { + var diags diag.Diagnostics + setResourceDataStringWithDiagnostic(d, "alias", rv.Alias, &diags) + setResourceDataIntWithDiagnostic(d, "expires", rv.Expires, &diags) + setResourceDataStringWithDiagnostic(d, "issuer_dn", rv.IssuerDn, &diags) + setResourceDataStringWithDiagnostic(d, "md5sum", rv.Md5sum, &diags) + setResourceDataStringWithDiagnostic(d, "serial_number", rv.SerialNumber, &diags) + setResourceDataStringWithDiagnostic(d, "sha1sum", rv.Sha1sum, &diags) + setResourceDataStringWithDiagnostic(d, "signature_algorithm", rv.SignatureAlgorithm, &diags) + setResourceDataStringWithDiagnostic(d, "status", rv.Status, &diags) + setResourceDataStringWithDiagnostic(d, "subject_dn", rv.SubjectDn, &diags) + setResourceDataIntWithDiagnostic(d, "valid_from", rv.ValidFrom, &diags) + return diags } diff --git a/pingaccess/resource_pingaccess_certificate_test.go b/pingaccess/resource_pingaccess_certificate_test.go index 25a3a7fc..4c4975a8 100644 --- a/pingaccess/resource_pingaccess_certificate_test.go +++ b/pingaccess/resource_pingaccess_certificate_test.go @@ -82,7 +82,7 @@ func testAccCheckPingAccessCertificateExists(n string) resource.TestCheckFunc { func testAccCheckPingAccessCertificateAttributes(n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, _ := s.RootModule().Resources[n] + rs := s.RootModule().Resources[n] if rs.Primary.ID == "" || rs.Primary.ID == "0" { return fmt.Errorf("No Certificate ID is set") } diff --git a/pingaccess/resource_pingaccess_engine_listener.go b/pingaccess/resource_pingaccess_engine_listener.go index c810f496..45da8e49 100644 --- a/pingaccess/resource_pingaccess_engine_listener.go +++ b/pingaccess/resource_pingaccess_engine_listener.go @@ -1,20 +1,22 @@ package pingaccess import ( + "context" "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessEngineListener() *schema.Resource { return &schema.Resource{ - Create: resourcePingAccessEngineListenerCreate, - Read: resourcePingAccessEngineListenerRead, - Update: resourcePingAccessEngineListenerUpdate, - Delete: resourcePingAccessEngineListenerDelete, + CreateContext: resourcePingAccessEngineListenerCreate, + ReadContext: resourcePingAccessEngineListenerRead, + UpdateContext: resourcePingAccessEngineListenerUpdate, + DeleteContext: resourcePingAccessEngineListenerDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: resourcePingAccessEngineListenerSchema(), @@ -23,88 +25,89 @@ func resourcePingAccessEngineListener() *schema.Resource { func resourcePingAccessEngineListenerSchema() map[string]*schema.Schema { return map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, }, - "port": &schema.Schema{ + "port": { Type: schema.TypeInt, Required: true, }, - "secure": &schema.Schema{ + "secure": { Type: schema.TypeBool, Optional: true, }, } } -func resourcePingAccessEngineListenerCreate(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).EngineListeners - input := pingaccess.AddEngineListenerCommandInput{ +func resourcePingAccessEngineListenerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).EngineListeners + input := pa.AddEngineListenerCommandInput{ Body: *resourcePingAccessEngineListenerReadData(d), } result, _, err := svc.AddEngineListenerCommand(&input) if err != nil { - return fmt.Errorf("Error creating EngineListener: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create EngineListener: %s", err))} } d.SetId(result.Id.String()) return resourcePingAccessEngineListenerReadResult(d, &input.Body) } -func resourcePingAccessEngineListenerRead(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).EngineListeners - input := &pingaccess.GetEngineListenerCommandInput{ +func resourcePingAccessEngineListenerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).EngineListeners + input := &pa.GetEngineListenerCommandInput{ Id: d.Id(), } result, _, err := svc.GetEngineListenerCommand(input) if err != nil { - return fmt.Errorf("Error reading EngineListener: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read EngineListener: %s", err))} } return resourcePingAccessEngineListenerReadResult(d, result) } -func resourcePingAccessEngineListenerUpdate(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).EngineListeners - input := pingaccess.UpdateEngineListenerCommandInput{ +func resourcePingAccessEngineListenerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).EngineListeners + input := pa.UpdateEngineListenerCommandInput{ Body: *resourcePingAccessEngineListenerReadData(d), Id: d.Id(), } result, _, err := svc.UpdateEngineListenerCommand(&input) if err != nil { - return fmt.Errorf("Error updating EngineListener: %s", err.Error()) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update EngineListener: %s", err))} } return 
resourcePingAccessEngineListenerReadResult(d, result) } -func resourcePingAccessEngineListenerDelete(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).EngineListeners - input := &pingaccess.DeleteEngineListenerCommandInput{ +func resourcePingAccessEngineListenerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).EngineListeners + input := &pa.DeleteEngineListenerCommandInput{ Id: d.Id(), } _, err := svc.DeleteEngineListenerCommand(input) if err != nil { - return fmt.Errorf("Error deleting EngineListener: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete EngineListener: %s", err))} + } return nil } -func resourcePingAccessEngineListenerReadResult(d *schema.ResourceData, input *pingaccess.EngineListenerView) error { - setResourceDataString(d, "name", input.Name) - setResourceDataInt(d, "port", input.Port) - setResourceDataBool(d, "secure", input.Secure) - return nil +func resourcePingAccessEngineListenerReadResult(d *schema.ResourceData, input *pa.EngineListenerView) diag.Diagnostics { + var diags diag.Diagnostics + setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) + setResourceDataIntWithDiagnostic(d, "port", input.Port, &diags) + setResourceDataBoolWithDiagnostic(d, "secure", input.Secure, &diags) + return diags } -func resourcePingAccessEngineListenerReadData(d *schema.ResourceData) *pingaccess.EngineListenerView { - engine := &pingaccess.EngineListenerView{ +func resourcePingAccessEngineListenerReadData(d *schema.ResourceData) *pa.EngineListenerView { + engine := &pa.EngineListenerView{ Name: String(d.Get("name").(string)), Port: Int(d.Get("port").(int)), } - if v, ok := d.GetOkExists("secure"); ok { engine.Secure = Bool(v.(bool)) } diff --git a/pingaccess/resource_pingaccess_engine_listener_test.go b/pingaccess/resource_pingaccess_engine_listener_test.go index 90986a95..86b27cfb 100644 --- a/pingaccess/resource_pingaccess_engine_listener_test.go +++ b/pingaccess/resource_pingaccess_engine_listener_test.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -58,8 +57,8 @@ func testAccCheckPingAccessEngineListenerExists(n string) resource.TestCheckFunc return fmt.Errorf("No EngineListener ID is set") } - conn := testAccProvider.Meta().(*pingaccess.Client).EngineListeners - result, _, err := conn.GetEngineListenerCommand(&pingaccess.GetEngineListenerCommandInput{ + conn := testAccProvider.Meta().(*pa.Client).EngineListeners + result, _, err := conn.GetEngineListenerCommand(&pa.GetEngineListenerCommandInput{ Id: rs.Primary.ID, }) diff --git a/pingaccess/resource_pingaccess_hsm_provider.go b/pingaccess/resource_pingaccess_hsm_provider.go index 52771715..47d6ed4f 100644 --- a/pingaccess/resource_pingaccess_hsm_provider.go +++ b/pingaccess/resource_pingaccess_hsm_provider.go @@ -1,22 +1,25 @@ package pingaccess import ( + "context" "crypto/tls" "encoding/json" "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" - "net/http" ) func resourcePingAccessHsmProvider() *schema.Resource { return &schema.Resource{ - Create: 
resourcePingAccessHsmProviderCreate, - Read: resourcePingAccessHsmProviderRead, - Update: resourcePingAccessHsmProviderUpdate, - Delete: resourcePingAccessHsmProviderDelete, + CreateContext: resourcePingAccessHsmProviderCreate, + ReadContext: resourcePingAccessHsmProviderRead, + UpdateContext: resourcePingAccessHsmProviderUpdate, + DeleteContext: resourcePingAccessHsmProviderDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: resourcePingAccessHsmProviderSchema(), } @@ -39,7 +42,7 @@ func resourcePingAccessHsmProviderSchema() map[string]*schema.Schema { } } -func resourcePingAccessHsmProviderCreate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessHsmProviderCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).HsmProviders input := pingaccess.AddHsmProviderCommandInput{ Body: *resourcePingAccessHsmProviderReadData(d), @@ -47,26 +50,26 @@ func resourcePingAccessHsmProviderCreate(d *schema.ResourceData, m interface{}) result, _, err := svc.AddHsmProviderCommand(&input) if err != nil { - return fmt.Errorf("Error creating HsmProvider: %s", err.Error()) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create HsmProvider: %s", err))} } d.SetId(result.Id.String()) return resourcePingAccessHsmProviderReadResult(d, result, svc) } -func resourcePingAccessHsmProviderRead(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessHsmProviderRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).HsmProviders input := &pingaccess.GetHsmProviderCommandInput{ Id: d.Id(), } result, _, err := svc.GetHsmProviderCommand(input) if err != nil { - return fmt.Errorf("Error reading HsmProvider: %s", err.Error()) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read HsmProvider: %s", err))} } return resourcePingAccessHsmProviderReadResult(d, result, svc) } -func resourcePingAccessHsmProviderUpdate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessHsmProviderUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).HsmProviders input := pingaccess.UpdateHsmProviderCommandInput{ Body: *resourcePingAccessHsmProviderReadData(d), @@ -74,27 +77,25 @@ func resourcePingAccessHsmProviderUpdate(d *schema.ResourceData, m interface{}) } result, _, err := svc.UpdateHsmProviderCommand(&input) if err != nil { - return fmt.Errorf("Error updating HsmProvider: %s", err.Error()) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update HsmProvider: %s", err))} } d.SetId(result.Id.String()) return resourcePingAccessHsmProviderReadResult(d, result, svc) } -func resourcePingAccessHsmProviderDelete(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessHsmProviderDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).HsmProviders http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} _, err := svc.DeleteHsmProviderCommand(&pingaccess.DeleteHsmProviderCommandInput{Id: d.Id()}) if err != nil { - return fmt.Errorf("Error deleting HsmProvider: %s", err.Error()) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete HsmProvider: %s", err))} } return nil } -func resourcePingAccessHsmProviderReadResult(d *schema.ResourceData, input 
*pingaccess.HsmProviderView, svc *pingaccess.HsmProvidersService) error { - setResourceDataString(d, "name", input.Name) - setResourceDataString(d, "class_name", input.ClassName) - +func resourcePingAccessHsmProviderReadResult(d *schema.ResourceData, input *pingaccess.HsmProviderView, svc *pingaccess.HsmProvidersService) diag.Diagnostics { + var diags diag.Diagnostics b, _ := json.Marshal(input.Configuration) config := string(b) @@ -107,10 +108,10 @@ func resourcePingAccessHsmProviderReadResult(d *schema.ResourceData, input *ping config = maskConfigFromDescriptors(desc, input.ClassName, originalConfig, config) - if err := d.Set("configuration", config); err != nil { - return err - } - return nil + setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) + setResourceDataStringWithDiagnostic(d, "class_name", input.ClassName, &diags) + setResourceDataStringWithDiagnostic(d, "configuration", &config, &diags) + return diags } func resourcePingAccessHsmProviderReadData(d *schema.ResourceData) *pingaccess.HsmProviderView { diff --git a/pingaccess/resource_pingaccess_hsm_provider_test.go b/pingaccess/resource_pingaccess_hsm_provider_test.go index a98b37c7..bbd24b0c 100644 --- a/pingaccess/resource_pingaccess_hsm_provider_test.go +++ b/pingaccess/resource_pingaccess_hsm_provider_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessHsmProvider(t *testing.T) { @@ -29,10 +29,6 @@ func TestAccPingAccessHsmProvider(t *testing.T) { }) } -func testAccCheckPingAccessHsmProviderDestroy(s *terraform.State) error { - return nil -} - func testAccPingAccessHsmProviderConfig(name, configUpdate string) string { return fmt.Sprintf(` resource "pingaccess_hsm_provider" "acc_test_idm_%s" { @@ -59,8 +55,8 @@ func testAccCheckPingAccessHsmProviderExists(n string) resource.TestCheckFunc { return fmt.Errorf("No HsmProvider ID is set") } - conn := testAccProvider.Meta().(*pingaccess.Client).HsmProviders - result, _, err := conn.GetHsmProviderCommand(&pingaccess.GetHsmProviderCommandInput{ + conn := testAccProvider.Meta().(*pa.Client).HsmProviders + result, _, err := conn.GetHsmProviderCommand(&pa.GetHsmProviderCommandInput{ Id: rs.Primary.ID, }) @@ -78,13 +74,13 @@ func testAccCheckPingAccessHsmProviderExists(n string) resource.TestCheckFunc { func testAccCheckPingAccessHsmProviderAttributes(n, partition string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, _ := s.RootModule().Resources[n] + rs := s.RootModule().Resources[n] if rs.Primary.ID == "" || rs.Primary.ID == "0" { return fmt.Errorf("No HsmProvider ID is set") } - conn := testAccProvider.Meta().(*pingaccess.Client).HsmProviders - result, _, err := conn.GetHsmProviderCommand(&pingaccess.GetHsmProviderCommandInput{ + conn := testAccProvider.Meta().(*pa.Client).HsmProviders + result, _, err := conn.GetHsmProviderCommand(&pa.GetHsmProviderCommandInput{ Id: rs.Primary.ID, }) diff --git a/pingaccess/resource_pingaccess_http_config_request_host_source.go b/pingaccess/resource_pingaccess_http_config_request_host_source.go index 0adb6de7..496a043e 100644 --- a/pingaccess/resource_pingaccess_http_config_request_host_source.go +++ b/pingaccess/resource_pingaccess_http_config_request_host_source.go @@ -1,21 +1,22 @@ package pingaccess import ( - "encoding/json" + "context" "fmt" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessHTTPConfigRequestHostSource() *schema.Resource { return &schema.Resource{ - Create: resourcePingAccessHTTPConfigRequestHostSourceCreate, - Read: resourcePingAccessHTTPConfigRequestHostSourceRead, - Update: resourcePingAccessHTTPConfigRequestHostSourceUpdate, - Delete: resourcePingAccessHTTPConfigRequestHostSourceDelete, + CreateContext: resourcePingAccessHTTPConfigRequestHostSourceCreate, + ReadContext: resourcePingAccessHTTPConfigRequestHostSourceRead, + UpdateContext: resourcePingAccessHTTPConfigRequestHostSourceUpdate, + DeleteContext: resourcePingAccessHTTPConfigRequestHostSourceDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: resourcePingAccessHTTPConfigRequestHostSourceResourceSchema(), } @@ -24,59 +25,56 @@ func resourcePingAccessHTTPConfigRequestHostSource() *schema.Resource { func resourcePingAccessHTTPConfigRequestHostSourceResourceSchema() map[string]*schema.Schema { return map[string]*schema.Schema{ "header_name_list": requiredListOfString(), - "list_value_location": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateListLocationValue, + "list_value_location": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: validateListLocationValue, }, } } -func resourcePingAccessHTTPConfigRequestHostSourceCreate(d *schema.ResourceData, m interface{}) error { - return resourcePingAccessHTTPConfigRequestHostSourceUpdate(d, m) +func resourcePingAccessHTTPConfigRequestHostSourceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + return resourcePingAccessHTTPConfigRequestHostSourceUpdate(ctx, d, m) } -func resourcePingAccessHTTPConfigRequestHostSourceRead(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessHTTPConfigRequestHostSourceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).HttpConfig result, _, err := svc.GetHostSourceCommand() if err != nil { - return fmt.Errorf("Error reading http config host source: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read HttpConfigHostSource: %s", err))} } return resourcePingAccessHTTPConfigRequestHostSourceReadResult(d, result) } -func resourcePingAccessHTTPConfigRequestHostSourceUpdate(d *schema.ResourceData, m interface{}) error { - +func resourcePingAccessHTTPConfigRequestHostSourceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).HttpConfig - input := &pa.UpdateHostSourceCommandInput{Body: *resourcePingAccessHTTPConfigRequestHostSourceReadData(d)} result, _, err := svc.UpdateHostSourceCommand(input) if err != nil { - b, _ := json.Marshal(input) - return fmt.Errorf("Error updating http config host source: %s, %s", err, string(b)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update HttpConfigHostSource: %s", err))} } d.SetId("http_config_host_source") return resourcePingAccessHTTPConfigRequestHostSourceReadResult(d, result) } -func resourcePingAccessHTTPConfigRequestHostSourceDelete(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessHTTPConfigRequestHostSourceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := 
m.(*pa.Client).HttpConfig _, err := svc.DeleteHostSourceCommand() if err != nil { - return fmt.Errorf("Error deleting http config host source: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete HttpConfigHostSource: %s", err))} + } return nil } -func resourcePingAccessHTTPConfigRequestHostSourceReadResult(d *schema.ResourceData, rv *pa.HostMultiValueSourceView) error { - if err := d.Set("list_value_location", rv.ListValueLocation); err != nil { - return err - } +func resourcePingAccessHTTPConfigRequestHostSourceReadResult(d *schema.ResourceData, rv *pa.HostMultiValueSourceView) diag.Diagnostics { + var diags diag.Diagnostics + setResourceDataStringWithDiagnostic(d, "list_value_location", rv.ListValueLocation, &diags) if err := d.Set("header_name_list", rv.HeaderNameList); err != nil { - return err + diags = append(diags, diag.FromErr(err)) } - return nil + return diags } func resourcePingAccessHTTPConfigRequestHostSourceReadData(d *schema.ResourceData) (body *pa.HostMultiValueSourceView) { diff --git a/pingaccess/resource_pingaccess_http_config_request_host_source_test.go b/pingaccess/resource_pingaccess_http_config_request_host_source_test.go index 58672973..70fb988c 100644 --- a/pingaccess/resource_pingaccess_http_config_request_host_source_test.go +++ b/pingaccess/resource_pingaccess_http_config_request_host_source_test.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -60,7 +59,7 @@ func testAccCheckPingAccessHTTPConfigRequestHostSourceExists(n string) resource. 
return fmt.Errorf("No http config request host source response ID is set") } - conn := testAccProvider.Meta().(*pingaccess.Client).HttpConfig + conn := testAccProvider.Meta().(*pa.Client).HttpConfig result, _, err := conn.GetHostSourceCommand() if err != nil { diff --git a/pingaccess/resource_pingaccess_https_listener.go b/pingaccess/resource_pingaccess_https_listener.go index 487a86e0..6419cb4d 100644 --- a/pingaccess/resource_pingaccess_https_listener.go +++ b/pingaccess/resource_pingaccess_https_listener.go @@ -1,20 +1,22 @@ package pingaccess import ( + "context" "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessHTTPSListener() *schema.Resource { return &schema.Resource{ - Create: resourcePingAccessHTTPSListenerCreate, - Read: resourcePingAccessHTTPSListenerRead, - Update: resourcePingAccessHTTPSListenerUpdate, - Delete: resourcePingAccessHTTPSListenerDelete, + CreateContext: resourcePingAccessHTTPSListenerCreate, + ReadContext: resourcePingAccessHTTPSListenerRead, + UpdateContext: resourcePingAccessHTTPSListenerUpdate, + DeleteContext: resourcePingAccessHTTPSListenerDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: resourcePingAccessHTTPSListenerSchema(), @@ -23,30 +25,30 @@ func resourcePingAccessHTTPSListenerSchema() map[string]*schema.Schema { return map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateHTTPListenerName, - ForceNew: true, + "name": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: validateHTTPListenerName, + ForceNew: true, }, - "key_pair_id": &schema.Schema{ + "key_pair_id": { Type: schema.TypeInt, Required: true, }, - "use_server_cipher_suite_order": &schema.Schema{ + "use_server_cipher_suite_order": { Type: schema.TypeBool, Required: true, }, } } -func resourcePingAccessHTTPSListenerCreate(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).HttpsListeners - input := pingaccess.GetHttpsListenersCommandInput{} +func resourcePingAccessHTTPSListenerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).HttpsListeners + input := pa.GetHttpsListenersCommandInput{} result, _, err := svc.GetHttpsListenersCommand(&input) if err != nil { - return fmt.Errorf("Error retrieving HttpsListener list: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to retrieve listener list: %s", err))} } name := d.Get("name").(string) @@ -56,48 +58,49 @@ func resourcePingAccessHTTPSListenerCreate(d *schema.ResourceData, m interface{} return resourcePingAccessHTTPSListenerReadResult(d, listener) } } - return fmt.Errorf("Error managing HttpsListener: %s", name) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to manage listener: %s", name))} } -func resourcePingAccessHTTPSListenerRead(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).HttpsListeners - input := &pingaccess.GetHttpsListenerCommandInput{ +func resourcePingAccessHTTPSListenerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).HttpsListeners + input := &pa.GetHttpsListenerCommandInput{
Id: d.Id(), } result, _, err := svc.GetHttpsListenerCommand(input) if err != nil { - return fmt.Errorf("Error reading HttpsListener: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read listener: %s", err))} } return resourcePingAccessHTTPSListenerReadResult(d, result) } -func resourcePingAccessHTTPSListenerUpdate(d *schema.ResourceData, m interface{}) error { - svc := m.(*pingaccess.Client).HttpsListeners - input := pingaccess.UpdateHttpsListenerInput{ +func resourcePingAccessHTTPSListenerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).HttpsListeners + input := pa.UpdateHttpsListenerInput{ Body: *resourcePingAccessHTTPSListenerReadData(d), Id: d.Id(), } result, _, err := svc.UpdateHttpsListener(&input) if err != nil { - return fmt.Errorf("Error updating HttpsListener: %s", err.Error()) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update listener: %s", err))} } return resourcePingAccessHTTPSListenerReadResult(d, result) } -func resourcePingAccessHTTPSListenerDelete(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessHTTPSListenerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { return nil } -func resourcePingAccessHTTPSListenerReadResult(d *schema.ResourceData, input *pingaccess.HttpsListenerView) error { - setResourceDataString(d, "name", input.Name) - setResourceDataInt(d, "key_pair_id", input.KeyPairId) - setResourceDataBool(d, "use_server_cipher_suite_order", input.UseServerCipherSuiteOrder) - return nil +func resourcePingAccessHTTPSListenerReadResult(d *schema.ResourceData, input *pa.HttpsListenerView) diag.Diagnostics { + var diags diag.Diagnostics + setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) + setResourceDataIntWithDiagnostic(d, "key_pair_id", input.KeyPairId, &diags) + setResourceDataBoolWithDiagnostic(d, "use_server_cipher_suite_order", input.UseServerCipherSuiteOrder, &diags) + return diags } -func resourcePingAccessHTTPSListenerReadData(d *schema.ResourceData) *pingaccess.HttpsListenerView { - engine := &pingaccess.HttpsListenerView{ +func resourcePingAccessHTTPSListenerReadData(d *schema.ResourceData) *pa.HttpsListenerView { + engine := &pa.HttpsListenerView{ Name: String(d.Get("name").(string)), KeyPairId: Int(d.Get("key_pair_id").(int)), UseServerCipherSuiteOrder: Bool(d.Get("use_server_cipher_suite_order").(bool)), diff --git a/pingaccess/resource_pingaccess_https_listener_test.go b/pingaccess/resource_pingaccess_https_listener_test.go index e44d9297..29914e3d 100644 --- a/pingaccess/resource_pingaccess_https_listener_test.go +++ b/pingaccess/resource_pingaccess_https_listener_test.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -55,20 +54,20 @@ func testAccCheckPingAccessHTTPSListenerExists(n string) resource.TestCheckFunc } if rs.Primary.ID == "" || rs.Primary.ID == "0" { - return fmt.Errorf("No HttpsListener ID is set") + return fmt.Errorf("No listener ID is set") } - conn := testAccProvider.Meta().(*pingaccess.Client).HttpsListeners - result, _, err := conn.GetHttpsListenerCommand(&pingaccess.GetHttpsListenerCommandInput{ + conn := testAccProvider.Meta().(*pa.Client).HttpsListeners + result, _, err 
:= conn.GetHttpsListenerCommand(&pa.GetHttpsListenerCommandInput{ Id: rs.Primary.ID, }) if err != nil { - return fmt.Errorf("Error: HttpsListener (%s) not found", n) + return fmt.Errorf("Error: listener (%s) not found", n) } if *result.Name != rs.Primary.Attributes["name"] { - return fmt.Errorf("Error: HttpsListener response (%s) didnt match state (%s)", *result.Name, rs.Primary.Attributes["name"]) + return fmt.Errorf("Error: listener response (%s) didn't match state (%s)", *result.Name, rs.Primary.Attributes["name"]) } return nil @@ -77,10 +76,10 @@ func testAccCheckPingAccessHTTPSListenerExists(n string) resource.TestCheckFunc func Test_resourcePingAccessHTTPSListenerReadData(t *testing.T) { cases := []struct { - HttpsListener pa.HttpsListenerView + listener pa.HttpsListenerView }{ { - HttpsListener: pa.HttpsListenerView{ + listener: pa.HttpsListenerView{ Name: String("ADMIN"), KeyPairId: Int(1), UseServerCipherSuiteOrder: Bool(true), @@ -92,10 +91,10 @@ func Test_resourcePingAccessHTTPSListenerReadData(t *testing.T) { resourceSchema := resourcePingAccessHTTPSListenerSchema() resourceLocalData := schema.TestResourceDataRaw(t, resourceSchema, map[string]interface{}{}) - resourcePingAccessHTTPSListenerReadResult(resourceLocalData, &tc.HttpsListener) + resourcePingAccessHTTPSListenerReadResult(resourceLocalData, &tc.listener) - if got := *resourcePingAccessHTTPSListenerReadData(resourceLocalData); !cmp.Equal(got, tc.HttpsListener) { - t.Errorf("resourcePingAccessHTTPSListenerReadData() = %v", cmp.Diff(got, tc.HttpsListener)) + if got := *resourcePingAccessHTTPSListenerReadData(resourceLocalData); !cmp.Equal(got, tc.listener) { + t.Errorf("resourcePingAccessHTTPSListenerReadData() = %v", cmp.Diff(got, tc.listener)) } }) } diff --git a/pingaccess/resource_pingaccess_identity_mapping.go b/pingaccess/resource_pingaccess_identity_mapping.go index f868656a..9019bb14 100644 --- a/pingaccess/resource_pingaccess_identity_mapping.go +++ b/pingaccess/resource_pingaccess_identity_mapping.go @@ -1,45 +1,61 @@ package pingaccess import ( + "context" "crypto/tls" "encoding/json" "fmt" "log" "net/http" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessIdentityMapping() *schema.Resource { return &schema.Resource{ - Create: resourcePingAccessIdentityMappingCreate, - Read: resourcePingAccessIdentityMappingRead, - Update: resourcePingAccessIdentityMappingUpdate, - Delete: resourcePingAccessIdentityMappingDelete, + CreateContext: resourcePingAccessIdentityMappingCreate, + ReadContext: resourcePingAccessIdentityMappingRead, + UpdateContext: resourcePingAccessIdentityMappingUpdate, + DeleteContext: resourcePingAccessIdentityMappingDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, - Schema: map[string]*schema.Schema{ - "class_name": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "configuration": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: suppressEquivalentJsonDiffs, - }, + Schema: resourcePingAccessIdentityMappingSchema(), + CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error { + svc := m.(*pingaccess.Client).IdentityMappings + desc, _, _ := svc.GetIdentityMappingDescriptorsCommand() + className := d.Get("class_name").(string) + if err :=
descriptorsHasClassName(className, desc); err != nil { + return err + } + return validateConfiguration(className, d, desc) }, } } -func resourcePingAccessIdentityMappingCreate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessIdentityMappingSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "class_name": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "configuration": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppressEquivalentJSONDiffs, + }, + } +} + +func resourcePingAccessIdentityMappingCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).IdentityMappings input := pingaccess.AddIdentityMappingCommandInput{ Body: *resourcePingAccessIdentityMappingReadData(d), @@ -47,26 +63,27 @@ func resourcePingAccessIdentityMappingCreate(d *schema.ResourceData, m interface result, _, err := svc.AddIdentityMappingCommand(&input) if err != nil { - return fmt.Errorf("Error creating IdentityMapping: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create IdentityMapping: %s", err))} } d.SetId(result.Id.String()) return resourcePingAccessIdentityMappingReadResult(d, result, svc) } -func resourcePingAccessIdentityMappingRead(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessIdentityMappingRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).IdentityMappings input := &pingaccess.GetIdentityMappingCommandInput{ Id: d.Id(), } result, _, err := svc.GetIdentityMappingCommand(input) if err != nil { - return fmt.Errorf("Error reading IdentityMapping: %s", err) + + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read IdentityMapping: %s", err))} } return resourcePingAccessIdentityMappingReadResult(d, result, svc) } -func resourcePingAccessIdentityMappingUpdate(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessIdentityMappingUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).IdentityMappings input := pingaccess.UpdateIdentityMappingCommandInput{ Body: *resourcePingAccessIdentityMappingReadData(d), @@ -75,29 +92,27 @@ func resourcePingAccessIdentityMappingUpdate(d *schema.ResourceData, m interface result, _, err := svc.UpdateIdentityMappingCommand(&input) if err != nil { - return fmt.Errorf("Error creating IdentityMapping: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update IdentityMapping: %s", err))} } d.SetId(result.Id.String()) return resourcePingAccessIdentityMappingReadResult(d, result, svc) } -func resourcePingAccessIdentityMappingDelete(d *schema.ResourceData, m interface{}) error { +func resourcePingAccessIdentityMappingDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).IdentityMappings http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} log.Printf("[INFO] ResourceID: %s", d.Id()) _, err := svc.DeleteIdentityMappingCommand(&pingaccess.DeleteIdentityMappingCommandInput{Id: d.Id()}) if err != nil { - return fmt.Errorf("Error deleting IdentityMapping: %s", err) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete IdentityMapping: %s", err))} } return nil } -func resourcePingAccessIdentityMappingReadResult(d *schema.ResourceData, input *pingaccess.IdentityMappingView, svc 
*pingaccess.IdentityMappingsService) error { - setResourceDataString(d, "name", input.Name) - setResourceDataString(d, "class_name", input.ClassName) - +func resourcePingAccessIdentityMappingReadResult(d *schema.ResourceData, input *pingaccess.IdentityMappingView, svc *pingaccess.IdentityMappingsService) diag.Diagnostics { + var diags diag.Diagnostics b, _ := json.Marshal(input.Configuration) config := string(b) @@ -109,10 +124,10 @@ func resourcePingAccessIdentityMappingReadResult(d *schema.ResourceData, input * desc, _, _ := svc.GetIdentityMappingDescriptorsCommand() config = maskConfigFromDescriptors(desc, input.ClassName, originalConfig, config) - if err := d.Set("configuration", config); err != nil { - return err - } - return nil + setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) + setResourceDataStringWithDiagnostic(d, "class_name", input.ClassName, &diags) + setResourceDataStringWithDiagnostic(d, "configuration", &config, &diags) + return diags } func resourcePingAccessIdentityMappingReadData(d *schema.ResourceData) *pingaccess.IdentityMappingView { diff --git a/pingaccess/resource_pingaccess_identity_mapping_test.go b/pingaccess/resource_pingaccess_identity_mapping_test.go index 77ca2b9b..5417d2db 100644 --- a/pingaccess/resource_pingaccess_identity_mapping_test.go +++ b/pingaccess/resource_pingaccess_identity_mapping_test.go @@ -3,14 +3,24 @@ package pingaccess import ( "encoding/json" "fmt" + "net/http" + "net/http/httptest" + "net/url" + "regexp" "testing" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessIdentityMapping(t *testing.T) { + resourceName := "pingaccess_identity_mapping.acc_test_idm" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -19,17 +29,31 @@ func TestAccPingAccessIdentityMapping(t *testing.T) { { Config: testAccPingAccessIdentityMappingConfig("bar", "SUB_HEADER"), Check: resource.ComposeTestCheckFunc( - testAccCheckPingAccessIdentityMappingExists("pingaccess_identity_mapping.acc_test_idm_bar"), - testAccCheckPingAccessIdentityMappingAttributes("pingaccess_identity_mapping.acc_test_idm_bar"), + testAccCheckPingAccessIdentityMappingExists(resourceName), + testAccCheckPingAccessIdentityMappingAttributes(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", "bar"), + resource.TestCheckResourceAttr(resourceName, "class_name", "com.pingidentity.pa.identitymappings.HeaderIdentityMapping"), + resource.TestCheckResourceAttrSet(resourceName, "configuration"), ), }, { Config: testAccPingAccessIdentityMappingConfig("bar", "SUB"), Check: resource.ComposeTestCheckFunc( - testAccCheckPingAccessIdentityMappingExists("pingaccess_identity_mapping.acc_test_idm_bar"), - testAccCheckPingAccessIdentityMappingAttributes("pingaccess_identity_mapping.acc_test_idm_bar"), + testAccCheckPingAccessIdentityMappingExists(resourceName), + testAccCheckPingAccessIdentityMappingAttributes(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", "bar"), + resource.TestCheckResourceAttr(resourceName, "class_name", "com.pingidentity.pa.identitymappings.HeaderIdentityMapping"), + 
resource.TestCheckResourceAttrSet(resourceName, "configuration"), ), }, + { + Config: testAccPingAccessIdentityMappingConfigWrongClassName(), + ExpectError: regexp.MustCompile(`unable to find className 'com.pingidentity.pa.identitymappings.foo' available className's: com.pingidentity.pa.identitymappings.HeaderIdentityMapping, com.pingidentity.pa.identitymappings.JwtIdentityMapping`), + }, + { + Config: testAccPingAccessIdentityMappingConfigMissingRequired(), + ExpectError: regexp.MustCompile(`configuration validation failed against the class descriptor definition\nthe field 'headerName' is required for the class_name 'com.pingidentity.pa.identitymappings.JwtIdentityMapping'\nthe field 'audience' is required for the class_name 'com.pingidentity.pa.identitymappings.JwtIdentityMapping'`), + }, }, }) } @@ -40,7 +64,7 @@ func testAccCheckPingAccessIdentityMappingDestroy(s *terraform.State) error { func testAccPingAccessIdentityMappingConfig(name, configUpdate string) string { return fmt.Sprintf(` - resource "pingaccess_identity_mapping" "acc_test_idm_%s" { + resource "pingaccess_identity_mapping" "acc_test_idm" { class_name = "com.pingidentity.pa.identitymappings.HeaderIdentityMapping" name = "%s" configuration = < 0 { s["web"] = flattenPolicyItem(*val) @@ -286,13 +284,11 @@ func flattenPolicy(in map[string]*[]*pa.PolicyItem) []interface{} { // Takes the result of flatmap.Expand for an array of strings // and returns a []string func expandStringList(configured []interface{}) []*string { - // log.Printf("[INFO] expandStringList %d", len(configured)) vs := make([]*string, 0, len(configured)) for _, v := range configured { val := v.(string) if val != "" { vs = append(vs, &val) - // log.Printf("[DEBUG] Appending: %s", val) } } return vs @@ -313,7 +309,7 @@ func expandIntList(configured []interface{}) []*int { } func flattenChainCertificates(in []*pa.ChainCertificateView) *schema.Set { - m := []interface{}{} + var m []interface{} for _, v := range in { s := make(map[string]interface{}) s["alias"] = *v.Alias @@ -350,20 +346,6 @@ func flattenLinkViewList(in []*pa.LinkView) []interface{} { return m } -func expandLinkView(in []interface{}) *pa.LinkView { - ca := &pa.LinkView{} - for _, raw := range in { - l := raw.(map[string]interface{}) - if val, ok := l["id"]; ok { - ca.Id = String(val.(string)) - } - if val, ok := l["location"]; ok { - ca.Location = String(val.(string)) - } - } - return ca -} - func flattenLinkView(in *pa.LinkView) map[string]interface{} { s := make(map[string]interface{}) if in.Id != nil { @@ -440,3 +422,85 @@ func maskConfigFromConfigurationField(field *pa.ConfigurationField, input *strin } return config } + +//Checks the class name specified exists in the DescriptorsView +// +func descriptorsHasClassName(className string, desc *pa.DescriptorsView) error { + var classes []string + for _, value := range desc.Items { + classes = append(classes, *value.ClassName) + if *value.ClassName == className { + return nil + } + } + return fmt.Errorf("unable to find className '%s' available className's: %s", className, strings.Join(classes, ", ")) +} + +//Checks all the fields in the descriptor to ensure all required fields are set +// +func validateConfiguration(className string, d *schema.ResourceDiff, desc *pa.DescriptorsView) error { + var diags diag.Diagnostics + conf := d.Get("configuration").(string) + for _, value := range desc.Items { + if *value.ClassName == className { + for _, f := range value.ConfigurationFields { + if *f.Required { + if v := gjson.Get(conf, *f.Name); !v.Exists() {
+ diags = append(diags, diag.FromErr(fmt.Errorf("the field '%s' is required for the class_name '%s'", *f.Name, className))) + } + } + } + } + if diags.HasError() { + msgs := []string{ + "configuration validation failed against the class descriptor definition", + } + for _, diagnostic := range diags { + msgs = append(msgs, diagnostic.Summary) + } + return fmt.Errorf("%s", strings.Join(msgs, "\n")) + } + return nil +} + +//Checks the class name specified exists in the RuleDescriptorsView +// +func ruleDescriptorsHasClassName(className string, desc *pa.RuleDescriptorsView) error { + var classes []string + for _, value := range desc.Items { + classes = append(classes, *value.ClassName) + if *value.ClassName == className { + return nil + } + } + return fmt.Errorf("unable to find className '%s' available className's: %s", className, strings.Join(classes, ", ")) +} + +//Checks all the fields in the Rule descriptor to ensure all required fields are set +// +func validateRulesConfiguration(className string, d *schema.ResourceDiff, desc *pa.RuleDescriptorsView) error { + var diags diag.Diagnostics + conf := d.Get("configuration").(string) + for _, value := range desc.Items { + if *value.ClassName == className { + for _, f := range value.ConfigurationFields { + if *f.Required { + if v := gjson.Get(conf, *f.Name); !v.Exists() { + diags = append(diags, diag.FromErr(fmt.Errorf("the field '%s' is required for the class_name '%s'", *f.Name, className))) + } + } + } + } + } + if diags.HasError() { + msgs := []string{ + "configuration validation failed against the class descriptor definition", + } + for _, diagnostic := range diags { + msgs = append(msgs, diagnostic.Summary) + } + return fmt.Errorf("%s", strings.Join(msgs, "\n")) + } + return nil +} diff --git a/pingaccess/validators.go b/pingaccess/validators.go index d5aeecb1..53fcf373 100644 --- a/pingaccess/validators.go +++ b/pingaccess/validators.go @@ -2,100 +2,111 @@ package pingaccess import ( "fmt" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" ) -func validateWebOrAPI(value interface{}, field string) (warns []string, errs []error) { +func validateWebOrAPI(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "Web" && v != "API" { - errs = append(errs, fmt.Errorf("%q must be either 'Web' or 'API' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Web' or 'API' not %s", v))} } - return + return nil } -func validateRuleOrRuleSet(value interface{}, field string) (warns []string, errs []error) { +func validateRuleOrRuleSet(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "Rule" && v != "RuleSet" { - errs = append(errs, fmt.Errorf("%q must be either 'Rule' or 'RuleSet' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Rule' or 'RuleSet' not %s", v))} } - return + return nil } -func validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(value interface{}, field string) (warns []string, errs []error) { +func validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "SuccessIfAllSucceed" && v != "SuccessIfAnyOneSucceeds" { - errs = append(errs, fmt.Errorf("%q must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not %s", v))} } - return + return nil } -func
validateAudience(value interface{}, field string) (warns []string, errs []error) { +func validateAudience(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if len(v) < 1 || len(v) > 31 { - errs = append(errs, fmt.Errorf("%q must be between 1 and 32 characters not %d", field, len(v))) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be between 1 and 32 characters not %d", len(v)))} } - return + return nil } -func validateCookieType(value interface{}, field string) (warns []string, errs []error) { +func validateCookieType(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "Encrypted" && v != "Signed" { - errs = append(errs, fmt.Errorf("%q must be either 'Encrypted' or 'Signed' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Encrypted' or 'Signed' not %s", v))} } - return + return nil } -func validateOidcLoginType(value interface{}, field string) (warns []string, errs []error) { +func validateOidcLoginType(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "Code" && v != "POST" && v != "x_post" { - errs = append(errs, fmt.Errorf("%q must be either 'Code', 'POST' or 'x_post' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Code', 'POST' or 'x_post' not %s", v))} + } + return nil +} + +func validatePkceChallengeType(value interface{}, path cty.Path) diag.Diagnostics { + v := value.(string) + if v != "SHA256" && v != "OFF" { + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'SHA256' or 'OFF' not %s", v))} } - return + return nil } -func validateRequestPreservationType(value interface{}, field string) (warns []string, errs []error) { +func validateRequestPreservationType(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "None" && v != "POST" && v != "All" { - errs = append(errs, fmt.Errorf("%q must be either 'None', 'POST' or 'All' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'None', 'POST' or 'All' not %s", v))} } - return + return nil } -func validateWebStorageType(value interface{}, field string) (warns []string, errs []error) { +func validateWebStorageType(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "SessionStorage" && v != "LocalStorage" { - errs = append(errs, fmt.Errorf("%q must be either 'SessionStorage' or 'LocalStorage' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'SessionStorage' or 'LocalStorage' not %s", v))} } - return + return nil } -func validateListLocationValue(value interface{}, field string) (warns []string, errs []error) { +func validateListLocationValue(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "FIRST" && v != "LAST" { - errs = append(errs, fmt.Errorf("%q must be either 'FIRST' or 'LAST' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'FIRST' or 'LAST' not %s", v))} } - return + return nil } -func validateHTTPListenerName(value interface{}, field string) (warns []string, errs []error) { +func validateHTTPListenerName(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "ADMIN" && v != "AGENT" && v != "ENGINE" { - errs = append(errs, fmt.Errorf("%q must be either 'ADMIN', 'AGENT' or 'ENGINE' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'ADMIN', 'AGENT' or 'ENGINE' not %s", v))} } - return + return nil } 
-func validateWebSessionSameSite(value interface{}, field string) (warns []string, errs []error) { +func validateWebSessionSameSite(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "Disabled" && v != "Lax" && v != "None" { - errs = append(errs, fmt.Errorf("%q must be either 'Disabled', 'Lax' or 'None' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Disabled', 'Lax' or 'None' not %s", v))} } - return + return nil } -func validateAuditLevel(value interface{}, field string) (warns []string, errs []error) { +func validateAuditLevel(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "ON" && v != "OFF" { - errs = append(errs, fmt.Errorf("%q must be either 'ON' or 'OFF' not %s", field, v)) + return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'ON' or 'OFF' not %s", v))} } - return + return nil } diff --git a/pingaccess/validators_test.go b/pingaccess/validators_test.go index 84aed233..ab446b8d 100644 --- a/pingaccess/validators_test.go +++ b/pingaccess/validators_test.go @@ -2,491 +2,404 @@ package pingaccess import ( "fmt" - "reflect" "testing" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" ) func Test_validateWebOrAPI(t *testing.T) { - type args struct { - value interface{} - field string - } tests := []struct { - name string - args args - wantWarns []string - wantErrs []error + name string + value interface{} + expectedDiags diag.Diagnostics }{ { - name: "Web passes", - args: args{ - value: "Web", - field: "default_auth_type_override", - }, - wantWarns: nil, - wantErrs: nil, + name: "Web passes", + value: "Web", + expectedDiags: nil, }, { - name: "API passes", - args: args{ - value: "API", - field: "default_auth_type_override", - }, - wantWarns: nil, - wantErrs: nil, + name: "API passes", + value: "API", + expectedDiags: nil, }, { - name: "junk does not pass", - args: args{ - value: "other", - field: "default_auth_type_override", - }, - wantWarns: nil, - wantErrs: []error{fmt.Errorf("%q must be either 'Web' or 'API' not %s", "default_auth_type_override", "other")}, + name: "junk does not pass", + value: "other", + expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Web' or 'API' not other"))}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotWarns, gotErrs := validateWebOrAPI(tt.args.value, tt.args.field) - if !reflect.DeepEqual(gotWarns, tt.wantWarns) { - t.Errorf("validateWebOrAPI() gotWarns = %v, want %v", gotWarns, tt.wantWarns) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + diags := validateWebOrAPI(tc.value, cty.Path{}) + if len(diags) != len(tc.expectedDiags) { + t.Fatalf("%s: wrong number of diags, expected %d, got %d", tc.name, len(tc.expectedDiags), len(diags)) } - if !reflect.DeepEqual(gotErrs, tt.wantErrs) { - t.Errorf("validateWebOrAPI() gotErrs = %v, want %v", gotErrs, tt.wantErrs) + for j := range diags { + if diags[j].Severity != tc.expectedDiags[j].Severity { + t.Fatalf("%s: expected severity %v, got %v", tc.name, tc.expectedDiags[j].Severity, diags[j].Severity) + } + if !diags[j].AttributePath.Equals(tc.expectedDiags[j].AttributePath) { + t.Fatalf("%s: attribute paths do not match expected: %v, got %v", tc.name, tc.expectedDiags[j].AttributePath, diags[j].AttributePath) + } + if diags[j].Summary != tc.expectedDiags[j].Summary { + t.Fatalf("%s: summary does not match expected: %v, got %v", tc.name, tc.expectedDiags[j].Summary, 
diags[j].Summary) + } } }) } } func Test_validateRuleOrRuleSet(t *testing.T) { - type args struct { - value interface{} - field string - } tests := []struct { - name string - args args - wantWarns []string - wantErrs []error + name string + value interface{} + expectedDiags diag.Diagnostics }{ { - name: "Rule passes", - args: args{ - value: "Rule", - field: "elementType", - }, - wantWarns: nil, - wantErrs: nil, + name: "Rule passes", + value: "Rule", + expectedDiags: nil, }, { - name: "RuleSet passes", - args: args{ - value: "RuleSet", - field: "elementType", - }, - wantWarns: nil, - wantErrs: nil, + name: "RuleSet passes", + value: "RuleSet", + expectedDiags: nil, }, { - name: "junk does not pass", - args: args{ - value: "other", - field: "elementType", - }, - wantWarns: nil, - wantErrs: []error{fmt.Errorf("%q must be either 'Rule' or 'RuleSet' not %s", "elementType", "other")}, + name: "junk does not pass", + value: "other", + expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Rule' or 'RuleSet' not other"))}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotWarns, gotErrs := validateRuleOrRuleSet(tt.args.value, tt.args.field) - if !reflect.DeepEqual(gotWarns, tt.wantWarns) { - t.Errorf("validateRuleOrRuleSet() gotWarns = %v, want %v", gotWarns, tt.wantWarns) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + diags := validateRuleOrRuleSet(tc.value, cty.Path{}) + if len(diags) != len(tc.expectedDiags) { + t.Fatalf("%s: wrong number of diags, expected %d, got %d", tc.name, len(tc.expectedDiags), len(diags)) } - if !reflect.DeepEqual(gotErrs, tt.wantErrs) { - t.Errorf("validateRuleOrRuleSet() gotErrs = %v, want %v", gotErrs, tt.wantErrs) + for j := range diags { + if diags[j].Severity != tc.expectedDiags[j].Severity { + t.Fatalf("%s: expected severity %v, got %v", tc.name, tc.expectedDiags[j].Severity, diags[j].Severity) + } + if !diags[j].AttributePath.Equals(tc.expectedDiags[j].AttributePath) { + t.Fatalf("%s: attribute paths do not match expected: %v, got %v", tc.name, tc.expectedDiags[j].AttributePath, diags[j].AttributePath) + } + if diags[j].Summary != tc.expectedDiags[j].Summary { + t.Fatalf("%s: summary does not match expected: %v, got %v", tc.name, tc.expectedDiags[j].Summary, diags[j].Summary) + } } }) } } func Test_validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(t *testing.T) { - type args struct { - value interface{} - field string - } tests := []struct { - name string - args args - wantWarns []string - wantErrs []error + name string + value interface{} + expectedDiags diag.Diagnostics }{ { - name: "SuccessIfAllSucceed passes", - args: args{ - value: "SuccessIfAllSucceed", - field: "successCriteria", - }, - wantWarns: nil, - wantErrs: nil, + name: "SuccessIfAllSucceed passes", + value: "SuccessIfAllSucceed", + expectedDiags: nil, }, { - name: "SuccessIfAnyOneSucceeds passes", - args: args{ - value: "SuccessIfAnyOneSucceeds", - field: "successCriteria", - }, - wantWarns: nil, - wantErrs: nil, + name: "SuccessIfAnyOneSucceeds passes", + value: "SuccessIfAnyOneSucceeds", + expectedDiags: nil, }, { - name: "junk does not pass", - args: args{ - value: "other", - field: "successCriteria", - }, - wantWarns: nil, - wantErrs: []error{fmt.Errorf("%q must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not %s", "successCriteria", "other")}, + name: "junk does not pass", + value: "other", + expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'SuccessIfAllSucceed' or 
'SuccessIfAnyOneSucceeds' not other"))}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotWarns, gotErrs := validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(tt.args.value, tt.args.field) - if !reflect.DeepEqual(gotWarns, tt.wantWarns) { - t.Errorf("validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds() gotWarns = %v, want %v", gotWarns, tt.wantWarns) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + diags := validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(tc.value, cty.Path{}) + if len(diags) != len(tc.expectedDiags) { + t.Fatalf("%s: wrong number of diags, expected %d, got %d", tc.name, len(tc.expectedDiags), len(diags)) } - if !reflect.DeepEqual(gotErrs, tt.wantErrs) { - t.Errorf("validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds() gotErrs = %v, want %v", gotErrs, tt.wantErrs) + for j := range diags { + if diags[j].Severity != tc.expectedDiags[j].Severity { + t.Fatalf("%s: expected severity %v, got %v", tc.name, tc.expectedDiags[j].Severity, diags[j].Severity) + } + if !diags[j].AttributePath.Equals(tc.expectedDiags[j].AttributePath) { + t.Fatalf("%s: attribute paths do not match expected: %v, got %v", tc.name, tc.expectedDiags[j].AttributePath, diags[j].AttributePath) + } + if diags[j].Summary != tc.expectedDiags[j].Summary { + t.Fatalf("%s: summary does not match expected: %v, got %v", tc.name, tc.expectedDiags[j].Summary, diags[j].Summary) + } } }) } } func Test_validateAudience(t *testing.T) { - type args struct { - value interface{} - field string - } tests := []struct { - name string - args args - wantWarns []string - wantErrs []error + name string + value interface{} + expectedDiags diag.Diagnostics }{ { - name: "Normal value passes", - args: args{ - value: "normal", - field: "audience", - }, - wantWarns: nil, - wantErrs: nil, + name: "Normal value passes", + value: "normal", + expectedDiags: nil, }, { - name: "Empty value failes", - args: args{ - value: "", - field: "audience", - }, - wantWarns: nil, - wantErrs: []error{fmt.Errorf("%q must be between 1 and 32 characters not %s", "audience", "0")}, + name: "Empty value fails", + value: "", + expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be between 1 and 32 characters not 0"))}, }, { - name: "Very long value failes", - args: args{ - value: "12345678901234567890123456789012", - field: "audience", - }, - wantWarns: nil, - wantErrs: []error{fmt.Errorf("%q must be between 1 and 32 characters not %s", "audience", "32")}, + name: "Very long value fails", + value: "12345678901234567890123456789012", + expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be between 1 and 32 characters not 32"))}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotWarns, gotErrs := validateAudience(tt.args.value, tt.args.field) - if !reflect.DeepEqual(gotWarns, tt.wantWarns) { - t.Errorf("validateAudience() gotWarns = %v, want %v", gotWarns, tt.wantWarns) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + diags := validateAudience(tc.value, cty.Path{}) + if len(diags) != len(tc.expectedDiags) { + t.Fatalf("%s: wrong number of diags, expected %d, got %d", tc.name, len(tc.expectedDiags), len(diags)) } + for j := range diags { + if diags[j].Severity != tc.expectedDiags[j].Severity { + t.Fatalf("%s: expected severity %v, got %v", tc.name, tc.expectedDiags[j].Severity, diags[j].Severity) + } + if
!diags[j].AttributePath.Equals(tc.expectedDiags[j].AttributePath) { + t.Fatalf("%s: attribute paths do not match expected: %v, got %v", tc.name, tc.expectedDiags[j].AttributePath, diags[j].AttributePath) + } + if diags[j].Summary != tc.expectedDiags[j].Summary { + t.Fatalf("%s: summary does not match expected: %v, got %v", tc.name, tc.expectedDiags[j].Summary, diags[j].Summary) + } } }) } } func Test_validateCookieType(t *testing.T) { - type args struct { - value interface{} - field string - } tests := []struct { - name string - args args - wantWarns []string - wantErrs []error + name string + value interface{} + expectedDiags diag.Diagnostics }{ { - name: "Encrypted passes", - args: args{ - value: "Encrypted", - field: "cookie_type", - }, - wantWarns: nil, - wantErrs: nil, + name: "Encrypted passes", + value: "Encrypted", + expectedDiags: nil, }, { - name: "Signed passes", - args: args{ - value: "Signed", - field: "cookie_type", - }, - wantWarns: nil, - wantErrs: nil, + name: "Signed passes", + value: "Signed", + expectedDiags: nil, }, { - name: "junk does not pass", - args: args{ - value: "other", - field: "cookie_type", - }, - wantWarns: nil, - wantErrs: []error{fmt.Errorf("%q must be either 'Encrypted' or 'Signed' not %s", "cookie_type", "other")}, + name: "junk does not pass", + value: "other", + expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Encrypted' or 'Signed' not other"))}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotWarns, gotErrs := validateCookieType(tt.args.value, tt.args.field) - if !reflect.DeepEqual(gotWarns, tt.wantWarns) { - t.Errorf("validateCookieType() gotWarns = %v, want %v", gotWarns, tt.wantWarns) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + diags := validateCookieType(tc.value, cty.Path{}) + if len(diags) != len(tc.expectedDiags) { + t.Fatalf("%s: wrong number of diags, expected %d, got %d", tc.name, len(tc.expectedDiags), len(diags)) } - if !reflect.DeepEqual(gotErrs, tt.wantErrs) { - t.Errorf("validateCookieType() gotErrs = %v, want %v", gotErrs, tt.wantErrs) + for j := range diags { + if diags[j].Severity != tc.expectedDiags[j].Severity { + t.Fatalf("%s: expected severity %v, got %v", tc.name, tc.expectedDiags[j].Severity, diags[j].Severity) + } + if !diags[j].AttributePath.Equals(tc.expectedDiags[j].AttributePath) { + t.Fatalf("%s: attribute paths do not match expected: %v, got %v", tc.name, tc.expectedDiags[j].AttributePath, diags[j].AttributePath) + } + if diags[j].Summary != tc.expectedDiags[j].Summary { + t.Fatalf("%s: summary does not match expected: %v, got %v", tc.name, tc.expectedDiags[j].Summary, diags[j].Summary) + } } }) } } func Test_validateOidcLoginType(t *testing.T) { - type args struct { - value interface{} - field string - } tests := []struct { - name string - args args - wantWarns []string - wantErrs []error + name string + value interface{} + expectedDiags diag.Diagnostics }{ { - name: "Code passes", - args: args{ - value: "Code", - field: "oidc_login_type", - }, - wantWarns: nil, - wantErrs: nil, + name: "Code passes", + value: "Code", + expectedDiags: nil, }, { - name: "POST passes", - args: args{ - value: "POST", - field: "oidc_login_type", - }, - wantWarns: nil, - wantErrs: nil, + name: "POST passes", + value: "POST", + expectedDiags: nil, }, { - name: "x_post passes", - args: args{ - value: "x_post", - field: "oidc_login_type", - }, - wantWarns: nil, - wantErrs: nil, + name: "x_post passes", + value: "x_post", + expectedDiags: nil, }, { - name: "junk 
does not pass", - args: args{ - value: "other", - field: "oidc_login_type", - }, - wantWarns: nil, - wantErrs: []error{fmt.Errorf("%q must be either 'Code', 'POST' or 'x_post' not %s", "oidc_login_type", "other")}, + name: "junk does not pass", + value: "other", + expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Code', 'POST' or 'x_post' not other"))}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotWarns, gotErrs := validateOidcLoginType(tt.args.value, tt.args.field) - if !reflect.DeepEqual(gotWarns, tt.wantWarns) { - t.Errorf("validateOidcLoginType() gotWarns = %v, want %v", gotWarns, tt.wantWarns) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + diags := validateOidcLoginType(tc.value, cty.Path{}) + if len(diags) != len(tc.expectedDiags) { + t.Fatalf("%s: wrong number of diags, expected %d, got %d", tc.name, len(tc.expectedDiags), len(diags)) } - if !reflect.DeepEqual(gotErrs, tt.wantErrs) { - t.Errorf("validateOidcLoginType() gotErrs = %v, want %v", gotErrs, tt.wantErrs) + for j := range diags { + if diags[j].Severity != tc.expectedDiags[j].Severity { + t.Fatalf("%s: expected severity %v, got %v", tc.name, tc.expectedDiags[j].Severity, diags[j].Severity) + } + if !diags[j].AttributePath.Equals(tc.expectedDiags[j].AttributePath) { + t.Fatalf("%s: attribute paths do not match expected: %v, got %v", tc.name, tc.expectedDiags[j].AttributePath, diags[j].AttributePath) + } + if diags[j].Summary != tc.expectedDiags[j].Summary { + t.Fatalf("%s: summary does not match expected: %v, got %v", tc.name, tc.expectedDiags[j].Summary, diags[j].Summary) + } } }) } } func Test_validateRequestPreservationType(t *testing.T) { - type args struct { - value interface{} - field string - } tests := []struct { - name string - args args - wantWarns []string - wantErrs []error + name string + value interface{} + expectedDiags diag.Diagnostics }{ { - name: "None passes", - args: args{ - value: "None", - field: "refresh_user_info_claims_interval", - }, - wantWarns: nil, - wantErrs: nil, + name: "None passes", + value: "None", + expectedDiags: nil, }, { - name: "POST passes", - args: args{ - value: "POST", - field: "refresh_user_info_claims_interval", - }, - wantWarns: nil, - wantErrs: nil, + name: "POST passes", + value: "POST", + expectedDiags: nil, }, { - name: "All passes", - args: args{ - value: "All", - field: "refresh_user_info_claims_interval", - }, - wantWarns: nil, - wantErrs: nil, + name: "All passes", + value: "All", + expectedDiags: nil, }, { - name: "junk does not pass", - args: args{ - value: "other", - field: "refresh_user_info_claims_interval", - }, - wantWarns: nil, - wantErrs: []error{fmt.Errorf("%q must be either 'None', 'POST' or 'All' not %s", "refresh_user_info_claims_interval", "other")}, + name: "junk does not pass", + value: "other", + expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'None', 'POST' or 'All' not other"))}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotWarns, gotErrs := validateRequestPreservationType(tt.args.value, tt.args.field) - if !reflect.DeepEqual(gotWarns, tt.wantWarns) { - t.Errorf("validateRequestPreservationType() gotWarns = %v, want %v", gotWarns, tt.wantWarns) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + diags := validateRequestPreservationType(tc.value, cty.Path{}) + if len(diags) != len(tc.expectedDiags) { + t.Fatalf("%s: wrong number of diags, expected %d, got %d", tc.name, len(tc.expectedDiags), len(diags)) } - 
if !reflect.DeepEqual(gotErrs, tt.wantErrs) { - t.Errorf("validateRequestPreservationType() gotErrs = %v, want %v", gotErrs, tt.wantErrs) + for j := range diags { + if diags[j].Severity != tc.expectedDiags[j].Severity { + t.Fatalf("%s: expected severity %v, got %v", tc.name, tc.expectedDiags[j].Severity, diags[j].Severity) + } + if !diags[j].AttributePath.Equals(tc.expectedDiags[j].AttributePath) { + t.Fatalf("%s: attribute paths do not match expected: %v, got %v", tc.name, tc.expectedDiags[j].AttributePath, diags[j].AttributePath) + } + if diags[j].Summary != tc.expectedDiags[j].Summary { + t.Fatalf("%s: summary does not match expected: %v, got %v", tc.name, tc.expectedDiags[j].Summary, diags[j].Summary) + } } }) } } func Test_validateWebStorageType(t *testing.T) { - type args struct { - value interface{} - field string - } tests := []struct { - name string - args args - wantWarns []string - wantErrs []error + name string + value interface{} + expectedDiags diag.Diagnostics }{ { - name: "SessionStorage passes", - args: args{ - value: "SessionStorage", - field: "web_storage_type", - }, - wantWarns: nil, - wantErrs: nil, + name: "SessionStorage passes", + value: "SessionStorage", + expectedDiags: nil, }, { - name: "LocalStorage passes", - args: args{ - value: "LocalStorage", - field: "web_storage_type", - }, - wantWarns: nil, - wantErrs: nil, + name: "LocalStorage passes", + value: "LocalStorage", + expectedDiags: nil, }, { - name: "junk does not pass", - args: args{ - value: "other", - field: "web_storage_type", - }, - wantWarns: nil, - wantErrs: []error{fmt.Errorf("%q must be either 'SessionStorage' or 'LocalStorage' not %s", "web_storage_type", "other")}, + name: "junk does not pass", + value: "other", + expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'SessionStorage' or 'LocalStorage' not other"))}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotWarns, gotErrs := validateWebStorageType(tt.args.value, tt.args.field) - if !reflect.DeepEqual(gotWarns, tt.wantWarns) { - t.Errorf("validateWebStorageType() gotWarns = %v, want %v", gotWarns, tt.wantWarns) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + diags := validateWebStorageType(tc.value, cty.Path{}) + if len(diags) != len(tc.expectedDiags) { + t.Fatalf("%s: wrong number of diags, expected %d, got %d", tc.name, len(tc.expectedDiags), len(diags)) } - if !reflect.DeepEqual(gotErrs, tt.wantErrs) { - t.Errorf("validateWebStorageType() gotErrs = %v, want %v", gotErrs, tt.wantErrs) + for j := range diags { + if diags[j].Severity != tc.expectedDiags[j].Severity { + t.Fatalf("%s: expected severity %v, got %v", tc.name, tc.expectedDiags[j].Severity, diags[j].Severity) + } + if !diags[j].AttributePath.Equals(tc.expectedDiags[j].AttributePath) { + t.Fatalf("%s: attribute paths do not match expected: %v, got %v", tc.name, tc.expectedDiags[j].AttributePath, diags[j].AttributePath) + } + if diags[j].Summary != tc.expectedDiags[j].Summary { + t.Fatalf("%s: summary does not match expected: %v, got %v", tc.name, tc.expectedDiags[j].Summary, diags[j].Summary) + } } }) } } func Test_validateListLocationValue(t *testing.T) { - type args struct { - value interface{} - field string - } tests := []struct { - name string - args args - wantWarns []string - wantErrs []error + name string + value interface{} + expectedDiags diag.Diagnostics }{ { - name: "FIRST passes", - args: args{ - value: "FIRST", - field: "list_value_location", - }, - wantWarns: nil, - wantErrs: nil, + name: "FIRST 
passes", + value: "FIRST", + expectedDiags: nil, }, { - name: "LAST passes", - args: args{ - value: "LAST", - field: "list_value_location", - }, - wantWarns: nil, - wantErrs: nil, + name: "LAST passes", + value: "LAST", + expectedDiags: nil, }, { - name: "junk does not pass", - args: args{ - value: "other", - field: "list_value_location", - }, - wantWarns: nil, - wantErrs: []error{fmt.Errorf("%q must be either 'FIRST' or 'LAST' not %s", "list_value_location", "other")}, + name: "junk does not pass", + value: "other", + expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'FIRST' or 'LAST' not other"))}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotWarns, gotErrs := validateListLocationValue(tt.args.value, tt.args.field) - if !reflect.DeepEqual(gotWarns, tt.wantWarns) { - t.Errorf("validateListLocationValue() gotWarns = %v, want %v", gotWarns, tt.wantWarns) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + diags := validateListLocationValue(tc.value, cty.Path{}) + if len(diags) != len(tc.expectedDiags) { + t.Fatalf("%s: wrong number of diags, expected %d, got %d", tc.name, len(tc.expectedDiags), len(diags)) } - if !reflect.DeepEqual(gotErrs, tt.wantErrs) { - t.Errorf("validateListLocationValue() gotErrs = %v, want %v", gotErrs, tt.wantErrs) + for j := range diags { + if diags[j].Severity != tc.expectedDiags[j].Severity { + t.Fatalf("%s: expected severity %v, got %v", tc.name, tc.expectedDiags[j].Severity, diags[j].Severity) + } + if !diags[j].AttributePath.Equals(tc.expectedDiags[j].AttributePath) { + t.Fatalf("%s: attribute paths do not match expected: %v, got %v", tc.name, tc.expectedDiags[j].AttributePath, diags[j].AttributePath) + } + if diags[j].Summary != tc.expectedDiags[j].Summary { + t.Fatalf("%s: summary does not match expected: %v, got %v", tc.name, tc.expectedDiags[j].Summary, diags[j].Summary) + } } }) } From 843be6e1d5b63e2a3c011328c6b39e530d6d91b5 Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Tue, 9 Jun 2020 14:09:50 +0100 Subject: [PATCH 04/23] add support for class_name validation on several resources --- ...ource_pingaccess_access_token_validator.go | 9 ++- ..._pingaccess_access_token_validator_test.go | 23 ++++++ .../resource_pingaccess_hsm_provider.go | 10 +++ .../resource_pingaccess_hsm_provider_test.go | 35 +++++++-- .../resource_pingaccess_identity_mapping.go | 9 ++- .../resource_pingaccess_pingfederate_admin.go | 8 +- ...urce_pingaccess_pingfederate_admin_test.go | 8 +- .../resource_pingaccess_site_authenticator.go | 10 +++ ...urce_pingaccess_site_authenticator_test.go | 27 +++++++ pingaccess/structures.go | 4 +- .../v2/helper/customdiff/compose.go | 75 ------------------- .../v2/helper/customdiff/computed.go | 18 ----- .../v2/helper/customdiff/condition.go | 62 --------------- .../v2/helper/customdiff/doc.go | 11 --- .../v2/helper/customdiff/force_new.go | 42 ----------- .../v2/helper/customdiff/validate.go | 40 ---------- vendor/modules.txt | 2 +- 17 files changed, 119 insertions(+), 274 deletions(-) delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/compose.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/computed.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/condition.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/doc.go delete mode 100644 
vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/force_new.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff/validate.go diff --git a/pingaccess/resource_pingaccess_access_token_validator.go b/pingaccess/resource_pingaccess_access_token_validator.go index edc01d4d..6ead96d4 100644 --- a/pingaccess/resource_pingaccess_access_token_validator.go +++ b/pingaccess/resource_pingaccess_access_token_validator.go @@ -25,10 +25,11 @@ func resourcePingAccessAccessTokenValidator() *schema.Resource { svc := m.(*pingaccess.Client).AccessTokenValidators desc, _, _ := svc.GetAccessTokenValidatorDescriptorsCommand() className := d.Get("class_name").(string) - if err := descriptorsHasClassName(className, desc); err != nil { - return err - } - return validateConfiguration(className, d, desc) + return descriptorsHasClassName(className, desc) + //if err := descriptorsHasClassName(className, desc); err != nil { + // return err + //} + //return validateConfiguration(className, d, desc) }, } } diff --git a/pingaccess/resource_pingaccess_access_token_validator_test.go b/pingaccess/resource_pingaccess_access_token_validator_test.go index e2ecd08c..465dfefa 100644 --- a/pingaccess/resource_pingaccess_access_token_validator_test.go +++ b/pingaccess/resource_pingaccess_access_token_validator_test.go @@ -2,6 +2,7 @@ package pingaccess import ( "fmt" + "regexp" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -35,6 +36,10 @@ func TestAccPingAccessAccessTokenValidator(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration", "{\"audience\":null,\"description\":null,\"issuer\":null,\"path\":\"/bar/foo\",\"subjectAttributeName\":\"foo\"}"), ), }, + { + Config: testAccPingAccessAccessTokenValidatorConfigInvalidClassName(), + ExpectError: regexp.MustCompile(`unable to find className 'com.pingidentity.pa.accesstokenvalidators.foo' available classNames: com.pingidentity.pa.accesstokenvalidators.JwksEndpoint`), + }, }, }) } @@ -61,6 +66,24 @@ func testAccPingAccessAccessTokenValidatorConfig(configUpdate string) string { }`, configUpdate) } +func testAccPingAccessAccessTokenValidatorConfigInvalidClassName() string { + return ` + resource "pingaccess_access_token_validator" "test" { + class_name = "com.pingidentity.pa.accesstokenvalidators.foo" + name = "foo" + + configuration = < Date: Tue, 9 Jun 2020 15:20:44 +0100 Subject: [PATCH 05/23] fix identity mapping tests bump travis to go1.14 setup github actions tweak makefile tweak makefile Update build.yaml Update build.yaml Update build.yaml --- .github/workflows/build.yaml | 30 +++++++++++++++++++ .travis.yml | 2 +- Makefile | 3 +- ...source_pingaccess_identity_mapping_test.go | 11 +++---- 4 files changed, 38 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/build.yaml diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml new file mode 100644 index 00000000..ffa42b99 --- /dev/null +++ b/.github/workflows/build.yaml @@ -0,0 +1,30 @@ +on: push +name: Main Workflow +jobs: + build: + name: Build + runs-on: ubuntu-latest + steps: + + - name: Set up Go 1.14 + uses: actions/setup-go@v1 + with: + go-version: 1.14 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v1 + + - name: Get dependencies + run: | + rm -rf vendor/ + go mod vendor + go mod verify + + - name: Test + env: + PING_IDENTITY_DEVOPS_USER: ${{ secrets.PING_IDENTITY_DEVOPS_USER }} + PING_IDENTITY_DEVOPS_KEY: ${{ 
secrets.PING_IDENTITY_DEVOPS_KEY }} + run: | + TF_ACC=1 go test -mod=vendor ./... -v -trimpath -coverprofile=coverage.out + go tool cover -func=coverage.out diff --git a/.travis.yml b/.travis.yml index d5c71110..e3e8344b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,7 +9,7 @@ services: language: go go: -- 1.13.x +- 1.14.x addons: sonarcloud: diff --git a/Makefile b/Makefile index ecac97c8..923f0e1a 100644 --- a/Makefile +++ b/Makefile @@ -15,8 +15,7 @@ unit-test: test-and-report: - @rm -f pingaccess/terraform.log coverage.out report.json - @TF_LOG=TRACE TF_LOG_PATH=./terraform.log TF_ACC=1 go test -mod=vendor ./... -v -trimpath -coverprofile=coverage.out -json > report.json && go tool cover -func=coverage.out + TF_LOG=TRACE TF_LOG_PATH=./terraform.log TF_ACC=1 go test -mod=vendor ./... -v -trimpath -coverprofile=coverage.out -json > report.json && go tool cover -func=coverage.out build: @go build -mod=vendor -o ${NAME} -trimpath . diff --git a/pingaccess/resource_pingaccess_identity_mapping_test.go b/pingaccess/resource_pingaccess_identity_mapping_test.go index 5417d2db..2d15925e 100644 --- a/pingaccess/resource_pingaccess_identity_mapping_test.go +++ b/pingaccess/resource_pingaccess_identity_mapping_test.go @@ -48,12 +48,13 @@ func TestAccPingAccessIdentityMapping(t *testing.T) { }, { Config: testAccPingAccessIdentityMappingConfigWrongClassName(), - ExpectError: regexp.MustCompile(`unable to find className 'com.pingidentity.pa.identitymappings.foo' available className's: com.pingidentity.pa.identitymappings.HeaderIdentityMapping, com.pingidentity.pa.identitymappings.JwtIdentityMapping`), - }, - { - Config: testAccPingAccessIdentityMappingConfigMissingRequired(), - ExpectError: regexp.MustCompile(`configuration validation failed against the class descriptor definition\nthe field 'headerName' is required for the class_name 'com.pingidentity.pa.identitymappings.JwtIdentityMapping'\nthe field 'audience' is required for the class_name 'com.pingidentity.pa.identitymappings.JwtIdentityMapping'`), + ExpectError: regexp.MustCompile(`unable to find className 'com.pingidentity.pa.identitymappings.foo' available classNames: com.pingidentity.pa.identitymappings.HeaderIdentityMapping, com.pingidentity.pa.identitymappings.JwtIdentityMapping`), }, + //TODO disabling configuration validation until I can handle interpolated configuration + //{ + // Config: testAccPingAccessIdentityMappingConfigMissingRequired(), + // ExpectError: regexp.MustCompile(`configuration validation failed against the class descriptor definition\nthe field 'headerName' is required for the class_name 'com.pingidentity.pa.identitymappings.JwtIdentityMapping'\nthe field 'audience' is required for the class_name 'com.pingidentity.pa.identitymappings.JwtIdentityMapping'`), + //}, }, }) } From 4c000c2d4d3919ca089d21aea1d6d3166850d422 Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Wed, 10 Jun 2020 16:15:22 +0100 Subject: [PATCH 06/23] refactor away the last deprecated code, add conditional configuration validation for non-interpolated configuration blocks --- ...ource_pingaccess_access_token_validator.go | 9 +- ...esource_pingaccess_application_resource.go | 27 +++--- ...ce_pingaccess_application_resource_test.go | 5 ++ .../resource_pingaccess_engine_listener.go | 12 ++- ...esource_pingaccess_engine_listener_test.go | 13 +-- .../resource_pingaccess_hsm_provider.go | 9 +- .../resource_pingaccess_identity_mapping.go | 9 +- ...source_pingaccess_identity_mapping_test.go | 45 ++++++++-- ...esource_pingaccess_pingfederate_runtime.go | 17 
++-- ...ce_pingaccess_pingfederate_runtime_test.go | 9 +- pingaccess/resource_pingaccess_rule.go | 9 +- .../resource_pingaccess_site_authenticator.go | 9 +- ...resource_pingaccess_third_party_service.go | 33 ++----- ...rce_pingaccess_third_party_service_test.go | 2 +- pingaccess/resource_pingaccess_websession.go | 88 ++++++------------- .../resource_pingaccess_websession_test.go | 40 ++------- pingaccess/structures.go | 9 ++ 17 files changed, 156 insertions(+), 189 deletions(-) diff --git a/pingaccess/resource_pingaccess_access_token_validator.go b/pingaccess/resource_pingaccess_access_token_validator.go index 6ead96d4..edc01d4d 100644 --- a/pingaccess/resource_pingaccess_access_token_validator.go +++ b/pingaccess/resource_pingaccess_access_token_validator.go @@ -25,11 +25,10 @@ func resourcePingAccessAccessTokenValidator() *schema.Resource { svc := m.(*pingaccess.Client).AccessTokenValidators desc, _, _ := svc.GetAccessTokenValidatorDescriptorsCommand() className := d.Get("class_name").(string) - return descriptorsHasClassName(className, desc) - //if err := descriptorsHasClassName(className, desc); err != nil { - // return err - //} - //return validateConfiguration(className, d, desc) + if err := descriptorsHasClassName(className, desc); err != nil { + return err + } + return validateConfiguration(className, d, desc) }, } } diff --git a/pingaccess/resource_pingaccess_application_resource.go b/pingaccess/resource_pingaccess_application_resource.go index 2619fe3c..e08fb684 100644 --- a/pingaccess/resource_pingaccess_application_resource.go +++ b/pingaccess/resource_pingaccess_application_resource.go @@ -30,14 +30,17 @@ func resourcePingAccessApplicationResourceSchema() map[string]*schema.Schema { "anonymous": { Type: schema.TypeBool, Optional: true, + Default: false, }, "application_id": { Type: schema.TypeString, Required: true, }, "audit_level": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + Default: "ON", + ValidateDiagFunc: validateAuditLevel, }, "default_auth_type_override": { Type: schema.TypeString, @@ -47,6 +50,7 @@ func resourcePingAccessApplicationResourceSchema() map[string]*schema.Schema { "enabled": { Type: schema.TypeBool, Optional: true, + Default: true, }, "methods": { Type: schema.TypeSet, @@ -87,10 +91,12 @@ func resourcePingAccessApplicationResourceSchema() map[string]*schema.Schema { "root_resource": { Type: schema.TypeBool, Optional: true, + Default: false, }, "unprotected": { Type: schema.TypeBool, Optional: true, + Default: false, }, } } @@ -227,9 +233,7 @@ func resourcePingAccessApplicationResourceReadData(d *schema.ResourceData) *pa.R Methods: &methods, } - if v, ok := d.GetOkExists("anonymous"); ok { - resource.Anonymous = Bool(v.(bool)) - } + resource.Anonymous = Bool(d.Get("anonymous").(bool)) if v, ok := d.GetOk("application_id"); ok { applicationID, _ := strconv.Atoi(v.(string)) @@ -244,17 +248,10 @@ func resourcePingAccessApplicationResourceReadData(d *schema.ResourceData) *pa.R resource.DefaultAuthTypeOverride = String(v.(string)) } - if v, ok := d.GetOkExists("enabled"); ok { - resource.Enabled = Bool(v.(bool)) - } - - if v, ok := d.GetOkExists("root_resource"); ok { - resource.RootResource = Bool(v.(bool)) - } + resource.Enabled = Bool(d.Get("enabled").(bool)) + resource.RootResource = Bool(d.Get("root_resource").(bool)) + resource.Unprotected = Bool(d.Get("unprotected").(bool)) - if v, ok := d.GetOkExists("unprotected"); ok { - resource.Unprotected = Bool(v.(bool)) - } if v, ok := d.GetOk("path_prefixes"); ok 
{ pathPrefixes := expandStringList(v.(*schema.Set).List()) resource.PathPrefixes = &pathPrefixes diff --git a/pingaccess/resource_pingaccess_application_resource_test.go b/pingaccess/resource_pingaccess_application_resource_test.go index 556a4b66..47f12023 100644 --- a/pingaccess/resource_pingaccess_application_resource_test.go +++ b/pingaccess/resource_pingaccess_application_resource_test.go @@ -327,6 +327,11 @@ func Test_resourcePingAccessApplicationResourceReadData(t *testing.T) { ApplicationId: Int(0), Methods: &[]*string{String("GET")}, Name: String("false"), + Anonymous: Bool(false), + AuditLevel: String("OFF"), + Enabled: Bool(false), + RootResource: Bool(true), + Unprotected: Bool(true), }, }, } diff --git a/pingaccess/resource_pingaccess_engine_listener.go b/pingaccess/resource_pingaccess_engine_listener.go index 45da8e49..ae028f2b 100644 --- a/pingaccess/resource_pingaccess_engine_listener.go +++ b/pingaccess/resource_pingaccess_engine_listener.go @@ -36,6 +36,12 @@ func resourcePingAccessEngineListenerSchema() map[string]*schema.Schema { "secure": { Type: schema.TypeBool, Optional: true, + Default: true, + }, + "trusted_certificate_group_id": { + Type: schema.TypeInt, + Optional: true, + Default: 0, }, } } @@ -100,6 +106,7 @@ func resourcePingAccessEngineListenerReadResult(d *schema.ResourceData, input *p setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) setResourceDataIntWithDiagnostic(d, "port", input.Port, &diags) setResourceDataBoolWithDiagnostic(d, "secure", input.Secure, &diags) + setResourceDataIntWithDiagnostic(d, "trusted_certificate_group_id", input.TrustedCertificateGroupId, &diags) return diags } @@ -108,9 +115,8 @@ func resourcePingAccessEngineListenerReadData(d *schema.ResourceData) *pa.Engine Name: String(d.Get("name").(string)), Port: Int(d.Get("port").(int)), } - if v, ok := d.GetOkExists("secure"); ok { - engine.Secure = Bool(v.(bool)) - } + engine.Secure = Bool(d.Get("secure").(bool)) + engine.TrustedCertificateGroupId = Int(d.Get("trusted_certificate_group_id").(int)) return engine } diff --git a/pingaccess/resource_pingaccess_engine_listener_test.go b/pingaccess/resource_pingaccess_engine_listener_test.go index 86b27cfb..9c27508f 100644 --- a/pingaccess/resource_pingaccess_engine_listener_test.go +++ b/pingaccess/resource_pingaccess_engine_listener_test.go @@ -80,15 +80,18 @@ func Test_resourcePingAccessEngineListenerReadData(t *testing.T) { }{ { EngineListener: pa.EngineListenerView{ - Name: String("engine1"), - Port: Int(9999), + Name: String("engine1"), + Port: Int(9999), + Secure: Bool(true), + TrustedCertificateGroupId: Int(0), }, }, { EngineListener: pa.EngineListenerView{ - Name: String("engine2"), - Port: Int(9999), - Secure: Bool(true), + Name: String("engine2"), + Port: Int(9999), + Secure: Bool(false), + TrustedCertificateGroupId: Int(2), }, }, } diff --git a/pingaccess/resource_pingaccess_hsm_provider.go b/pingaccess/resource_pingaccess_hsm_provider.go index e8309544..c5d47231 100644 --- a/pingaccess/resource_pingaccess_hsm_provider.go +++ b/pingaccess/resource_pingaccess_hsm_provider.go @@ -26,11 +26,10 @@ func resourcePingAccessHsmProvider() *schema.Resource { svc := m.(*pingaccess.Client).HsmProviders desc, _, _ := svc.GetHsmProviderDescriptorsCommand() className := d.Get("class_name").(string) - return descriptorsHasClassName(className, desc) - //if err := descriptorsHasClassName(className, desc); err != nil { - // return err - //} - //return validateConfiguration(className, d, desc) + if err := 
descriptorsHasClassName(className, desc); err != nil { + return err + } + return validateConfiguration(className, d, desc) }, } } diff --git a/pingaccess/resource_pingaccess_identity_mapping.go b/pingaccess/resource_pingaccess_identity_mapping.go index a09fb78e..9019bb14 100644 --- a/pingaccess/resource_pingaccess_identity_mapping.go +++ b/pingaccess/resource_pingaccess_identity_mapping.go @@ -29,11 +29,10 @@ func resourcePingAccessIdentityMapping() *schema.Resource { svc := m.(*pingaccess.Client).IdentityMappings desc, _, _ := svc.GetIdentityMappingDescriptorsCommand() className := d.Get("class_name").(string) - return descriptorsHasClassName(className, desc) - //if err := descriptorsHasClassName(className, desc); err != nil { - // return err - //} - //return validateConfiguration(className, d, desc) + if err := descriptorsHasClassName(className, desc); err != nil { + return err + } + return validateConfiguration(className, d, desc) }, } } diff --git a/pingaccess/resource_pingaccess_identity_mapping_test.go b/pingaccess/resource_pingaccess_identity_mapping_test.go index 2d15925e..44f559ad 100644 --- a/pingaccess/resource_pingaccess_identity_mapping_test.go +++ b/pingaccess/resource_pingaccess_identity_mapping_test.go @@ -50,11 +50,20 @@ func TestAccPingAccessIdentityMapping(t *testing.T) { Config: testAccPingAccessIdentityMappingConfigWrongClassName(), ExpectError: regexp.MustCompile(`unable to find className 'com.pingidentity.pa.identitymappings.foo' available classNames: com.pingidentity.pa.identitymappings.HeaderIdentityMapping, com.pingidentity.pa.identitymappings.JwtIdentityMapping`), }, - //TODO disabling configuration validation until I can handle interpolated configuration - //{ - // Config: testAccPingAccessIdentityMappingConfigMissingRequired(), - // ExpectError: regexp.MustCompile(`configuration validation failed against the class descriptor definition\nthe field 'headerName' is required for the class_name 'com.pingidentity.pa.identitymappings.JwtIdentityMapping'\nthe field 'audience' is required for the class_name 'com.pingidentity.pa.identitymappings.JwtIdentityMapping'`), - //}, + { + Config: testAccPingAccessIdentityMappingConfigMissingRequired(), + ExpectError: regexp.MustCompile(`configuration validation failed against the class descriptor definition\nthe field 'headerName' is required for the class_name 'com.pingidentity.pa.identitymappings.JwtIdentityMapping'\nthe field 'audience' is required for the class_name 'com.pingidentity.pa.identitymappings.JwtIdentityMapping'`), + }, + { + Config: testAccPingAccessIdentityMappingConfigInterpolatedSkipped(), + Check: resource.ComposeTestCheckFunc( + testAccCheckPingAccessIdentityMappingExists(resourceName), + testAccCheckPingAccessIdentityMappingAttributes(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", "interpolated"), + resource.TestCheckResourceAttr(resourceName, "class_name", "com.pingidentity.pa.identitymappings.HeaderIdentityMapping"), + resource.TestCheckResourceAttrSet(resourceName, "configuration"), + ), + }, }, }) } @@ -112,6 +121,32 @@ func testAccPingAccessIdentityMappingConfigMissingRequired() string { }` } +func testAccPingAccessIdentityMappingConfigInterpolatedSkipped() string { + return ` + resource "pingaccess_identity_mapping" "acc_test_idm" { + class_name = "com.pingidentity.pa.identitymappings.HeaderIdentityMapping" + name = "interpolated" + configuration = < Date: Wed, 10 Jun 2020 19:25:03 +0100 Subject: [PATCH 07/23] add extra checks to workflow/pre-commit --- 
.github/workflows/build.yaml | 22 ++++++++++++++++++---- .travis.yml | 22 ---------------------- Makefile | 3 +-- go.sum | 1 + 4 files changed, 20 insertions(+), 28 deletions(-) delete mode 100644 .travis.yml diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ffa42b99..f79a3621 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -12,15 +12,29 @@ jobs: go-version: 1.14 id: go + - name: Install tools + run: | + export PATH=$PATH:$(go env GOPATH)/bin + go get -u honnef.co/go/tools/cmd/staticcheck + - name: Check out code into the Go module directory uses: actions/checkout@v1 - - name: Get dependencies + - name: Staticcheck run: | - rm -rf vendor/ + export PATH=$PATH:$(go env GOPATH)/bin + staticcheck ./... + + - name: Check dependencies + run: | + go mod tidy + git diff --exit-code -- go.mod go.sum || \ + (echo; echo "Unexpected difference in go.mod/go.sum files. Run 'go mod tidy' command or revert any go.mod/go.sum changes and commit."; exit 1) + echo "==> Checking source code with go mod vendor..." go mod vendor - go mod verify - + git diff --compact-summary --exit-code -- vendor || \ + (echo; echo "Unexpected difference in vendor/ directory. Run 'go mod vendor' command or revert any go.mod/go.sum/vendor changes and commit."; exit 1) + - name: Test env: PING_IDENTITY_DEVOPS_USER: ${{ secrets.PING_IDENTITY_DEVOPS_USER }} diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index e3e8344b..00000000 --- a/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -dist: trusty - -git: - depth: false - -services: - - docker - -language: go - -go: -- 1.14.x - -addons: - sonarcloud: - organization: "iwarapter-github" - -script: - - go mod vendor || travis_terminate 1; - - go mod verify || travis_terminate 1; - - make test-and-report || travis_terminate 1; - - sonar-scanner || travis_terminate 1; diff --git a/Makefile b/Makefile index 923f0e1a..0aa0be82 100644 --- a/Makefile +++ b/Makefile @@ -13,9 +13,8 @@ test: unit-test: @go test -mod=vendor ./... -v -trimpath - test-and-report: - TF_LOG=TRACE TF_LOG_PATH=./terraform.log TF_ACC=1 go test -mod=vendor ./... -v -trimpath -coverprofile=coverage.out -json > report.json && go tool cover -func=coverage.out + TF_LOG=TRACE TF_LOG_PATH=./terraform.log TF_ACC=1 go test -mod=vendor ./... -v -trimpath -coverprofile=coverage.out -json | tee report.json build: @go build -mod=vendor -o ${NAME} -trimpath . 
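Aside: the resource-schema changes in the patches above (application resource, engine listener) all apply one pattern: give Optional attributes an explicit Default so GetOkExists lookups can be replaced with a plain d.Get. A minimal sketch of that pattern under SDK v2; exampleListenerSchema and readListenerFlags are illustrative names, not provider code:

package pingaccess

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// With an explicit Default, Terraform always materialises a value in state,
// so "unset" can no longer be confused with the type's zero value.
func exampleListenerSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"secure": {
			Type:     schema.TypeBool,
			Optional: true,
			Default:  true,
		},
		"trusted_certificate_group_id": {
			Type:     schema.TypeInt,
			Optional: true,
			Default:  0,
		},
	}
}

// Reads can then call d.Get unconditionally, as the patched
// resourcePingAccessEngineListenerReadData now does.
func readListenerFlags(d *schema.ResourceData) (secure bool, tcgID int) {
	return d.Get("secure").(bool), d.Get("trusted_certificate_group_id").(int)
}
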
diff --git a/go.sum b/go.sum index d0675bcd..4b277c5b 100644 --- a/go.sum +++ b/go.sum @@ -348,6 +348,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= From 27a51400b40ce8a4ccd6d370be368c98118d4553 Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Wed, 10 Jun 2020 22:02:42 +0100 Subject: [PATCH 08/23] add sonar scan --- .github/workflows/build.yaml | 12 +++++++++--- sonar-project.properties | 1 + 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index f79a3621..01c1de34 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -1,8 +1,8 @@ -on: push -name: Main Workflow +on: [push, pull_request] +name: ci jobs: build: - name: Build + name: build runs-on: ubuntu-latest steps: @@ -42,3 +42,9 @@ jobs: run: | TF_ACC=1 go test -mod=vendor ./... -v -trimpath -coverprofile=coverage.out go tool cover -func=coverage.out + + - name: SonarCloud Scan + uses: sonarsource/sonarcloud-github-action@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} diff --git a/sonar-project.properties b/sonar-project.properties index ff8050fa..cacebb15 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -5,3 +5,4 @@ sonar.go.tests.reportPaths=report.json sonar.exclusions=vendor/**/* sonar.tests="." 
sonar.test.inclusions="**/*_test.go" +sonar.organization=iwarapter-github From 896e498d33b44147677232eb80995c041c0d6bbe Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Sun, 14 Jun 2020 21:06:59 +0100 Subject: [PATCH 09/23] bump to sdk 2.0.0-rc.2 --- Makefile | 2 +- go.mod | 4 +- go.sum | 29 +- pingaccess/config.go | 2 +- .../data_source_pingaccess_acme_default.go | 2 +- .../data_source_pingaccess_certificate.go | 7 +- pingaccess/data_source_pingaccess_keypair.go | 8 +- ...ingaccess_pingfederate_runtime_metadata.go | 30 +- ...e_pingaccess_trusted_certificate_groups.go | 6 +- pingaccess/provider.go | 6 +- pingaccess/provider_test.go | 2 - ...ource_pingaccess_access_token_validator.go | 10 +- pingaccess/resource_pingaccess_acme_server.go | 10 +- pingaccess/resource_pingaccess_application.go | 19 +- ...esource_pingaccess_application_resource.go | 18 +- ...source_pingaccess_auth_token_management.go | 12 +- ...e_pingaccess_auth_token_management_test.go | 2 +- .../resource_pingaccess_authn_req_list.go | 10 +- pingaccess/resource_pingaccess_certificate.go | 13 +- .../resource_pingaccess_engine_listener.go | 8 +- .../resource_pingaccess_hsm_provider.go | 10 +- ...gaccess_http_config_request_host_source.go | 8 +- .../resource_pingaccess_https_listener.go | 8 +- .../resource_pingaccess_identity_mapping.go | 10 +- pingaccess/resource_pingaccess_keypair.go | 15 +- .../resource_pingaccess_oauth_server.go | 16 +- .../resource_pingaccess_oauth_server_test.go | 4 +- .../resource_pingaccess_pingfederate_admin.go | 14 +- ...urce_pingaccess_pingfederate_admin_test.go | 2 +- .../resource_pingaccess_pingfederate_oauth.go | 14 +- ...urce_pingaccess_pingfederate_oauth_test.go | 2 +- ...esource_pingaccess_pingfederate_runtime.go | 12 +- ...ce_pingaccess_pingfederate_runtime_test.go | 2 +- pingaccess/resource_pingaccess_rule.go | 12 +- pingaccess/resource_pingaccess_rulesets.go | 11 +- pingaccess/resource_pingaccess_site.go | 12 +- .../resource_pingaccess_site_authenticator.go | 10 +- ...resource_pingaccess_third_party_service.go | 14 +- ...e_pingaccess_trusted_certificate_groups.go | 10 +- pingaccess/resource_pingaccess_virtualhost.go | 8 +- pingaccess/resource_pingaccess_websession.go | 12 +- .../resource_pingaccess_websession_test.go | 2 +- pingaccess/structures.go | 25 +- pingaccess/validators.go | 26 +- pingaccess/validators_test.go | 20 +- vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 2 +- .../aws/aws-sdk-go/aws/awserr/error.go | 23 +- .../aws/aws-sdk-go/aws/awserr/types.go | 31 +- .../aws/aws-sdk-go/aws/awsutil/equal.go | 2 +- .../aws/aws-sdk-go/aws/awsutil/path_value.go | 11 +- .../aws-sdk-go/aws/awsutil/string_value.go | 25 +- .../aws/aws-sdk-go/aws/client/client.go | 4 +- .../aws-sdk-go/aws/client/default_retryer.go | 151 +- .../aws/aws-sdk-go/aws/client/logger.go | 18 +- .../aws-sdk-go/aws/client/no_op_retryer.go | 28 + .../github.com/aws/aws-sdk-go/aws/config.go | 22 +- .../aws/{context.go => context_1_5.go} | 40 +- .../aws/aws-sdk-go/aws/context_1_7.go | 9 - .../aws/aws-sdk-go/aws/context_1_9.go | 11 + ...ntext_1_6.go => context_background_1_5.go} | 15 + .../aws-sdk-go/aws/context_background_1_7.go | 20 + .../aws/aws-sdk-go/aws/context_sleep.go | 24 + .../aws/aws-sdk-go/aws/convert_types.go | 531 +++ .../aws-sdk-go/aws/corehandlers/handlers.go | 64 +- .../aws-sdk-go/aws/corehandlers/user_agent.go | 2 +- .../aws-sdk-go/aws/credentials/credentials.go | 42 + .../ec2rolecreds/ec2_role_provider.go | 6 +- .../aws/credentials/endpointcreds/provider.go | 19 +- 
.../aws/credentials/processcreds/provider.go | 425 +++ .../stscreds/assume_role_provider.go | 22 +- .../stscreds/web_identity_provider.go | 100 + .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 65 +- .../aws/aws-sdk-go/aws/csm/enable.go | 34 +- .../aws/aws-sdk-go/aws/csm/metric.go | 56 + .../aws/aws-sdk-go/aws/csm/metric_chan.go | 11 +- .../aws-sdk-go/aws/csm/metric_exception.go | 26 + .../aws/aws-sdk-go/aws/csm/reporter.go | 71 +- .../aws/aws-sdk-go/aws/ec2metadata/api.go | 46 +- .../aws/aws-sdk-go/aws/ec2metadata/service.go | 9 +- .../aws/aws-sdk-go/aws/endpoints/decode.go | 28 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 2495 ++++++++++++-- .../aws/endpoints/dep_service_ids.go | 141 + .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 9 +- .../aws/aws-sdk-go/aws/endpoints/v3model.go | 5 +- .../aws/endpoints/v3model_codegen.go | 24 +- .../aws/request/connection_reset_error.go | 17 +- .../request/connection_reset_error_other.go | 11 - .../aws/aws-sdk-go/aws/request/handlers.go | 52 +- .../aws-sdk-go/aws/request/offset_reader.go | 15 +- .../aws/aws-sdk-go/aws/request/request.go | 229 +- .../aws/aws-sdk-go/aws/request/request_1_8.go | 5 +- .../aws/request/request_pagination.go | 2 +- .../aws/aws-sdk-go/aws/request/retryer.go | 167 +- .../aws/aws-sdk-go/aws/request/validation.go | 25 + .../aws/session/cabundle_transport.go | 26 + .../aws/session/cabundle_transport_1_5.go | 22 + .../aws/session/cabundle_transport_1_6.go | 23 + .../aws/aws-sdk-go/aws/session/credentials.go | 259 ++ .../aws/aws-sdk-go/aws/session/doc.go | 208 +- .../aws/aws-sdk-go/aws/session/env_config.go | 65 +- .../aws/aws-sdk-go/aws/session/session.go | 289 +- .../aws-sdk-go/aws/session/shared_config.go | 364 +- .../aws/aws-sdk-go/aws/signer/v4/v4.go | 83 +- vendor/github.com/aws/aws-sdk-go/aws/types.go | 20 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/internal/ini/ini_parser.go | 11 +- .../aws/aws-sdk-go/internal/ini/statement.go | 2 +- .../aws/aws-sdk-go/internal/sdkio/byte.go | 12 + .../aws/aws-sdk-go/internal/sdkmath/floor.go | 15 + .../internal/sdkmath/floor_go1.9.go | 56 + .../aws/aws-sdk-go/internal/sdkrand/read.go | 11 + .../aws-sdk-go/internal/sdkrand/read_1_5.go | 24 + .../internal/shareddefaults/ecs_container.go | 2 +- .../aws/aws-sdk-go/private/protocol/host.go | 51 +- .../private/protocol/host_prefix.go | 54 + .../private/protocol/json/jsonutil/build.go | 296 ++ .../protocol/json/jsonutil/unmarshal.go | 250 ++ .../private/protocol/query/build.go | 2 +- .../private/protocol/query/unmarshal.go | 2 +- .../private/protocol/query/unmarshal_error.go | 77 +- .../aws-sdk-go/private/protocol/rest/build.go | 27 +- .../private/protocol/rest/unmarshal.go | 36 +- .../private/protocol/restxml/restxml.go | 8 +- .../aws-sdk-go/private/protocol/timestamp.go | 20 +- .../private/protocol/xml/xmlutil/build.go | 2 +- .../private/protocol/xml/xmlutil/sort.go | 32 + .../private/protocol/xml/xmlutil/unmarshal.go | 19 + .../protocol/xml/xmlutil/xml_to_struct.go | 13 +- .../aws/aws-sdk-go/service/s3/api.go | 3027 +++++++++++++---- .../aws-sdk-go/service/s3/bucket_location.go | 3 +- .../aws-sdk-go/service/s3/customizations.go | 4 +- .../aws/aws-sdk-go/service/s3/doc_custom.go | 14 + .../aws/aws-sdk-go/service/s3/sse.go | 64 +- .../aws-sdk-go/service/s3/statusok_error.go | 4 +- .../aws-sdk-go/service/s3/unmarshal_error.go | 47 +- .../aws/aws-sdk-go/service/sts/api.go | 1140 ++++--- .../aws-sdk-go/service/sts/customizations.go | 11 +- .../aws/aws-sdk-go/service/sts/doc.go | 76 +- 
.../aws/aws-sdk-go/service/sts/errors.go | 2 +- .../service/sts/stsiface/interface.go | 96 + .../hashicorp/go-getter/.travis.yml | 24 - .../github.com/hashicorp/go-getter/README.md | 8 +- .../github.com/hashicorp/go-getter/client.go | 2 +- .../github.com/hashicorp/go-getter/get_git.go | 28 +- .../hashicorp/go-getter/get_http.go | 24 +- .../github.com/hashicorp/go-plugin/README.md | 5 - .../github.com/hashicorp/go-plugin/client.go | 16 +- .../hashicorp/go-plugin/grpc_stdio.go | 10 +- .../github.com/hashicorp/go-plugin/server.go | 250 +- .../github.com/hashicorp/hcl/v2/CHANGELOG.md | 33 + vendor/github.com/hashicorp/hcl/v2/README.md | 11 +- .../github.com/hashicorp/hcl/v2/appveyor.yml | 13 + .../hcl/v2/ext/customdecode/README.md | 209 ++ .../hcl/v2/ext/customdecode/customdecode.go | 56 + .../v2/ext/customdecode/expression_type.go | 146 + vendor/github.com/hashicorp/hcl/v2/go.mod | 4 +- vendor/github.com/hashicorp/hcl/v2/go.sum | 8 +- .../hashicorp/hcl/v2/hclsyntax/expression.go | 56 +- .../hashicorp/hcl/v2/hclsyntax/parser.go | 11 +- .../terraform-plugin-sdk/v2/acctest/doc.go | 24 - .../terraform-plugin-sdk/v2/acctest/helper.go | 26 - .../v2/diag/diagnostic.go | 8 - .../terraform-plugin-sdk/v2/diag/helpers.go | 36 + .../v2/helper/resource/plugin.go | 135 + .../v2/helper/resource/testing.go | 85 +- .../v2/helper/resource/testing_new.go | 41 +- .../v2/helper/resource/testing_new_config.go | 75 +- .../resource/testing_new_import_state.go | 36 +- .../v2/helper/schema/provider.go | 49 +- .../v2/helper/schema/resource.go | 54 +- .../v2/helper/schema/resource_data.go | 28 +- .../v2/helper/schema/schema.go | 16 +- .../v2/helper/schema/set.go | 2 +- .../helper/hashcode/hashcode.go | 0 .../terraform-plugin-sdk/v2/meta/meta.go | 36 + .../terraform-plugin-sdk/v2/plugin/debug.go | 102 + .../terraform-plugin-sdk/v2/plugin/serve.go | 14 +- .../v2/terraform/state.go | 9 +- .../hashicorp/terraform-plugin-test/config.go | 2 +- .../terraform-plugin-test/terraform.go | 16 +- .../terraform-plugin-test/working_dir.go | 22 +- .../iwarapter/pingaccess-sdk-go/LICENSE | 21 + .../pingaccess/accessTokenValidators.go | 9 + .../pingaccess-sdk-go/pingaccess/acme.go | 17 + .../pingaccess/adminConfig.go | 12 + .../pingaccess/adminSessionInfo.go | 6 + .../pingaccess-sdk-go/pingaccess/agents.go | 11 + .../pingaccess/applications.go | 20 + .../pingaccess-sdk-go/pingaccess/auth.go | 15 + .../pingaccess/authTokenManagement.go | 8 + .../pingaccess/authnReqLists.go | 8 + .../pingaccess-sdk-go/pingaccess/backup.go | 4 + .../pingaccess/certificates.go | 9 + .../pingaccess-sdk-go/pingaccess/config.go | 12 + .../pingaccess/engineListeners.go | 8 + .../pingaccess-sdk-go/pingaccess/engines.go | 12 + .../pingaccess/globalUnprotectedResources.go | 8 + .../pingaccess/highAvailability.go | 17 + .../pingaccess/hsmProviders.go | 9 + .../pingaccess/httpConfig.go | 15 + .../pingaccess/httpsListeners.go | 6 + .../pingaccess/identityMappings.go | 10 + .../pingaccess-sdk-go/pingaccess/keyPairs.go | 17 + .../pingaccess-sdk-go/pingaccess/license.go | 5 + .../pingaccess-sdk-go/pingaccess/models.go | 71 +- .../pingaccess-sdk-go/pingaccess/oauth.go | 6 + .../pingaccess/oauthKeyManagement.go | 8 + .../pingaccess-sdk-go/pingaccess/oidc.go | 9 + .../pingaccess/pingaccess.go | 158 +- .../pingaccess/pingaccessSDK.go | 2 +- .../pingaccess/pingfederate.go | 17 + .../pingaccess-sdk-go/pingaccess/pingone.go | 7 + .../pingaccess-sdk-go/pingaccess/proxies.go | 8 + .../pingaccess-sdk-go/pingaccess/redirects.go 
| 8 + .../pingaccess/rejectionHandlers.go | 10 + .../pingaccess-sdk-go/pingaccess/rules.go | 10 + .../pingaccess-sdk-go/pingaccess/rulesets.go | 10 + .../pingaccess/sharedSecrets.go | 7 + .../pingaccess/siteAuthenticators.go | 10 + .../pingaccess-sdk-go/pingaccess/sites.go | 8 + .../pingaccess/thirdPartyServices.go | 8 + .../pingaccess/tokenProvider.go | 6 + .../pingaccess/trustedCertificateGroups.go | 8 + .../pingaccess/unknownResources.go | 6 + .../pingaccess-sdk-go/pingaccess/users.go | 7 + .../pingaccess-sdk-go/pingaccess/version.go | 4 + .../pingaccess/virtualhosts.go | 8 + .../pingaccess/webSessionManagement.go | 15 + .../pingaccess/webSessions.go | 8 + .../jmespath/go-jmespath/.gitignore | 2 +- vendor/github.com/jmespath/go-jmespath/api.go | 2 +- vendor/modules.txt | 25 +- 232 files changed, 11983 insertions(+), 2994 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go rename vendor/github.com/aws/aws-sdk-go/aws/{context.go => context_1_5.go} (58%) delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go rename vendor/github.com/aws/aws-sdk-go/aws/{context_1_6.go => context_background_1_5.go} (59%) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go delete mode 100644 vendor/github.com/hashicorp/go-getter/.travis.yml create mode 100644 vendor/github.com/hashicorp/hcl/v2/appveyor.yml create mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/customdecode/README.md create mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go create mode 100644 
vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/helper.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go rename vendor/github.com/hashicorp/terraform-plugin-sdk/v2/{ => internal}/helper/hashcode/hashcode.go (100%) create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/LICENSE diff --git a/Makefile b/Makefile index 0aa0be82..b4700023 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ pa-init: test: @rm -f pingaccess/terraform.log - @TF_LOG=TRACE TF_LOG_PATH=./terraform.log TF_ACC=1 go test -mod=vendor ./... -v -trimpath + @TF_LOG=TRACE TF_LOG_PATH=./terraform.log TF_ACC=1 go test -mod=vendor ./... -v -trimpath -coverprofile=coverage.out && go tool cover -func=coverage.out unit-test: @go test -mod=vendor ./... -v -trimpath diff --git a/go.mod b/go.mod index 58bafbe2..10516d41 100644 --- a/go.mod +++ b/go.mod @@ -7,8 +7,8 @@ require ( github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c // indirect github.com/google/go-cmp v0.3.1 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.1 - github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200605105101-6a0dcd01d169 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.2 + github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200614193934-b24fe6e545e5 github.com/ory/dockertest/v3 v3.6.0 github.com/pkg/errors v0.9.1 // indirect github.com/sirupsen/logrus v1.5.0 // indirect diff --git a/go.sum b/go.sum index 4b277c5b..d3d4fed6 100644 --- a/go.sum +++ b/go.sum @@ -33,6 +33,8 @@ github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/ github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ= +github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -109,13 +111,15 @@ github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUK github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-getter v1.4.0 h1:ENHNi8494porjD0ZhIrjlAHnveSFhY7hvOJrV/fsKkw= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= +github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02 
h1:l1KB3bHVdvegcIf5upQ5mjcHjs2qsWnKh4Yr9xgIuu8= +github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.2.0 h1:CUfYokW0EJNDcGecVrHZK//Cp1GFlHwoqtcUIEiU6BY= -github.com/hashicorp/go-plugin v1.2.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= +github.com/hashicorp/go-plugin v1.3.0 h1:4d/wJojzvHV1I4i/rrjVaeuyxWrLzDE1mDCyDy8fXS8= +github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= @@ -128,16 +132,16 @@ github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCO github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl/v2 v2.0.0 h1:efQznTz+ydmQXq3BOnRa3AXzvCeTq1P4dKj/z5GLlY8= -github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= +github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= +github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-json v0.4.0 h1:KNh29iNxozP5adfUFBJ4/fWd0Cu3taGgjHB38JYqOF4= github.com/hashicorp/terraform-json v0.4.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.1 h1:ko726Cy16TsSOkVS08vAHTT37KYmHmuSkvIhXFFEhaY= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.1/go.mod h1:9f8d0niQjU7io5EpT5WhyAWdDUKKw1xytr+7hIsvna0= -github.com/hashicorp/terraform-plugin-test v1.3.0 h1:hU5LoxrOn9qvOo+LTKN6mSav2J+dAMprbdxJPEQvp4U= -github.com/hashicorp/terraform-plugin-test v1.3.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.2 h1:HHppQ5ly03DFdZpuxiO2qHEbZ8uJHcZiRp37O9OfnCc= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.2/go.mod h1:EmGbJlJ/WPDVvHZKZKSPb6jBiQ5ZFuLksxBXweufgjk= +github.com/hashicorp/terraform-plugin-test v1.4.0 h1:9/eoY48ZGgcjkSkxIvUD7MGopFYeSv/2wmS09wSZg2I= +github.com/hashicorp/terraform-plugin-test v1.4.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux 
v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= @@ -145,12 +149,14 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKe github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200605105101-6a0dcd01d169 h1:QbikLuFtFP35NIeBSogGV1YEVmZorSe9XhHcTRake9o= -github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200605105101-6a0dcd01d169/go.mod h1:5FnKTPlitpx5z7Q/itbtKkgCEOBXXqhGOJqmryBZrZk= +github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200614193934-b24fe6e545e5 h1:Tks2Y325NgWNlODjGUslW3V2IRa6kYwsyg0NnyH0BOc= +github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200614193934-b24fe6e545e5/go.mod h1:5FnKTPlitpx5z7Q/itbtKkgCEOBXXqhGOJqmryBZrZk= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= @@ -256,8 +262,7 @@ github.com/vmihailenco/msgpack v3.3.3+incompatible h1:wapg9xDUZDzGCNFlwc5SqI1rvc github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.1+incompatible h1:RMF1enSPeKTlXrXdOcqjFUElywVZjjC6pqse21bKbEU= github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/zclconf/go-cty v1.1.0 h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= diff --git a/pingaccess/config.go b/pingaccess/config.go index a7eaae35..abd34f44 100644 --- a/pingaccess/config.go +++ b/pingaccess/config.go @@ -25,7 +25,7 @@ func (c *config) Client() (interface{}, diag.Diagnostics) { client := pingaccess.NewClient(c.Username, c.Password, url, c.Context, nil) if os.Getenv("TF_LOG") == "DEBUG" || os.Getenv("TF_LOG") == "TRACE" { - client.LogDebug(true) + client.LogDebug = true } 
return client, nil } diff --git a/pingaccess/data_source_pingaccess_acme_default.go b/pingaccess/data_source_pingaccess_acme_default.go index 6b2062d9..67f9e924 100644 --- a/pingaccess/data_source_pingaccess_acme_default.go +++ b/pingaccess/data_source_pingaccess_acme_default.go @@ -29,7 +29,7 @@ func dataSourcePingAccessAcmeDefaultRead(ctx context.Context, d *schema.Resource svc := m.(*pa.Client).Acme result, resp, err := svc.GetDefaultAcmeServerCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read ACME Default: %s\n%v", err.Error(), resp))} + return diag.FromErr(fmt.Errorf("unable to read ACME Default: %s\n%v", err.Error(), resp)) } d.SetId(*result.Id) diff --git a/pingaccess/data_source_pingaccess_certificate.go b/pingaccess/data_source_pingaccess_certificate.go index 16f884f1..0737d103 100644 --- a/pingaccess/data_source_pingaccess_certificate.go +++ b/pingaccess/data_source_pingaccess_certificate.go @@ -3,6 +3,7 @@ package pingaccess import ( "context" "fmt" + "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -26,12 +27,12 @@ func dataSourcePingAccessCertificateRead(ctx context.Context, d *schema.Resource } result, _, err := svc.GetTrustedCerts(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read Certificate: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read Certificate: %s", err)) } if len(result.Items) != 1 { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to find Certificate with alias '%s' found '%d' results", d.Get("alias").(string), len(result.Items)))} + return diag.FromErr(fmt.Errorf("unable to find Certificate with alias '%s' found '%d' results", d.Get("alias").(string), len(result.Items))) } - d.SetId(result.Items[0].Id.String()) + d.SetId(strconv.Itoa(*result.Items[0].Id)) return resourcePingAccessCertificateReadResult(d, result.Items[0]) } diff --git a/pingaccess/data_source_pingaccess_keypair.go b/pingaccess/data_source_pingaccess_keypair.go index a4d183e1..b10fc69e 100644 --- a/pingaccess/data_source_pingaccess_keypair.go +++ b/pingaccess/data_source_pingaccess_keypair.go @@ -3,6 +3,7 @@ package pingaccess import ( "context" "fmt" + "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -27,12 +28,11 @@ func dataSourcePingAccessKeyPairRead(ctx context.Context, d *schema.ResourceData } result, _, err := svc.GetKeyPairsCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read KeyPair: %s", err))} - + return diag.FromErr(fmt.Errorf("unable to read KeyPair: %s", err)) } if len(result.Items) != 1 { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to find KeyPair with alias '%s' found '%d' results", d.Get("alias").(string), len(result.Items)))} + return diag.FromErr(fmt.Errorf("unable to find KeyPair with alias '%s' found '%d' results", d.Get("alias").(string), len(result.Items))) } - d.SetId(result.Items[0].Id.String()) + d.SetId(strconv.Itoa(*result.Items[0].Id)) return resourcePingAccessKeyPairReadResult(d, result.Items[0]) } diff --git a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go index 95bfdd8b..d541e556 100644 --- a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go +++ b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go @@ -90,33 +90,33 @@ 
func dataSourcePingAccessPingFederateRuntimeMetadataSchema() map[string]*schema. } func dataSourcePingAccessPingFederateRuntimeMetadataRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).PingFederate + svc := m.(*pa.Client).Pingfederate result, resp, err := svc.GetPingFederateMetadataCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read PingFederate Runtime Metadata: %s\n%v", err, resp))} + return diag.FromErr(fmt.Errorf("unable to read PingFederate Runtime Metadata: %s\n%v", err, resp)) } var diags diag.Diagnostics d.SetId("pingfederate_runtime_metadata") setResourceDataStringWithDiagnostic(d, "authorization_endpoint", result.Authorization_endpoint, &diags) setResourceDataStringWithDiagnostic(d, "backchannel_authentication_endpoint", result.Backchannel_authentication_endpoint, &diags) if err := d.Set("claim_types_supported", result.Claim_types_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } if err := d.Set("claims_parameter_supported", result.Claims_parameter_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } if err := d.Set("claims_supported", result.Claims_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } if err := d.Set("code_challenge_methods_supported", result.Code_challenge_methods_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } setResourceDataStringWithDiagnostic(d, "end_session_endpoint", result.End_session_endpoint, &diags) if err := d.Set("grant_types_supported", result.Grant_types_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } if err := d.Set("id_token_signing_alg_values_supported", result.Id_token_signing_alg_values_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } setResourceDataStringWithDiagnostic(d, "introspection_endpoint", result.Introspection_endpoint, &diags) setResourceDataStringWithDiagnostic(d, "issuer", result.Issuer, &diags) @@ -124,30 +124,30 @@ func dataSourcePingAccessPingFederateRuntimeMetadataRead(ctx context.Context, d setResourceDataStringWithDiagnostic(d, "ping_end_session_endpoint", result.Ping_end_session_endpoint, &diags) setResourceDataStringWithDiagnostic(d, "ping_revoked_sris_endpoint", result.Ping_revoked_sris_endpoint, &diags) if err := d.Set("request_object_signing_alg_values_supported", result.Request_object_signing_alg_values_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } setResourceDataBoolWithDiagnostic(d, "request_parameter_supported", result.Request_parameter_supported, &diags) setResourceDataBoolWithDiagnostic(d, "request_uri_parameter_supported", result.Request_uri_parameter_supported, &diags) if err := d.Set("response_modes_supported", result.Response_modes_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } if err := d.Set("response_types_supported", result.Response_types_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
} setResourceDataStringWithDiagnostic(d, "revocation_endpoint", result.Revocation_endpoint, &diags) if err := d.Set("scopes_supported", result.Scopes_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } if err := d.Set("subject_types_supported", result.Subject_types_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } setResourceDataStringWithDiagnostic(d, "token_endpoint", result.Token_endpoint, &diags) if err := d.Set("token_endpoint_auth_methods_supported", result.Token_endpoint_auth_methods_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } setResourceDataStringWithDiagnostic(d, "userinfo_endpoint", result.Userinfo_endpoint, &diags) if err := d.Set("userinfo_signing_alg_values_supported", result.Userinfo_signing_alg_values_supported); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } return diags } diff --git a/pingaccess/data_source_pingaccess_trusted_certificate_groups.go b/pingaccess/data_source_pingaccess_trusted_certificate_groups.go index 2a6382d5..56d31d9e 100644 --- a/pingaccess/data_source_pingaccess_trusted_certificate_groups.go +++ b/pingaccess/data_source_pingaccess_trusted_certificate_groups.go @@ -55,13 +55,13 @@ func dataSourcePingAccessTrustedCertificateGroupsRead(ctx context.Context, d *sc } result, _, err := svc.GetTrustedCertificateGroupsCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read TrustedCertificateGroup: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read TrustedCertificateGroup: %s", err)) } if result == nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to find TrustedCertificateGroup with the name '%s', result was nil", d.Get("name").(string)))} + return diag.FromErr(fmt.Errorf("unable to find TrustedCertificateGroup with the name '%s', result was nil", d.Get("name").(string))) } if len(result.Items) != 1 { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to find TrustedCertificateGroup with the name '%s' found '%d' results", d.Get("name").(string), len(result.Items)))} + return diag.FromErr(fmt.Errorf("unable to find TrustedCertificateGroup with the name '%s' found '%d' results", d.Get("name").(string), len(result.Items))) } d.SetId(result.Items[0].Id.String()) return resourcePingAccessTrustedCertificateGroupsReadResult(d, result.Items[0]) diff --git a/pingaccess/provider.go b/pingaccess/provider.go index 93d7785f..fb2592f4 100644 --- a/pingaccess/provider.go +++ b/pingaccess/provider.go @@ -113,7 +113,7 @@ func String(v string) *string { return &v } func setResourceDataStringWithDiagnostic(d *schema.ResourceData, name string, data *string, diags *diag.Diagnostics) { if data != nil { if err := d.Set(name, *data); err != nil { - *diags = append(*diags, diag.FromErr(err)) + *diags = append(*diags, diag.FromErr(err)...) } } } @@ -121,7 +121,7 @@ func setResourceDataStringWithDiagnostic(d *schema.ResourceData, name string, da func setResourceDataIntWithDiagnostic(d *schema.ResourceData, name string, data *int, diags *diag.Diagnostics) { if data != nil { if err := d.Set(name, *data); err != nil { - *diags = append(*diags, diag.FromErr(err)) + *diags = append(*diags, diag.FromErr(err)...) 
} } } @@ -129,7 +129,7 @@ func setResourceDataIntWithDiagnostic(d *schema.ResourceData, name string, data func setResourceDataBoolWithDiagnostic(d *schema.ResourceData, name string, data *bool, diags *diag.Diagnostics) { if data != nil { if err := d.Set(name, *data); err != nil { - *diags = append(*diags, diag.FromErr(err)) + *diags = append(*diags, diag.FromErr(err)...) } } } diff --git a/pingaccess/provider_test.go b/pingaccess/provider_test.go index 8907b80b..a5d52011 100644 --- a/pingaccess/provider_test.go +++ b/pingaccess/provider_test.go @@ -18,7 +18,6 @@ import ( "testing" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" @@ -27,7 +26,6 @@ import ( ) func TestMain(m *testing.M) { - acctest.UseBinaryDriver("pingaccess", Provider) _, acceptanceTesting := os.LookupEnv("TF_ACC") if acceptanceTesting { pool, err := dockertest.NewPool("") diff --git a/pingaccess/resource_pingaccess_access_token_validator.go b/pingaccess/resource_pingaccess_access_token_validator.go index edc01d4d..aead2047 100644 --- a/pingaccess/resource_pingaccess_access_token_validator.go +++ b/pingaccess/resource_pingaccess_access_token_validator.go @@ -59,7 +59,7 @@ func resourcePingAccessAccessTokenValidatorCreate(ctx context.Context, d *schema result, _, err := svc.AddAccessTokenValidatorCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create AccessTokenValidator: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create AccessTokenValidator: %s", err)) } d.SetId(result.Id.String()) @@ -75,7 +75,7 @@ func resourcePingAccessAccessTokenValidatorRead(ctx context.Context, d *schema.R result, _, err := svc.GetAccessTokenValidatorCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read AccessTokenValidator: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read AccessTokenValidator: %s", err)) } return resourcePingAccessAccessTokenValidatorReadResult(d, result, svc) @@ -90,7 +90,7 @@ func resourcePingAccessAccessTokenValidatorUpdate(ctx context.Context, d *schema result, _, err := svc.UpdateAccessTokenValidatorCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update AccessTokenValidator: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update AccessTokenValidator: %s", err)) } d.SetId(result.Id.String()) @@ -101,12 +101,12 @@ func resourcePingAccessAccessTokenValidatorDelete(ctx context.Context, d *schema svc := m.(*pingaccess.Client).AccessTokenValidators _, err := svc.DeleteAccessTokenValidatorCommand(&pingaccess.DeleteAccessTokenValidatorCommandInput{Id: d.Id()}) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete AccessTokenValidator: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete AccessTokenValidator: %s", err)) } return nil } -func resourcePingAccessAccessTokenValidatorReadResult(d *schema.ResourceData, input *pingaccess.AccessTokenValidatorView, svc *pingaccess.AccessTokenValidatorsService) diag.Diagnostics { +func resourcePingAccessAccessTokenValidatorReadResult(d *schema.ResourceData, input *pingaccess.AccessTokenValidatorView, svc pingaccess.AccessTokenValidatorsAPI) diag.Diagnostics { var diags diag.Diagnostics b, _ := json.Marshal(input.Configuration) config := string(b) diff --git 
a/pingaccess/resource_pingaccess_acme_server.go b/pingaccess/resource_pingaccess_acme_server.go index 6f282ab8..8d53f11e 100644 --- a/pingaccess/resource_pingaccess_acme_server.go +++ b/pingaccess/resource_pingaccess_acme_server.go @@ -46,7 +46,7 @@ func resourcePingAccessAcmeServerCreate(ctx context.Context, d *schema.ResourceD result, _, err := svc.AddAcmeServerCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create AcmeServer: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create AcmeServer: %s", err)) } d.SetId(*result.Id) return resourcePingAccessAcmeServerReadResult(d, &input.Body) @@ -59,7 +59,7 @@ func resourcePingAccessAcmeServerRead(ctx context.Context, d *schema.ResourceDat } result, _, err := svc.GetAcmeServerCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read AcmeServer: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read AcmeServer: %s", err)) } return resourcePingAccessAcmeServerReadResult(d, result) } @@ -70,9 +70,9 @@ func resourcePingAccessAcmeServerDelete(ctx context.Context, d *schema.ResourceD AcmeServerId: d.Id(), } - _, _, err := svc.DeleteAcmeServerCommand(input) + _, resp, err := svc.DeleteAcmeServerCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete AcmeServer: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete AcmeServer: %s resp: %v", err, *resp)) } return nil } @@ -83,7 +83,7 @@ func resourcePingAccessAcmeServerReadResult(d *schema.ResourceData, input *pinga setResourceDataStringWithDiagnostic(d, "url", input.Url, &diags) if input.AcmeAccounts != nil && len(input.AcmeAccounts) > 0 { if err := d.Set("acme_accounts", flattenLinkViewList(input.AcmeAccounts)); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
} } return diags diff --git a/pingaccess/resource_pingaccess_application.go b/pingaccess/resource_pingaccess_application.go index cd98787c..ad38a664 100644 --- a/pingaccess/resource_pingaccess_application.go +++ b/pingaccess/resource_pingaccess_application.go @@ -3,7 +3,6 @@ package pingaccess import ( "context" "fmt" - "log" "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -131,7 +130,7 @@ func resourcePingAccessApplicationCreate(ctx context.Context, d *schema.Resource } result, _, err := svc.AddApplicationCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create Application: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create Application: %s", err)) } d.SetId(result.Id.String()) @@ -145,7 +144,7 @@ func resourcePingAccessApplicationRead(ctx context.Context, d *schema.ResourceDa } result, _, err := svc.GetApplicationCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read Application: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read Application: %s", err)) } return resourcePingAccessApplicationReadResult(d, result) } @@ -158,26 +157,22 @@ func resourcePingAccessApplicationUpdate(ctx context.Context, d *schema.Resource } result, _, err := svc.UpdateApplicationCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update Application: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update Application: %s", err)) } return resourcePingAccessApplicationReadResult(d, result) } func resourcePingAccessApplicationDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Printf("[INFO] resourcePingAccessApplicationDelete") svc := m.(*pa.Client).Applications input := &pa.DeleteApplicationCommandInput{ Id: d.Id(), } - log.Printf("[INFO] ResourceID: %s", d.Id()) - log.Printf("[INFO] DeleteApplicationCommandInput: %s", input.Id) _, _, err := svc.DeleteApplicationCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete Application: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete Application: %s", err)) } - log.Println("[DEBUG] End - resourcePingAccessSiteDelete") return nil } @@ -205,7 +200,7 @@ func resourcePingAccessApplicationReadResult(d *schema.ResourceData, rv *pa.Appl vhs = append(vhs, strconv.Itoa(*vh)) } if err := d.Set("virtual_host_ids", vhs); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } } @@ -215,12 +210,12 @@ func resourcePingAccessApplicationReadResult(d *schema.ResourceData, rv *pa.Appl if rv.IdentityMappingIds != nil && (*rv.IdentityMappingIds["Web"] != 0 || *rv.IdentityMappingIds["API"] != 0) { if err := d.Set("identity_mapping_ids", flattenIdentityMappingIds(rv.IdentityMappingIds)); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } } if rv.Policy != nil && (len(*rv.Policy["API"]) > 0 || len(*rv.Policy["Web"]) > 0) { if err := d.Set("policy", flattenPolicy(rv.Policy)); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
} } return diags diff --git a/pingaccess/resource_pingaccess_application_resource.go b/pingaccess/resource_pingaccess_application_resource.go index e08fb684..ce0a20d9 100644 --- a/pingaccess/resource_pingaccess_application_resource.go +++ b/pingaccess/resource_pingaccess_application_resource.go @@ -115,7 +115,7 @@ func resourcePingAccessApplicationResourceCreate(ctx context.Context, d *schema. } result, _, err := svc.GetApplicationResourcesCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create ApplicationResource: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create ApplicationResource: %s", err)) } rv := result.Items[0] d.SetId(rv.Id.String()) @@ -130,7 +130,7 @@ func resourcePingAccessApplicationResourceCreate(ctx context.Context, d *schema. result, _, err := svc.AddApplicationResourceCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create ApplicationResource: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create ApplicationResource: %s", err)) } d.SetId(result.Id.String()) @@ -146,7 +146,7 @@ func resourcePingAccessApplicationResourceRead(ctx context.Context, d *schema.Re result, _, err := svc.GetApplicationResourceCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read ApplicationResource: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read ApplicationResource: %s", err)) } return resourcePingAccessApplicationResourceReadResult(d, result) @@ -162,7 +162,7 @@ func resourcePingAccessApplicationResourceUpdate(ctx context.Context, d *schema. result, _, err := svc.UpdateApplicationResourceCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update ApplicationResource: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update ApplicationResource: %s", err)) } return resourcePingAccessApplicationResourceReadResult(d, result) } @@ -181,7 +181,7 @@ func resourcePingAccessApplicationResourceDelete(ctx context.Context, d *schema. _, err := svc.DeleteApplicationResourceCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete ApplicationResource: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete ApplicationResource: %s", err)) } return nil } @@ -204,14 +204,14 @@ func resourcePingAccessApplicationResourceReadResult(d *schema.ResourceData, rv setResourceDataStringWithDiagnostic(d, "default_auth_type_override", rv.DefaultAuthTypeOverride, &diags) setResourceDataBoolWithDiagnostic(d, "enabled", rv.Enabled, &diags) if err := d.Set("methods", *rv.Methods); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } if err := d.Set("name", *rv.Name); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } if rv.PathPrefixes != nil { if err := d.Set("path_prefixes", *rv.PathPrefixes); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } } setResourceDataBoolWithDiagnostic(d, "root_resource", rv.RootResource, &diags) @@ -219,7 +219,7 @@ func resourcePingAccessApplicationResourceReadResult(d *schema.ResourceData, rv if rv.Policy != nil && (len(*rv.Policy["Web"]) != 0 || len(*rv.Policy["API"]) != 0) { if err := d.Set("policy", flattenPolicy(rv.Policy)); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
} } return diags diff --git a/pingaccess/resource_pingaccess_auth_token_management.go b/pingaccess/resource_pingaccess_auth_token_management.go index 0ac01998..50d1b07a 100644 --- a/pingaccess/resource_pingaccess_auth_token_management.go +++ b/pingaccess/resource_pingaccess_auth_token_management.go @@ -59,23 +59,23 @@ func resourcePingAccessAuthTokenManagementCreate(ctx context.Context, d *schema. } func resourcePingAccessAuthTokenManagementRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).AuthTokenManagements + svc := m.(*pa.Client).AuthTokenManagement result, _, err := svc.GetAuthTokenManagementCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read AuthTokenManagement: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read AuthTokenManagement: %s", err)) } return resourcePingAccessAuthTokenManagementReadResult(d, result) } func resourcePingAccessAuthTokenManagementUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).AuthTokenManagements + svc := m.(*pa.Client).AuthTokenManagement input := pa.UpdateAuthTokenManagementCommandInput{ Body: *resourcePingAccessAuthTokenManagementReadData(d), } result, _, err := svc.UpdateAuthTokenManagementCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update AuthTokenManagement: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update AuthTokenManagement: %s", err)) } d.SetId("auth_token_management") @@ -83,10 +83,10 @@ func resourcePingAccessAuthTokenManagementUpdate(ctx context.Context, d *schema. } func resourcePingAccessAuthTokenManagementDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).AuthTokenManagements + svc := m.(*pa.Client).AuthTokenManagement _, err := svc.DeleteAuthTokenManagementCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete AuthTokenManagement: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete AuthTokenManagement: %s", err)) } return nil diff --git a/pingaccess/resource_pingaccess_auth_token_management_test.go b/pingaccess/resource_pingaccess_auth_token_management_test.go index 18ac9894..7189fdf0 100644 --- a/pingaccess/resource_pingaccess_auth_token_management_test.go +++ b/pingaccess/resource_pingaccess_auth_token_management_test.go @@ -58,7 +58,7 @@ func testAccCheckPingAccessAuthTokenManagementExists(n string) resource.TestChec return fmt.Errorf("No auth token management ID is set") } - conn := testAccProvider.Meta().(*pa.Client).AuthTokenManagements + conn := testAccProvider.Meta().(*pa.Client).AuthTokenManagement result, _, err := conn.GetAuthTokenManagementCommand() if err != nil { diff --git a/pingaccess/resource_pingaccess_authn_req_list.go b/pingaccess/resource_pingaccess_authn_req_list.go index 9e24e6eb..073641dd 100644 --- a/pingaccess/resource_pingaccess_authn_req_list.go +++ b/pingaccess/resource_pingaccess_authn_req_list.go @@ -41,7 +41,7 @@ func resourcePingAccessAuthnReqListCreate(ctx context.Context, d *schema.Resourc result, _, err := svc.AddAuthnReqListCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create AuthnReqList: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create AuthnReqList: %s", err)) } d.SetId(result.Id.String()) @@ -55,7 +55,7 @@ func resourcePingAccessAuthnReqListRead(ctx context.Context, d *schema.ResourceD } result, _, err := 
svc.GetAuthnReqListCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read AuthnReqList: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read AuthnReqList: %s", err)) } return resourcePingAccessAuthnReqListReadResult(d, result) } @@ -69,7 +69,7 @@ func resourcePingAccessAuthnReqListUpdate(ctx context.Context, d *schema.Resourc result, _, err := svc.UpdateAuthnReqListCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update AuthnReqList: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update AuthnReqList: %s", err)) } return resourcePingAccessAuthnReqListReadResult(d, result) } @@ -82,7 +82,7 @@ func resourcePingAccessAuthnReqListDelete(ctx context.Context, d *schema.Resourc _, err := svc.DeleteAuthnReqListCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete AuthnReqList: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete AuthnReqList: %s", err)) } return nil } @@ -91,7 +91,7 @@ func resourcePingAccessAuthnReqListReadResult(d *schema.ResourceData, input *pin var diags diag.Diagnostics setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) if err := d.Set("authn_reqs", input.AuthnReqs); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } return diags } diff --git a/pingaccess/resource_pingaccess_certificate.go b/pingaccess/resource_pingaccess_certificate.go index 6ed1a2ac..40feade9 100644 --- a/pingaccess/resource_pingaccess_certificate.go +++ b/pingaccess/resource_pingaccess_certificate.go @@ -3,6 +3,7 @@ package pingaccess import ( "context" "fmt" + "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -88,10 +89,10 @@ func resourcePingAccessCertificateCreate(ctx context.Context, d *schema.Resource result, _, err := svc.ImportTrustedCert(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create Certificate: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create Certificate: %s", err)) } - d.SetId(result.Id.String()) + d.SetId(strconv.Itoa(*result.Id)) return resourcePingAccessCertificateReadResult(d, result) } @@ -102,7 +103,7 @@ func resourcePingAccessCertificateRead(ctx context.Context, d *schema.ResourceDa } result, _, err := svc.GetTrustedCert(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read Certificate: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read Certificate: %s", err)) } return resourcePingAccessCertificateReadResult(d, result) } @@ -120,10 +121,10 @@ func resourcePingAccessCertificateUpdate(ctx context.Context, d *schema.Resource result, _, err := svc.UpdateTrustedCert(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update Certificate: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update Certificate: %s", err)) } - d.SetId(result.Id.String()) + d.SetId(strconv.Itoa(*result.Id)) return resourcePingAccessCertificateReadResult(d, result) } @@ -131,7 +132,7 @@ func resourcePingAccessCertificateDelete(ctx context.Context, d *schema.Resource svc := m.(*pa.Client).Certificates _, err := svc.DeleteTrustedCertCommand(&pa.DeleteTrustedCertCommandInput{Id: d.Id()}) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete Certificate: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete Certificate: %s", err)) } 
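// Certificate (and key pair) IDs appear to be *int in this SDK rather than
// the json.Number-backed Id.String() used by most other views, hence the
// switch to strconv.Itoa(*result.Id) above. A hypothetical nil-safe helper,
// not in the patch; assumes imports fmt, strconv, and the SDK's schema and
// diag packages:
func setIntId(d *schema.ResourceData, id *int) diag.Diagnostics {
	if id == nil {
		return diag.FromErr(fmt.Errorf("API response contained no id"))
	}
	d.SetId(strconv.Itoa(*id))
	return nil
}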
return nil } diff --git a/pingaccess/resource_pingaccess_engine_listener.go b/pingaccess/resource_pingaccess_engine_listener.go index ae028f2b..71d66d39 100644 --- a/pingaccess/resource_pingaccess_engine_listener.go +++ b/pingaccess/resource_pingaccess_engine_listener.go @@ -54,7 +54,7 @@ func resourcePingAccessEngineListenerCreate(ctx context.Context, d *schema.Resou result, _, err := svc.AddEngineListenerCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create EngineListener: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create EngineListener: %s", err)) } d.SetId(result.Id.String()) @@ -68,7 +68,7 @@ func resourcePingAccessEngineListenerRead(ctx context.Context, d *schema.Resourc } result, _, err := svc.GetEngineListenerCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read EngineListener: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read EngineListener: %s", err)) } return resourcePingAccessEngineListenerReadResult(d, result) } @@ -82,7 +82,7 @@ func resourcePingAccessEngineListenerUpdate(ctx context.Context, d *schema.Resou result, _, err := svc.UpdateEngineListenerCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update EngineListener: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update EngineListener: %s", err)) } return resourcePingAccessEngineListenerReadResult(d, result) } @@ -95,7 +95,7 @@ func resourcePingAccessEngineListenerDelete(ctx context.Context, d *schema.Resou _, err := svc.DeleteEngineListenerCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete EngineListener: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete EngineListener: %s", err)) } return nil diff --git a/pingaccess/resource_pingaccess_hsm_provider.go b/pingaccess/resource_pingaccess_hsm_provider.go index c5d47231..6b7cfe24 100644 --- a/pingaccess/resource_pingaccess_hsm_provider.go +++ b/pingaccess/resource_pingaccess_hsm_provider.go @@ -59,7 +59,7 @@ func resourcePingAccessHsmProviderCreate(ctx context.Context, d *schema.Resource result, _, err := svc.AddHsmProviderCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create HsmProvider: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create HsmProvider: %s", err)) } d.SetId(result.Id.String()) @@ -73,7 +73,7 @@ func resourcePingAccessHsmProviderRead(ctx context.Context, d *schema.ResourceDa } result, _, err := svc.GetHsmProviderCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read HsmProvider: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read HsmProvider: %s", err)) } return resourcePingAccessHsmProviderReadResult(d, result, svc) } @@ -86,7 +86,7 @@ func resourcePingAccessHsmProviderUpdate(ctx context.Context, d *schema.Resource } result, _, err := svc.UpdateHsmProviderCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update HsmProvider: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update HsmProvider: %s", err)) } d.SetId(result.Id.String()) return resourcePingAccessHsmProviderReadResult(d, result, svc) @@ -98,12 +98,12 @@ func resourcePingAccessHsmProviderDelete(ctx context.Context, d *schema.Resource _, err := svc.DeleteHsmProviderCommand(&pingaccess.DeleteHsmProviderCommandInput{Id: d.Id()}) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete HsmProvider: 
%s", err))} + return diag.FromErr(fmt.Errorf("unable to delete HsmProvider: %s", err)) } return nil } -func resourcePingAccessHsmProviderReadResult(d *schema.ResourceData, input *pingaccess.HsmProviderView, svc *pingaccess.HsmProvidersService) diag.Diagnostics { +func resourcePingAccessHsmProviderReadResult(d *schema.ResourceData, input *pingaccess.HsmProviderView, svc pingaccess.HsmProvidersAPI) diag.Diagnostics { var diags diag.Diagnostics b, _ := json.Marshal(input.Configuration) config := string(b) diff --git a/pingaccess/resource_pingaccess_http_config_request_host_source.go b/pingaccess/resource_pingaccess_http_config_request_host_source.go index 496a043e..8010e35d 100644 --- a/pingaccess/resource_pingaccess_http_config_request_host_source.go +++ b/pingaccess/resource_pingaccess_http_config_request_host_source.go @@ -41,7 +41,7 @@ func resourcePingAccessHTTPConfigRequestHostSourceRead(ctx context.Context, d *s svc := m.(*pa.Client).HttpConfig result, _, err := svc.GetHostSourceCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read HttpConfigHostSource: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read HttpConfigHostSource: %s", err)) } return resourcePingAccessHTTPConfigRequestHostSourceReadResult(d, result) } @@ -51,7 +51,7 @@ func resourcePingAccessHTTPConfigRequestHostSourceUpdate(ctx context.Context, d input := &pa.UpdateHostSourceCommandInput{Body: *resourcePingAccessHTTPConfigRequestHostSourceReadData(d)} result, _, err := svc.UpdateHostSourceCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update HttpConfigHostSource: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update HttpConfigHostSource: %s", err)) } d.SetId("http_config_host_source") @@ -62,7 +62,7 @@ func resourcePingAccessHTTPConfigRequestHostSourceDelete(ctx context.Context, d svc := m.(*pa.Client).HttpConfig _, err := svc.DeleteHostSourceCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete HttpConfigHostSource: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete HttpConfigHostSource: %s", err)) } return nil @@ -72,7 +72,7 @@ func resourcePingAccessHTTPConfigRequestHostSourceReadResult(d *schema.ResourceD var diags diag.Diagnostics setResourceDataStringWithDiagnostic(d, "list_value_location", rv.ListValueLocation, &diags) if err := d.Set("header_name_list", rv.HeaderNameList); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
} return diags } diff --git a/pingaccess/resource_pingaccess_https_listener.go b/pingaccess/resource_pingaccess_https_listener.go index 6419cb4d..8838a467 100644 --- a/pingaccess/resource_pingaccess_https_listener.go +++ b/pingaccess/resource_pingaccess_https_listener.go @@ -48,7 +48,7 @@ func resourcePingAccessHTTPSListenerCreate(ctx context.Context, d *schema.Resour result, _, err := svc.GetHttpsListenersCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to retrieving listener: %s", err))} + return diag.FromErr(fmt.Errorf("unable to retrieve listener: %s", err)) } name := d.Get("name").(string) @@ -58,7 +58,7 @@ func resourcePingAccessHTTPSListenerCreate(ctx context.Context, d *schema.Resour return resourcePingAccessHTTPSListenerReadResult(d, listener) } } - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to manage listener: %s", err))} + return diag.FromErr(fmt.Errorf("unable to manage listener: %s", err)) } func resourcePingAccessHTTPSListenerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -68,7 +68,7 @@ func resourcePingAccessHTTPSListenerRead(ctx context.Context, d *schema.Resource } result, _, err := svc.GetHttpsListenerCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read listener: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read listener: %s", err)) } return resourcePingAccessHTTPSListenerReadResult(d, result) } @@ -82,7 +82,7 @@ func resourcePingAccessHTTPSListenerUpdate(ctx context.Context, d *schema.Resour result, _, err := svc.UpdateHttpsListener(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update listener: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update listener: %s", err)) } return resourcePingAccessHTTPSListenerReadResult(d, result) } diff --git a/pingaccess/resource_pingaccess_identity_mapping.go b/pingaccess/resource_pingaccess_identity_mapping.go index 9019bb14..a79dc221 100644 --- a/pingaccess/resource_pingaccess_identity_mapping.go +++ b/pingaccess/resource_pingaccess_identity_mapping.go @@ -63,7 +63,7 @@ func resourcePingAccessIdentityMappingCreate(ctx context.Context, d *schema.Reso result, _, err := svc.AddIdentityMappingCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create IdentityMapping: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create IdentityMapping: %s", err)) } d.SetId(result.Id.String()) @@ -78,7 +78,7 @@ func resourcePingAccessIdentityMappingRead(ctx context.Context, d *schema.Resour result, _, err := svc.GetIdentityMappingCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read IdentityMapping: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read IdentityMapping: %s", err)) } return resourcePingAccessIdentityMappingReadResult(d, result, svc) } @@ -92,7 +92,7 @@ func resourcePingAccessIdentityMappingUpdate(ctx context.Context, d *schema.Reso result, _, err := svc.UpdateIdentityMappingCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update IdentityMapping: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update IdentityMapping: %s", err)) } d.SetId(result.Id.String()) @@ -106,12 +106,12 @@ func resourcePingAccessIdentityMappingDelete(ctx context.Context, d *schema.Reso log.Printf("[INFO] ResourceID: %s", d.Id()) _, err := svc.DeleteIdentityMappingCommand(&pingaccess.DeleteIdentityMappingCommandInput{Id:
d.Id()}) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete IdentityMapping: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete IdentityMapping: %s", err)) } return nil } -func resourcePingAccessIdentityMappingReadResult(d *schema.ResourceData, input *pingaccess.IdentityMappingView, svc *pingaccess.IdentityMappingsService) diag.Diagnostics { +func resourcePingAccessIdentityMappingReadResult(d *schema.ResourceData, input *pingaccess.IdentityMappingView, svc pingaccess.IdentityMappingsAPI) diag.Diagnostics { var diags diag.Diagnostics b, _ := json.Marshal(input.Configuration) config := string(b) diff --git a/pingaccess/resource_pingaccess_keypair.go b/pingaccess/resource_pingaccess_keypair.go index c6ea5b8b..8c5a8d8a 100644 --- a/pingaccess/resource_pingaccess_keypair.go +++ b/pingaccess/resource_pingaccess_keypair.go @@ -120,10 +120,10 @@ func resourcePingAccessKeyPairCreate(ctx context.Context, d *schema.ResourceData result, _, err := svc.ImportKeyPairCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create KeyPair: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create KeyPair: %s", err)) } - d.SetId(result.Id.String()) + d.SetId(strconv.Itoa(*result.Id)) return resourcePingAccessKeyPairReadResult(d, result) } @@ -134,7 +134,7 @@ func resourcePingAccessKeyPairRead(ctx context.Context, d *schema.ResourceData, } result, _, err := svc.GetKeyPairCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read KeyPair: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read KeyPair: %s", err)) } return resourcePingAccessKeyPairReadResult(d, result) } @@ -158,10 +158,10 @@ func resourcePingAccessKeyPairUpdate(ctx context.Context, d *schema.ResourceData result, _, err := svc.UpdateKeyPairCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update KeyPair: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update KeyPair: %s", err)) } - d.SetId(result.Id.String()) + d.SetId(strconv.Itoa(*result.Id)) return resourcePingAccessKeyPairReadResult(d, result) } @@ -169,7 +169,7 @@ func resourcePingAccessKeyPairDelete(ctx context.Context, d *schema.ResourceData svc := m.(*pa.Client).KeyPairs _, err := svc.DeleteKeyPairCommand(&pa.DeleteKeyPairCommandInput{Id: d.Id()}) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete KeyPair: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete KeyPair: %s", err)) } return nil } @@ -179,7 +179,8 @@ func resourcePingAccessKeyPairReadResult(d *schema.ResourceData, rv *pa.KeyPairV setResourceDataStringWithDiagnostic(d, "alias", rv.Alias, &diags) if rv.ChainCertificates != nil { if err := d.Set("chain_certificates", flattenChainCertificates(rv.ChainCertificates)); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
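// flattenChainCertificates (defined in structures.go, not shown in this
// hunk) will follow the usual flatten pattern: nested SDK structs become a
// []interface{} of map[string]interface{} that d.Set can store. A generic
// sketch with a hypothetical type and field:
type chainCert struct{ Alias *string }

func flattenCerts(in []*chainCert) []interface{} {
	out := make([]interface{}, 0, len(in))
	for _, c := range in {
		out = append(out, map[string]interface{}{"alias": *c.Alias})
	}
	return out
}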
+ } } setResourceDataBoolWithDiagnostic(d, "csr_pending", rv.CsrPending, &diags) diff --git a/pingaccess/resource_pingaccess_oauth_server.go b/pingaccess/resource_pingaccess_oauth_server.go index 8005628b..4c6b3996 100644 --- a/pingaccess/resource_pingaccess_oauth_server.go +++ b/pingaccess/resource_pingaccess_oauth_server.go @@ -86,23 +86,23 @@ func resourcePingAccessOAuthServerCreate(ctx context.Context, d *schema.Resource } func resourcePingAccessOAuthServerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).OAuth + svc := m.(*pa.Client).Oauth result, _, err := svc.GetAuthorizationServerCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read OAuthServerSettings: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read OAuthServerSettings: %s", err)) } return resourcePingAccessOAuthServerReadResult(d, result) } func resourcePingAccessOAuthServerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).OAuth + svc := m.(*pa.Client).Oauth input := pa.UpdateAuthorizationServerCommandInput{ Body: *resourcePingAccessOAuthServerReadData(d), } result, _, err := svc.UpdateAuthorizationServerCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update OAuthServerSettings: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update OAuthServerSettings: %s", err)) } d.SetId("oauth_server_settings") @@ -110,10 +110,10 @@ func resourcePingAccessOAuthServerUpdate(ctx context.Context, d *schema.Resource } func resourcePingAccessOAuthServerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).OAuth + svc := m.(*pa.Client).Oauth _, err := svc.DeleteAuthorizationServerCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to reset OAuthServerSettings: %s", err))} + return diag.FromErr(fmt.Errorf("unable to reset OAuthServerSettings: %s", err)) } return nil } @@ -132,7 +132,7 @@ func resourcePingAccessOAuthServerReadResult(d *schema.ResourceData, input *pa.A setResourceDataBoolWithDiagnostic(d, "use_proxy", input.UseProxy, &diags) if err := d.Set("targets", input.Targets); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } if input.ClientCredentials != nil { //TODO we should look at encrypting the value @@ -142,7 +142,7 @@ func resourcePingAccessOAuthServerReadResult(d *schema.ResourceData, input *pa.A creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = pw } if err := d.Set("client_credentials", creds); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
} } diff --git a/pingaccess/resource_pingaccess_oauth_server_test.go b/pingaccess/resource_pingaccess_oauth_server_test.go index 0090e8cd..03b32c99 100644 --- a/pingaccess/resource_pingaccess_oauth_server_test.go +++ b/pingaccess/resource_pingaccess_oauth_server_test.go @@ -35,7 +35,7 @@ func TestAccPingAccessOAuthServer(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckPingAccessOAuthServerExists("pingaccess_oauth_server.demo_pfr"), ), - ExpectError: regexp.MustCompile(`unable to update OAuthServerSettings: \[Save Failed\]\nIntrospection endpoint must be a valid relative path`), + ExpectError: regexp.MustCompile(`unable to update OAuthServerSettings: Save Failed:\nintrospectionEndpoint contains 1 validation failures:\n\tIntrospection endpoint must be a valid relative path`), }, }, }) @@ -73,7 +73,7 @@ func testAccCheckPingAccessOAuthServerExists(n string) resource.TestCheckFunc { return fmt.Errorf("No third party service ID is set") } - conn := testAccProvider.Meta().(*pa.Client).OAuth + conn := testAccProvider.Meta().(*pa.Client).Oauth result, _, err := conn.GetAuthorizationServerCommand() if err != nil { diff --git a/pingaccess/resource_pingaccess_pingfederate_admin.go b/pingaccess/resource_pingaccess_pingfederate_admin.go index 421c3632..4d58baf0 100644 --- a/pingaccess/resource_pingaccess_pingfederate_admin.go +++ b/pingaccess/resource_pingaccess_pingfederate_admin.go @@ -71,23 +71,23 @@ func resourcePingAccessPingFederateAdminCreate(ctx context.Context, d *schema.Re } func resourcePingAccessPingFederateAdminRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).PingFederate + svc := m.(*pa.Client).Pingfederate result, _, err := svc.GetPingFederateAdminCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read PingFederateAdmin: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read PingFederateAdmin: %s", err)) } return resourcePingAccessPingFederateAdminReadResult(d, result) } func resourcePingAccessPingFederateAdminUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).PingFederate + svc := m.(*pa.Client).Pingfederate input := pa.UpdatePingFederateAdminCommandInput{ Body: *resourcePingAccessPingFederateAdminReadData(d), } result, _, err := svc.UpdatePingFederateAdminCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update PingFederateAdmin: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update PingFederateAdmin: %s", err)) } d.SetId("pingfederate_admin_settings") @@ -95,10 +95,10 @@ func resourcePingAccessPingFederateAdminUpdate(ctx context.Context, d *schema.Re } func resourcePingAccessPingFederateAdminDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).PingFederate + svc := m.(*pa.Client).Pingfederate _, err := svc.DeletePingFederateCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to reset PingFederateAdmin: %s", err))} + return diag.FromErr(fmt.Errorf("unable to reset PingFederateAdmin: %s", err)) } return nil } @@ -121,7 +121,7 @@ func resourcePingAccessPingFederateAdminReadResult(d *schema.ResourceData, input creds[0]["value"] = pw } if err := d.Set("admin_password", creds); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
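// The SDK renamed several client fields (OAuth -> Oauth, PingFederate ->
// Pingfederate, AuthTokenManagements -> AuthTokenManagement), which is why
// the same one-line change repeats through every CRUD function and test. A
// small accessor would localize any future rename to one place (hypothetical
// helper; assumes the SDK exposes a PingfederateAPI interface like the other
// *API types in this patch):
func pingfederateSvc(m interface{}) pa.PingfederateAPI {
	return m.(*pa.Client).Pingfederate
}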
} } diff --git a/pingaccess/resource_pingaccess_pingfederate_admin_test.go b/pingaccess/resource_pingaccess_pingfederate_admin_test.go index bdcc6133..04996caf 100644 --- a/pingaccess/resource_pingaccess_pingfederate_admin_test.go +++ b/pingaccess/resource_pingaccess_pingfederate_admin_test.go @@ -67,7 +67,7 @@ func testAccCheckPingAccessPingFederateAdminExists(n string) resource.TestCheckF return fmt.Errorf("No third party service ID is set") } - conn := testAccProvider.Meta().(*pa.Client).PingFederate + conn := testAccProvider.Meta().(*pa.Client).Pingfederate result, _, err := conn.GetPingFederateAdminCommand() if err != nil { diff --git a/pingaccess/resource_pingaccess_pingfederate_oauth.go b/pingaccess/resource_pingaccess_pingfederate_oauth.go index 9a7afbf6..4d466276 100644 --- a/pingaccess/resource_pingaccess_pingfederate_oauth.go +++ b/pingaccess/resource_pingaccess_pingfederate_oauth.go @@ -71,23 +71,23 @@ func resourcePingAccessPingFederateOAuthCreate(ctx context.Context, d *schema.Re } func resourcePingAccessPingFederateOAuthRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).PingFederate + svc := m.(*pa.Client).Pingfederate result, _, err := svc.GetPingFederateAccessTokensCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to reset PingFederateOAuth: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read PingFederateOAuth: %s", err)) } return resourcePingAccessPingFederateOAuthReadResult(d, result) } func resourcePingAccessPingFederateOAuthUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).PingFederate + svc := m.(*pa.Client).Pingfederate input := pa.UpdatePingFederateAccessTokensCommandInput{ Body: *resourcePingAccessPingFederateOAuthReadData(d), } result, _, err := svc.UpdatePingFederateAccessTokensCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to reset PingFederateOAuth: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update PingFederateOAuth: %s", err)) } d.SetId("pingfederate_oauth_settings") @@ -95,10 +95,10 @@ func resourcePingAccessPingFederateOAuthUpdate(ctx context.Context, d *schema.Re } func resourcePingAccessPingFederateOAuthDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).PingFederate + svc := m.(*pa.Client).Pingfederate _, err := svc.DeletePingFederateAccessTokensCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to reset PingFederateOAuth: %s", err))} + return diag.FromErr(fmt.Errorf("unable to reset PingFederateOAuth: %s", err)) } return nil } @@ -121,7 +121,7 @@ func resourcePingAccessPingFederateOAuthReadResult(d *schema.ResourceData, input creds[0]["value"] = pw } if err := d.Set("client_secret", creds); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...
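// These read functions overwrite the credential value returned by the API
// with the one already held in state (pw), presumably because PingAccess
// masks secrets on read; without this, every plan would show a spurious
// diff. The pattern reduced to a sketch (attribute address is illustrative):
func preserveSecret(d *schema.ResourceData, creds []map[string]interface{}) {
	if pw, ok := d.GetOk("client_secret.0.value"); ok {
		creds[0]["value"] = pw // keep the real secret from state
	}
}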
} } diff --git a/pingaccess/resource_pingaccess_pingfederate_oauth_test.go b/pingaccess/resource_pingaccess_pingfederate_oauth_test.go index 1e8c9b69..7e2974cb 100644 --- a/pingaccess/resource_pingaccess_pingfederate_oauth_test.go +++ b/pingaccess/resource_pingaccess_pingfederate_oauth_test.go @@ -57,7 +57,7 @@ func testAccCheckPingAccessPingFederateOAuthExists(n string) resource.TestCheckF return fmt.Errorf("No third party service ID is set") } - conn := testAccProvider.Meta().(*pa.Client).PingFederate + conn := testAccProvider.Meta().(*pa.Client).Pingfederate result, _, err := conn.GetPingFederateAccessTokensCommand() if err != nil { diff --git a/pingaccess/resource_pingaccess_pingfederate_runtime.go b/pingaccess/resource_pingaccess_pingfederate_runtime.go index c9a3b2e9..352e7e98 100644 --- a/pingaccess/resource_pingaccess_pingfederate_runtime.go +++ b/pingaccess/resource_pingaccess_pingfederate_runtime.go @@ -66,23 +66,23 @@ func resourcePingAccessPingFederateRuntimeCreate(ctx context.Context, d *schema. } func resourcePingAccessPingFederateRuntimeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).PingFederate + svc := m.(*pa.Client).Pingfederate result, _, err := svc.GetPingFederateRuntimeCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read PingFederateRuntime: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read PingFederateRuntime: %s", err)) } return resourcePingAccessPingFederateRuntimeReadResult(d, result) } func resourcePingAccessPingFederateRuntimeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).PingFederate + svc := m.(*pa.Client).Pingfederate input := pa.UpdatePingFederateRuntimeCommandInput{ Body: *resourcePingAccessPingFederateRuntimeReadData(d), } result, _, err := svc.UpdatePingFederateRuntimeCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update PingFederateRuntime: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update PingFederateRuntime: %s", err)) } d.SetId("pingfederate_runtime_settings") @@ -90,10 +90,10 @@ func resourcePingAccessPingFederateRuntimeUpdate(ctx context.Context, d *schema. 
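// The PingFederate settings endpoints are singletons: the Terraform ID is a
// fixed sentinel string and "delete" resets server defaults rather than
// removing anything. A common shape for such resources (sketch; assumes
// create simply delegates to update, as the fixed SetId calls suggest):
func singletonCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	d.SetId("pingfederate_runtime_settings") // fixed ID: only one exists
	return singletonUpdate(ctx, d, m)
}

func singletonUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	// PUT the full settings body here; the API replaces it wholesale
	return nil
}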
} func resourcePingAccessPingFederateRuntimeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).PingFederate + svc := m.(*pa.Client).Pingfederate _, err := svc.DeletePingFederateCommand() if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to reset PingFederateRuntime: %s", err))} + return diag.FromErr(fmt.Errorf("unable to reset PingFederateRuntime: %s", err)) } return nil } diff --git a/pingaccess/resource_pingaccess_pingfederate_runtime_test.go b/pingaccess/resource_pingaccess_pingfederate_runtime_test.go index c8480685..1f823449 100644 --- a/pingaccess/resource_pingaccess_pingfederate_runtime_test.go +++ b/pingaccess/resource_pingaccess_pingfederate_runtime_test.go @@ -63,7 +63,7 @@ func testAccCheckPingAccessPingFederateRuntimeExists(n string) resource.TestChec return fmt.Errorf("No third party service ID is set") } - conn := testAccProvider.Meta().(*pa.Client).PingFederate + conn := testAccProvider.Meta().(*pa.Client).Pingfederate result, _, err := conn.GetPingFederateRuntimeCommand() if err != nil { diff --git a/pingaccess/resource_pingaccess_rule.go b/pingaccess/resource_pingaccess_rule.go index 8d920b03..c01994f4 100644 --- a/pingaccess/resource_pingaccess_rule.go +++ b/pingaccess/resource_pingaccess_rule.go @@ -60,7 +60,7 @@ func resourcePingAccessRuleCreate(ctx context.Context, d *schema.ResourceData, m result, _, err := svc.AddRuleCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create Rule: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create Rule: %s", err)) } d.SetId(result.Id.String()) @@ -75,7 +75,7 @@ func resourcePingAccessRuleRead(ctx context.Context, d *schema.ResourceData, m i result, _, err := svc.GetRuleCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read Rule: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read Rule: %s", err)) } return resourcePingAccessRuleReadResult(d, result, svc) @@ -90,7 +90,7 @@ func resourcePingAccessRuleUpdate(ctx context.Context, d *schema.ResourceData, m result, _, err := svc.UpdateRuleCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update Rule: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update Rule: %s", err)) } d.SetId(result.Id.String()) @@ -101,12 +101,12 @@ func resourcePingAccessRuleDelete(ctx context.Context, d *schema.ResourceData, m svc := m.(*pingaccess.Client).Rules _, err := svc.DeleteRuleCommand(&pingaccess.DeleteRuleCommandInput{Id: d.Id()}) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete Rule: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete Rule: %s", err)) } return nil } -func resourcePingAccessRuleReadResult(d *schema.ResourceData, input *pingaccess.RuleView, svc *pingaccess.RulesService) diag.Diagnostics { +func resourcePingAccessRuleReadResult(d *schema.ResourceData, input *pingaccess.RuleView, svc pingaccess.RulesAPI) diag.Diagnostics { var diags diag.Diagnostics b, _ := json.Marshal(input.Configuration) config := string(b) @@ -123,7 +123,7 @@ func resourcePingAccessRuleReadResult(d *schema.ResourceData, input *pingaccess. 
setResourceDataStringWithDiagnostic(d, "class_name", input.ClassName, &diags) setResourceDataStringWithDiagnostic(d, "configuration", &config, &diags) if err := d.Set("supported_destinations", input.SupportedDestinations); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } return diags } diff --git a/pingaccess/resource_pingaccess_rulesets.go b/pingaccess/resource_pingaccess_rulesets.go index 12bbe93c..02dceaae 100644 --- a/pingaccess/resource_pingaccess_rulesets.go +++ b/pingaccess/resource_pingaccess_rulesets.go @@ -77,7 +77,7 @@ func resourcePingAccessRuleSetCreate(ctx context.Context, d *schema.ResourceData result, _, err := svc.AddRuleSetCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create RuleSet: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create RuleSet: %s", err)) } d.SetId(result.Id.String()) @@ -90,13 +90,12 @@ func resourcePingAccessRuleSetRead(ctx context.Context, d *schema.ResourceData, Id: d.Id(), }) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read RuleSet: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read RuleSet: %s", err)) } return resourcePingAccessRuleSetReadResult(d, result) } func resourcePingAccessRuleSetUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - // log.Printf("[INFO] resourcePingAccessRuleSetUpdate") name := d.Get("name").(string) elementType := d.Get("element_type").(string) policy := expandStringList(d.Get("policy").(*schema.Set).List()) @@ -123,7 +122,7 @@ func resourcePingAccessRuleSetUpdate(ctx context.Context, d *schema.ResourceData result, _, err := svc.UpdateRuleSetCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update RuleSet: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update RuleSet: %s", err)) } d.SetId(result.Id.String()) return resourcePingAccessRuleSetReadResult(d, result) @@ -134,7 +133,7 @@ func resourcePingAccessRuleSetDelete(ctx context.Context, d *schema.ResourceData _, err := svc.DeleteRuleSetCommand(&pingaccess.DeleteRuleSetCommandInput{Id: d.Id()}) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete RuleSet: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete RuleSet: %s", err)) } return nil } @@ -150,7 +149,7 @@ func resourcePingAccessRuleSetReadResult(d *schema.ResourceData, input *pingacce polIds = append(polIds, strconv.Itoa(*p)) } if err := d.Set("policy", polIds); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
} } return diags diff --git a/pingaccess/resource_pingaccess_site.go b/pingaccess/resource_pingaccess_site.go index c0eb75cf..4f9b23c8 100644 --- a/pingaccess/resource_pingaccess_site.go +++ b/pingaccess/resource_pingaccess_site.go @@ -108,7 +108,7 @@ func resourcePingAccessSiteCreate(ctx context.Context, d *schema.ResourceData, m result, _, err := svc.AddSiteCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create Site: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create Site: %s", err)) } d.SetId(result.Id.String()) @@ -122,7 +122,7 @@ func resourcePingAccessSiteRead(ctx context.Context, d *schema.ResourceData, m i } result, _, err := svc.GetSiteCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read Site: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read Site: %s", err)) } return resourcePingAccessSiteReadResult(d, result) } @@ -135,7 +135,7 @@ func resourcePingAccessSiteUpdate(ctx context.Context, d *schema.ResourceData, m } result, _, err := svc.UpdateSiteCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update Site: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update Site: %s", err)) } return resourcePingAccessSiteReadResult(d, result) } @@ -150,7 +150,7 @@ func resourcePingAccessSiteDelete(ctx context.Context, d *schema.ResourceData, m _, err := svc.DeleteSiteCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete Site: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete Site: %s", err)) } return nil } @@ -167,11 +167,11 @@ func resourcePingAccessSiteReadResult(d *schema.ResourceData, input *pa.SiteView setResourceDataBoolWithDiagnostic(d, "secure", input.Secure, &diags) setResourceDataBoolWithDiagnostic(d, "send_pa_cookie", input.SendPaCookie, &diags) if err := d.Set("site_authenticator_ids", input.SiteAuthenticatorIds); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } setResourceDataBoolWithDiagnostic(d, "skip_hostname_verification", input.SkipHostnameVerification, &diags) if err := d.Set("targets", input.Targets); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
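// The setResourceData*WithDiagnostic helpers live in structures.go and are
// not shown in this patch; judging from their call sites, their likely shape
// is a nil-checked d.Set that pushes any failure onto the shared diags slice:
func setResourceDataStringWithDiagnostic(d *schema.ResourceData, key string, value *string, diags *diag.Diagnostics) {
	if value != nil {
		if err := d.Set(key, *value); err != nil {
			*diags = append(*diags, diag.FromErr(err)...)
		}
	}
}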
} setResourceDataIntWithDiagnostic(d, "trusted_certificate_group_id", input.TrustedCertificateGroupId, &diags) setResourceDataBoolWithDiagnostic(d, "use_proxy", input.UseProxy, &diags) diff --git a/pingaccess/resource_pingaccess_site_authenticator.go b/pingaccess/resource_pingaccess_site_authenticator.go index 6773f094..a179b8fb 100644 --- a/pingaccess/resource_pingaccess_site_authenticator.go +++ b/pingaccess/resource_pingaccess_site_authenticator.go @@ -60,7 +60,7 @@ func resourcePingAccessSiteAuthenticatorCreate(ctx context.Context, d *schema.Re result, _, err := svc.AddSiteAuthenticatorCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create SiteAuthenticator: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create SiteAuthenticator: %s", err)) } d.SetId(result.Id.String()) @@ -74,7 +74,7 @@ func resourcePingAccessSiteAuthenticatorRead(ctx context.Context, d *schema.Reso } result, _, err := svc.GetSiteAuthenticatorCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read SiteAuthenticator: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read SiteAuthenticator: %s", err)) } return resourcePingAccessSiteAuthenticatorReadResult(d, result, svc) } @@ -87,7 +87,7 @@ func resourcePingAccessSiteAuthenticatorUpdate(ctx context.Context, d *schema.Re } result, _, err := svc.UpdateSiteAuthenticatorCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update SiteAuthenticator: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update SiteAuthenticator: %s", err)) } return resourcePingAccessSiteAuthenticatorReadResult(d, result, svc) } @@ -102,12 +102,12 @@ func resourcePingAccessSiteAuthenticatorDelete(ctx context.Context, d *schema.Re _, err := svc.DeleteSiteAuthenticatorCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete SiteAuthenticator: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete SiteAuthenticator: %s", err)) } return nil } -func resourcePingAccessSiteAuthenticatorReadResult(d *schema.ResourceData, input *pingaccess.SiteAuthenticatorView, svc *pingaccess.SiteAuthenticatorsService) diag.Diagnostics { +func resourcePingAccessSiteAuthenticatorReadResult(d *schema.ResourceData, input *pingaccess.SiteAuthenticatorView, svc pingaccess.SiteAuthenticatorsAPI) diag.Diagnostics { var diags diag.Diagnostics b, _ := json.Marshal(input.Configuration) config := string(b) diff --git a/pingaccess/resource_pingaccess_third_party_service.go b/pingaccess/resource_pingaccess_third_party_service.go index b5bb1c5c..fc74f558 100644 --- a/pingaccess/resource_pingaccess_third_party_service.go +++ b/pingaccess/resource_pingaccess_third_party_service.go @@ -2,10 +2,7 @@ package pingaccess import ( "context" - "crypto/tls" "fmt" - "net/http" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" @@ -92,7 +89,7 @@ func resourcePingAccessThirdPartyServiceCreate(ctx context.Context, d *schema.Re result, _, err := svc.AddThirdPartyServiceCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create ThirdPartyService: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create ThirdPartyService: %s", err)) } @@ -107,7 +104,7 @@ func resourcePingAccessThirdPartyServiceRead(ctx context.Context, d *schema.Reso } result, _, err := 
svc.GetThirdPartyServiceCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read ThirdPartyService: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read ThirdPartyService: %s", err)) } return resourcePingAccessThirdPartyServiceReadResult(d, result) } @@ -120,14 +117,13 @@ func resourcePingAccessThirdPartyServiceUpdate(ctx context.Context, d *schema.Re } result, _, err := svc.UpdateThirdPartyServiceCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update ThirdPartyService: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update ThirdPartyService: %s", err)) } return resourcePingAccessThirdPartyServiceReadResult(d, result) } func resourcePingAccessThirdPartyServiceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).ThirdPartyServices - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} input := &pa.DeleteThirdPartyServiceCommandInput{ Id: d.Id(), @@ -135,7 +131,7 @@ func resourcePingAccessThirdPartyServiceDelete(ctx context.Context, d *schema.Re _, err := svc.DeleteThirdPartyServiceCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete ThirdPartyService: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete ThirdPartyService: %s", err)) } return nil } @@ -153,7 +149,7 @@ func resourcePingAccessThirdPartyServiceReadResult(d *schema.ResourceData, input setResourceDataIntWithDiagnostic(d, "trusted_certificate_group_id", input.TrustedCertificateGroupId, &diags) setResourceDataBoolWithDiagnostic(d, "use_proxy", input.UseProxy, &diags) if err := d.Set("targets", input.Targets); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
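// Dropping the http.DefaultTransport mutation from the delete function above
// is a real fix: setting InsecureSkipVerify on the default transport leaks
// into every HTTP client in the process. If a dev or test appliance genuinely
// needs it, the setting can be scoped to a single client instead (sketch;
// assumes imports crypto/tls and net/http):
func insecureDevClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			// dev/test only: never skip verification in production
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
}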
} return diags } diff --git a/pingaccess/resource_pingaccess_trusted_certificate_groups.go b/pingaccess/resource_pingaccess_trusted_certificate_groups.go index f6c2884e..97717b07 100644 --- a/pingaccess/resource_pingaccess_trusted_certificate_groups.go +++ b/pingaccess/resource_pingaccess_trusted_certificate_groups.go @@ -70,7 +70,7 @@ func resourcePingAccessTrustedCertificateGroupsCreate(ctx context.Context, d *sc result, _, err := svc.AddTrustedCertificateGroupCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create TrustedCertificateGroup: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create TrustedCertificateGroup: %s", err)) } d.SetId(result.Id.String()) @@ -84,7 +84,7 @@ func resourcePingAccessTrustedCertificateGroupsRead(ctx context.Context, d *sche } result, _, err := svc.GetTrustedCertificateGroupCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read TrustedCertificateGroup: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read TrustedCertificateGroup: %s", err)) } return resourcePingAccessTrustedCertificateGroupsReadResult(d, result) } @@ -97,7 +97,7 @@ func resourcePingAccessTrustedCertificateGroupsUpdate(ctx context.Context, d *sc } result, _, err := svc.UpdateTrustedCertificateGroupCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update TrustedCertificateGroup: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update TrustedCertificateGroup: %s", err)) } return resourcePingAccessTrustedCertificateGroupsReadResult(d, result) } @@ -112,7 +112,7 @@ func resourcePingAccessTrustedCertificateGroupsDelete(ctx context.Context, d *sc _, err := svc.DeleteTrustedCertificateGroupCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete TrustedCertificateGroup: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete TrustedCertificateGroup: %s", err)) } return nil } @@ -131,7 +131,7 @@ func resourcePingAccessTrustedCertificateGroupsReadResult(d *schema.ResourceData certs = append(certs, strconv.Itoa(*cert)) } if err := d.Set("cert_ids", certs); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) 
} } diff --git a/pingaccess/resource_pingaccess_virtualhost.go b/pingaccess/resource_pingaccess_virtualhost.go index 9e914dd3..06c7f46e 100644 --- a/pingaccess/resource_pingaccess_virtualhost.go +++ b/pingaccess/resource_pingaccess_virtualhost.go @@ -60,7 +60,7 @@ func resourcePingAccessVirtualHostCreate(ctx context.Context, d *schema.Resource result, _, err := svc.AddVirtualHostCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create VirtualHost: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create VirtualHost: %s", err)) } d.SetId(result.Id.String()) @@ -74,7 +74,7 @@ func resourcePingAccessVirtualHostRead(ctx context.Context, d *schema.ResourceDa } result, _, err := svc.GetVirtualHostCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read VirtualHost: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read VirtualHost: %s", err)) } return resourcePingAccessVirtualHostReadResult(d, result) } @@ -88,7 +88,7 @@ func resourcePingAccessVirtualHostUpdate(ctx context.Context, d *schema.Resource result, _, err := svc.UpdateVirtualHostCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update VirtualHost: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update VirtualHost: %s", err)) } return resourcePingAccessVirtualHostReadResult(d, result) } @@ -101,7 +101,7 @@ func resourcePingAccessVirtualHostDelete(ctx context.Context, d *schema.Resource _, err := svc.DeleteVirtualHostCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete VirtualHost: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete VirtualHost: %s", err)) } return nil } diff --git a/pingaccess/resource_pingaccess_websession.go b/pingaccess/resource_pingaccess_websession.go index 79016f63..b15019fb 100644 --- a/pingaccess/resource_pingaccess_websession.go +++ b/pingaccess/resource_pingaccess_websession.go @@ -155,7 +155,7 @@ func resourcePingAccessWebSessionCreate(ctx context.Context, d *schema.ResourceD result, _, err := svc.AddWebSessionCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to create WebSession: %s", err))} + return diag.FromErr(fmt.Errorf("unable to create WebSession: %s", err)) } d.SetId(result.Id.String()) @@ -169,7 +169,7 @@ func resourcePingAccessWebSessionRead(ctx context.Context, d *schema.ResourceDat } result, _, err := svc.GetWebSessionCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to read WebSession: %s", err))} + return diag.FromErr(fmt.Errorf("unable to read WebSession: %s", err)) } return resourcePingAccessWebSessionReadResult(d, result) } @@ -182,7 +182,7 @@ func resourcePingAccessWebSessionUpdate(ctx context.Context, d *schema.ResourceD } result, _, err := svc.UpdateWebSessionCommand(&input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to update WebSession: %s", err))} + return diag.FromErr(fmt.Errorf("unable to update WebSession: %s", err)) } return resourcePingAccessWebSessionReadResult(d, result) } @@ -197,7 +197,7 @@ func resourcePingAccessWebSessionDelete(ctx context.Context, d *schema.ResourceD _, err := svc.DeleteWebSessionCommand(input) if err != nil { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("unable to delete WebSession: %s", err))} + return diag.FromErr(fmt.Errorf("unable to delete WebSession: %s", err)) } return nil } @@ -227,7 +227,7 @@ func 
resourcePingAccessWebSessionReadResult(d *schema.ResourceData, input *pa.We if input.Scopes != nil { if err := d.Set("scopes", *input.Scopes); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } } if input.ClientCredentials != nil { @@ -238,7 +238,7 @@ func resourcePingAccessWebSessionReadResult(d *schema.ResourceData, input *pa.We creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = pw } if err := d.Set("client_credentials", creds); err != nil { - diags = append(diags, diag.FromErr(err)) + diags = append(diags, diag.FromErr(err)...) } } return diags diff --git a/pingaccess/resource_pingaccess_websession_test.go b/pingaccess/resource_pingaccess_websession_test.go index 9891121e..1865ce71 100644 --- a/pingaccess/resource_pingaccess_websession_test.go +++ b/pingaccess/resource_pingaccess_websession_test.go @@ -152,8 +152,8 @@ func Test_resourcePingAccessWebSessionReadData(t *testing.T) { resourceSchema := resourcePingAccessWebSessionSchema() resourceLocalData := schema.TestResourceDataRaw(t, resourceSchema, map[string]interface{}{}) - resourcePingAccessWebSessionReadResult(resourceLocalData, &tc.WebSession) + resourcePingAccessWebSessionReadResult(resourceLocalData, &tc.WebSession) if got := *resourcePingAccessWebSessionReadData(resourceLocalData); !cmp.Equal(got, tc.WebSession) { t.Errorf("resourcePingAccessWebSessionReadData() = %v", cmp.Diff(got, tc.WebSession)) } diff --git a/pingaccess/structures.go b/pingaccess/structures.go index 1ae0cb6c..75dab0d9 100644 --- a/pingaccess/structures.go +++ b/pingaccess/structures.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "hash/crc32" "log" "strconv" "strings" @@ -12,7 +13,6 @@ import ( "github.com/tidwall/gjson" "github.com/tidwall/sjson" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/hashcode" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) @@ -336,7 +336,7 @@ func configFieldHash(v interface{}) int { if d, ok := m["md5sum"]; ok && d.(string) != "" { buf.WriteString(fmt.Sprintf("%s-", d.(string))) } - return hashcode.String(buf.String()) + return hashString(buf.String()) } func flattenLinkViewList(in []*pa.LinkView) []interface{} { @@ -451,7 +451,7 @@ for _, f := range value.ConfigurationFields { if *f.Required { if v := gjson.Get(conf, *f.Name); !v.Exists() { - diags = append(diags, diag.FromErr(fmt.Errorf("the field '%s' is required for the class_name '%s'", *f.Name, className))) + diags = append(diags, diag.FromErr(fmt.Errorf("the field '%s' is required for the class_name '%s'", *f.Name, className))...) } } } @@ -496,7 +496,7 @@ func validateRulesConfiguration(className string, d *schema.ResourceDiff, desc * for _, f := range value.ConfigurationFields { if *f.Required { if v := gjson.Get(conf, *f.Name); !v.Exists() { - diags = append(diags, diag.FromErr(fmt.Errorf("the field '%s' is required for the class_name '%s'", *f.Name, className))) + diags = append(diags, diag.FromErr(fmt.Errorf("the field '%s' is required for the class_name '%s'", *f.Name, className))...) } } } @@ -513,3 +513,20 @@ func validateRulesConfiguration(className string, d *schema.ResourceDiff, desc * } return nil } + +// hashString hashes a string to a hashcode. +// +// crc32 returns a uint32, but for our use we need +// a non-negative integer.
Here we cast to an integer +// and invert it if the result is negative. +func hashString(s string) int { + v := int(crc32.ChecksumIEEE([]byte(s))) + if v >= 0 { + return v + } + if -v >= 0 { + return -v + } + // v == MinInt + return 0 +} diff --git a/pingaccess/validators.go b/pingaccess/validators.go index 53fcf373..096af796 100644 --- a/pingaccess/validators.go +++ b/pingaccess/validators.go @@ -10,7 +10,7 @@ import ( func validateWebOrAPI(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "Web" && v != "API" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Web' or 'API' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'Web' or 'API' not %s", v)) } return nil } @@ -18,7 +18,7 @@ func validateWebOrAPI(value interface{}, path cty.Path) diag.Diagnostics { func validateRuleOrRuleSet(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "Rule" && v != "RuleSet" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Rule' or 'RuleSet' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'Rule' or 'RuleSet' not %s", v)) } return nil } @@ -26,7 +26,7 @@ func validateRuleOrRuleSet(value interface{}, path cty.Path) diag.Diagnostics { func validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "SuccessIfAllSucceed" && v != "SuccessIfAnyOneSucceeds" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not %s", v)) } return nil } @@ -34,7 +34,7 @@ func validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(value interface{}, pat func validateAudience(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if len(v) < 1 || len(v) > 31 { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be between 1 and 32 characters not %d", len(v)))} + return diag.FromErr(fmt.Errorf("must be between 1 and 32 characters not %d", len(v))) } return nil } @@ -42,7 +42,7 @@ func validateAudience(value interface{}, path cty.Path) diag.Diagnostics { func validateCookieType(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "Encrypted" && v != "Signed" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Encrypted' or 'Signed' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'Encrypted' or 'Signed' not %s", v)) } return nil } @@ -50,7 +50,7 @@ func validateCookieType(value interface{}, path cty.Path) diag.Diagnostics { func validateOidcLoginType(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "Code" && v != "POST" && v != "x_post" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Code', 'POST' or 'x_post' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'Code', 'POST' or 'x_post' not %s", v)) } return nil } @@ -58,7 +58,7 @@ func validateOidcLoginType(value interface{}, path cty.Path) diag.Diagnostics { func validatePkceChallengeType(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "SHA256" && v != "OFF" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'SHA256' or 'OFF' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'SHA256' or 'OFF' not %s", v)) } return nil } @@ -66,7 +66,7 @@ func validatePkceChallengeType(value interface{}, path cty.Path) 
diag.Diagnostic func validateRequestPreservationType(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "None" && v != "POST" && v != "All" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'None', 'POST' or 'All' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'None', 'POST' or 'All' not %s", v)) } return nil } @@ -74,7 +74,7 @@ func validateRequestPreservationType(value interface{}, path cty.Path) diag.Diag func validateWebStorageType(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "SessionStorage" && v != "LocalStorage" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'SessionStorage' or 'LocalStorage' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'SessionStorage' or 'LocalStorage' not %s", v)) } return nil } @@ -82,7 +82,7 @@ func validateWebStorageType(value interface{}, path cty.Path) diag.Diagnostics { func validateListLocationValue(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "FIRST" && v != "LAST" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'FIRST' or 'LAST' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'FIRST' or 'LAST' not %s", v)) } return nil } @@ -90,7 +90,7 @@ func validateListLocationValue(value interface{}, path cty.Path) diag.Diagnostic func validateHTTPListenerName(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "ADMIN" && v != "AGENT" && v != "ENGINE" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'ADMIN', 'AGENT' or 'ENGINE' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'ADMIN', 'AGENT' or 'ENGINE' not %s", v)) } return nil } @@ -98,7 +98,7 @@ func validateHTTPListenerName(value interface{}, path cty.Path) diag.Diagnostics func validateWebSessionSameSite(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "Disabled" && v != "Lax" && v != "None" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Disabled', 'Lax' or 'None' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'Disabled', 'Lax' or 'None' not %s", v)) } return nil } @@ -106,7 +106,7 @@ func validateWebSessionSameSite(value interface{}, path cty.Path) diag.Diagnosti func validateAuditLevel(value interface{}, path cty.Path) diag.Diagnostics { v := value.(string) if v != "ON" && v != "OFF" { - return diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'ON' or 'OFF' not %s", v))} + return diag.FromErr(fmt.Errorf("must be either 'ON' or 'OFF' not %s", v)) } return nil } diff --git a/pingaccess/validators_test.go b/pingaccess/validators_test.go index ab446b8d..41cbc034 100644 --- a/pingaccess/validators_test.go +++ b/pingaccess/validators_test.go @@ -27,7 +27,7 @@ func Test_validateWebOrAPI(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Web' or 'API' not other"))}, + expectedDiags: diag.FromErr(fmt.Errorf("must be either 'Web' or 'API' not other")), }, } for _, tc := range tests { @@ -70,7 +70,7 @@ func Test_validateRuleOrRuleSet(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Rule' or 'RuleSet' not other"))}, + expectedDiags: diag.FromErr(fmt.Errorf("must be either 'Rule' or 'RuleSet' not other")), }, } for _, tc := range tests { @@ -113,7 +113,7 @@ func 
Test_validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not other"))}, + expectedDiags: diag.FromErr(fmt.Errorf("must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not other")), }, } for _, tc := range tests { @@ -151,12 +151,12 @@ func Test_validateAudience(t *testing.T) { { name: "Empty value failes", value: "", - expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be between 1 and 32 characters not 0"))}, + expectedDiags: diag.FromErr(fmt.Errorf("must be between 1 and 31 characters not 0")), }, { name: "Very long value fails", value: "12345678901234567890123456789012", - expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be between 1 and 32 characters not 32"))}, + expectedDiags: diag.FromErr(fmt.Errorf("must be between 1 and 31 characters not 32")), }, } for _, tc := range tests { @@ -199,7 +199,7 @@ func Test_validateCookieType(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Encrypted' or 'Signed' not other"))}, + expectedDiags: diag.FromErr(fmt.Errorf("must be either 'Encrypted' or 'Signed' not other")), }, } for _, tc := range tests { @@ -247,7 +247,7 @@ func Test_validateOidcLoginType(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'Code', 'POST' or 'x_post' not other"))}, + expectedDiags: diag.FromErr(fmt.Errorf("must be either 'Code', 'POST' or 'x_post' not other")), }, } for _, tc := range tests { @@ -295,7 +295,7 @@ func Test_validateRequestPreservationType(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'None', 'POST' or 'All' not other"))}, + expectedDiags: diag.FromErr(fmt.Errorf("must be either 'None', 'POST' or 'All' not other")), }, } for _, tc := range tests { @@ -338,7 +338,7 @@ func Test_validateWebStorageType(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'SessionStorage' or 'LocalStorage' not other"))}, + expectedDiags: diag.FromErr(fmt.Errorf("must be either 'SessionStorage' or 'LocalStorage' not other")), }, } for _, tc := range tests { @@ -381,7 +381,7 @@ func Test_validateListLocationValue(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.Diagnostics{diag.FromErr(fmt.Errorf("must be either 'FIRST' or 'LAST' not other"))}, + expectedDiags: diag.FromErr(fmt.Errorf("must be either 'FIRST' or 'LAST' not other")), }, } for _, tc := range tests { diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt index 5f14d116..899129ec 100644 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -1,3 +1,3 @@ AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. Copyright 2014-2015 Stripe, Inc. 
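The validator and test changes above all follow from one terraform-plugin-sdk v2 detail: diag.FromErr already returns diag.Diagnostics (a slice of diag.Diagnostic), so the old diag.Diagnostics{...} literal around it is redundant and is dropped. A minimal sketch of the resulting validator shape and of where it attaches in a v2 schema; exampleEnumValidator and exampleSchema are illustrative names, not part of this patch:

package pingaccess

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleEnumValidator mirrors the validators above: in SDK v2 a validator
// returns diag.Diagnostics, and diag.FromErr already produces that slice,
// so its result is returned directly rather than wrapped in a literal.
func exampleEnumValidator(value interface{}, path cty.Path) diag.Diagnostics {
	v := value.(string)
	if v != "ON" && v != "OFF" {
		return diag.FromErr(fmt.Errorf("must be either 'ON' or 'OFF' not %s", v))
	}
	return nil
}

// exampleSchema shows where such a validator plugs in: ValidateDiagFunc is
// the v2, cty-aware counterpart of the v1 ValidateFunc field.
func exampleSchema() *schema.Schema {
	return &schema.Schema{
		Type:             schema.TypeString,
		Optional:         true,
		ValidateDiagFunc: exampleEnumValidator,
	}
}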
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index 56fdfc2b..99849c0e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -138,8 +138,27 @@ type RequestFailure interface { RequestID() string } -// NewRequestFailure returns a new request error wrapper for the given Error -// provided. +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { return newRequestError(err, statusCode, reqID) } + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go index 0202a008..9cf7eaf4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -1,6 +1,9 @@ package awserr -import "fmt" +import ( + "encoding/hex" + "fmt" +) // SprintError returns a string of the formatted error code. // @@ -119,6 +122,7 @@ type requestError struct { awsError statusCode int requestID string + bytes []byte } // newRequestError returns a wrapped error with additional information for @@ -170,6 +174,29 @@ func (r requestError) OrigErrs() []error { return []error{r.OrigErr()} } +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + // An error list that satisfies the golang interface type errorList []error @@ -181,7 +208,7 @@ func (e errorList) Error() string { // How do we want to handle the array size being zero if size := len(e); size > 0 { for i := 0; i < size; i++ { - msg += fmt.Sprintf("%s", e[i].Error()) + msg += e[i].Error() // We check the next index to see if it is within the slice. // If it is, then we append a newline. 
We do this, because unit tests // could be broken with the additional '\n' diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go index 59fa4a55..142a7a01 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -15,7 +15,7 @@ func DeepEqual(a, b interface{}) bool { rb := reflect.Indirect(reflect.ValueOf(b)) if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type the are equal + // If the elements are both nil, and of the same type they are equal // If they are of different types they are not equal return reflect.TypeOf(a) == reflect.TypeOf(b) } else if raValid != rbValid { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go index 11c52c38..285e54d6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -185,13 +185,12 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { // SetValueAtPath sets a value at the case insensitive lexical path inside // of a structure. func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue } + setValue(rval, v) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go index b6432f1a..645df245 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -23,28 +23,27 @@ func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { case reflect.Struct: buf.WriteString("{\n") - names := []string{} for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { + ft := v.Type().Field(i) + fv := v.Field(i) + + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { continue // ignore unexported fields } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { continue // ignore unset fields } - names = append(names, name) - } - for i, n := range names { - val := v.FieldByName(n) buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - stringValue(val, indent+2, buf) + buf.WriteString(ft.Name + ": ") - if i < len(names)-1 { - buf.WriteString(",\n") + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("") + } else { + stringValue(fv, indent+2, buf) } + + buf.WriteString(",\n") } buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 212fe25e..c022407f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -18,7 +18,7 @@ type Config struct { // States that the signing name did not 
come from a modeled source but // was derived based on other data. Used by service client constructors - // to determine if the signin name can be overriden based on metadata the + // to determine if the signin name can be overridden based on metadata the // service has. SigningNameDerived bool } @@ -64,7 +64,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op default: maxRetries := aws.IntValue(cfg.MaxRetries) if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = 3 + maxRetries = DefaultRetryerMaxNumRetries } svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index a397b0d0..0fda4251 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,6 +1,7 @@ package client import ( + "math" "strconv" "time" @@ -9,82 +10,142 @@ import ( ) // DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, implement the -// request.Retryer interface or create a structure type that composes this -// struct and override the specific methods. For example, to override only -// the MaxRetries method: +// most services. If you want to implement custom retry logic, you can implement the +// request.Retryer interface. // -// type retryer struct { -// client.DefaultRetryer -// } -// -// // This implementation always has 100 max retries -// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { - NumMaxRetries int + // Num max Retries is the number of max retries that will be performed. + // By default, this is zero. + NumMaxRetries int + + // MinRetryDelay is the minimum retry delay after which retry will be performed. + // If not set, the value is 0ns. + MinRetryDelay time.Duration + + // MinThrottleRetryDelay is the minimum retry delay when throttled. + // If not set, the value is 0ns. + MinThrottleDelay time.Duration + + // MaxRetryDelay is the maximum retry delay before which retry must be performed. + // If not set, the value is 0ns. + MaxRetryDelay time.Duration + + // MaxThrottleDelay is the maximum retry delay when throttled. + // If not set, the value is 0ns. + MaxThrottleDelay time.Duration } +const ( + // DefaultRetryerMaxNumRetries sets maximum number of retries + DefaultRetryerMaxNumRetries = 3 + + // DefaultRetryerMinRetryDelay sets minimum retry delay + DefaultRetryerMinRetryDelay = 30 * time.Millisecond + + // DefaultRetryerMinThrottleDelay sets minimum delay when throttled + DefaultRetryerMinThrottleDelay = 500 * time.Millisecond + + // DefaultRetryerMaxRetryDelay sets maximum retry delay + DefaultRetryerMaxRetryDelay = 300 * time.Second + + // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled + DefaultRetryerMaxThrottleDelay = 300 * time.Second +) + // MaxRetries returns the number of maximum returns the service will use to make // an individual API request. 
func (d DefaultRetryer) MaxRetries() int { return d.NumMaxRetries } +// setRetryerDefaults sets the default values of the retryer if not set +func (d *DefaultRetryer) setRetryerDefaults() { + if d.MinRetryDelay == 0 { + d.MinRetryDelay = DefaultRetryerMinRetryDelay + } + if d.MaxRetryDelay == 0 { + d.MaxRetryDelay = DefaultRetryerMaxRetryDelay + } + if d.MinThrottleDelay == 0 { + d.MinThrottleDelay = DefaultRetryerMinThrottleDelay + } + if d.MaxThrottleDelay == 0 { + d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay + } +} + // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - // Set the upper limit of delay in retrying at ~five minutes - minTime := 30 - throttle := d.shouldThrottle(r) - if throttle { - if delay, ok := getRetryDelay(r); ok { - return delay - } - minTime = 500 + // if number of max retries is zero, no retries will be performed. + if d.NumMaxRetries == 0 { + return 0 + } + + // Sets default value for retryer members + d.setRetryerDefaults() + + // minDelay is the minimum retryer delay + minDelay := d.MinRetryDelay + + var initialDelay time.Duration + + isThrottle := r.IsErrorThrottle() + if isThrottle { + if delay, ok := getRetryAfterDelay(r); ok { + initialDelay = delay + } + minDelay = d.MinThrottleDelay } retryCount := r.RetryCount - if throttle && retryCount > 8 { - retryCount = 8 - } else if retryCount > 13 { - retryCount = 13 + + // maxDelay the maximum retryer delay + maxDelay := d.MaxRetryDelay + + if isThrottle { + maxDelay = d.MaxThrottleDelay + } + + var delay time.Duration + + // Logic to cap the retry count based on the minDelay provided + actualRetryCount := int(math.Log2(float64(minDelay))) + 1 + if actualRetryCount < 63-retryCount { + delay = time.Duration(1< maxDelay { + delay = getJitterDelay(maxDelay / 2) + } + } else { + delay = getJitterDelay(maxDelay / 2) } + return delay + initialDelay +} - delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) - return time.Duration(delay) * time.Millisecond +// getJitterDelay returns a jittered delay for retry +func getJitterDelay(duration time.Duration) time.Duration { + return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration)) } // ShouldRetry returns true if the request should be retried. func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + + // ShouldRetry returns false if number of max retries is 0. + if d.NumMaxRetries == 0 { + return false + } + // If one of the other handlers already set the retry state // we don't want to override it based on the service's state if r.Retryable != nil { return *r.Retryable } - - if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { - return true - } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. 
-func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 502: - case 503: - case 504: - default: - return r.IsErrorThrottle() - } - - return true + return r.IsErrorRetryable() || r.IsErrorThrottle() } // This will look in the Retry-After header, RFC 7231, for how long // it will wait before attempting another request -func getRetryDelay(r *request.Request) (time.Duration, bool) { +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { if !canUseRetryAfterHeader(r) { return 0, false } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index ce9fb896..8958c32d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -67,10 +67,14 @@ func logRequest(r *request.Request) { if !bodySeekable { r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) } - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.ResetBody() + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. + if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } } r.Config.Logger.Log(fmt.Sprintf(logReqMsg, @@ -118,6 +122,12 @@ var LogHTTPResponseHandler = request.NamedHandler{ func logResponse(r *request.Request) { lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) if logBody { r.HTTPResponse.Body = &teeReaderCloser{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go new file mode 100644 index 00000000..881d575f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go @@ -0,0 +1,28 @@ +package client + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// NoOpRetryer provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type NoOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index e9695ef2..fd1e240f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -20,7 +20,7 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. 
By default, // all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), @@ -239,12 +239,19 @@ type Config struct { // Key: aws.String("/foo/bar/moo"), // }) EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool } // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(aws.NewConfig(). // WithMaxRetries(3), @@ -399,6 +406,13 @@ func (c *Config) WithEndpointDiscovery(t bool) *Config { return c } +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + // MergeIn merges the passed in configs into the existing config object. func (c *Config) MergeIn(cfgs ...*Config) { for _, other := range cfgs { @@ -502,6 +516,10 @@ func mergeInConfig(dst *Config, other *Config) { if other.EnableEndpointDiscovery != nil { dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go similarity index 58% rename from vendor/github.com/aws/aws-sdk-go/aws/context.go rename to vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go index 79f42685..2866f9a7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go @@ -1,8 +1,8 @@ +// +build !go1.9 + package aws -import ( - "time" -) +import "time" // Context is an copy of the Go v1.7 stdlib's context.Context interface. // It is represented as a SDK interface to enable you to use the "WithContext" @@ -35,37 +35,3 @@ type Context interface { // functions. Value(key interface{}) interface{} } - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return backgroundCtx -} - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. 
-// -// Expects Context to always return a non-nil error if the Done channel is closed. -func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go deleted file mode 100644 index 064f75c9..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.7 - -package aws - -import "context" - -var ( - backgroundCtx = context.Background() -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go new file mode 100644 index 00000000..3718b26e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go @@ -0,0 +1,11 @@ +// +build go1.9 + +package aws + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go similarity index 59% rename from vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go rename to vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go index 8fdda530..66c5945d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -39,3 +39,18 @@ func (e *emptyCtx) String() string { var ( backgroundCtx = new(emptyCtx) ) + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return backgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go new file mode 100644 index 00000000..9c29f29a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package aws + +import "context" + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. 
+func BackgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go new file mode 100644 index 00000000..304fd156 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go @@ -0,0 +1,24 @@ +package aws + +import ( + "time" +) + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index ff5d58e0..4e076c18 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -179,6 +179,242 @@ func IntValueMap(src map[string]*int) map[string]int { return dst } +// Uint returns a pointer to the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. +func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values uinto a slice of +// uint pointers +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pointers uinto a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values uinto a string +// map of uint pointers +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pointers uinto a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int8 returns a pointer to the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Value returns the value of the int8 pointer passed in or +// 0 if the pointer is nil. 
+func Int8Value(v *int8) int8 { + if v != nil { + return *v + } + return 0 +} + +// Int8Slice converts a slice of int8 values into a slice of +// int8 pointers +func Int8Slice(src []int8) []*int8 { + dst := make([]*int8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int8ValueSlice converts a slice of int8 pointers into a slice of +// int8 values +func Int8ValueSlice(src []*int8) []int8 { + dst := make([]int8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int8Map converts a string map of int8 values into a string +// map of int8 pointers +func Int8Map(src map[string]int8) map[string]*int8 { + dst := make(map[string]*int8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int8ValueMap converts a string map of int8 pointers into a string +// map of int8 values +func Int8ValueMap(src map[string]*int8) map[string]int8 { + dst := make(map[string]int8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int16 returns a pointer to the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Value returns the value of the int16 pointer passed in or +// 0 if the pointer is nil. +func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + // Int64 returns a pointer to the int64 value passed in. func Int64(v int64) *int64 { return &v @@ -238,6 +474,301 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. +func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. 
+func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. +func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. 
+func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. +func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + // Float64 returns a pointer to the float64 value passed in. func Float64(v float64) *float64 { return &v diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index cfcddf3d..0c60e612 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -72,9 +72,9 @@ var ValidateReqSigHandler = request.NamedHandler{ signedTime = r.LastSignedAt } - // 10 minutes to allow for some clock skew/delays in transmission. + // 5 minutes to allow for some clock skew/delays in transmission. // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(10 * time.Minute).After(time.Now()) { + if signedTime.Add(5 * time.Minute).After(time.Now()) { return } @@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) { Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } - // Catch all other request errors. 
+ // Catch all request errors, and let the default retrier determine + // if the error is retryable. r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable // Override the error with a context canceled error, if that was canceled. ctx := r.Context() @@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } - r.RetryCount++ - r.Error = nil - } -}} + r.RetryCount++ + r.Error = nil + } + }} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. Will set r.Error if the endpoint or diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go index a15f496b..ab69c7a6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -17,7 +17,7 @@ var SDKVersionUserAgentHandler = request.NamedHandler{ } const execEnvVar = `AWS_EXECUTION_ENV` -const execEnvUAKey = `exec_env` +const execEnvUAKey = `exec-env` // AddHostExecEnvUserAgentHander is a request handler appending the SDK's // execution environment to the user agent. 
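The vendored aws-sdk-go bump above replaces the SDK's hard-coded backoff with configurable delay bounds on client.DefaultRetryer and introduces client.NoOpRetryer. A rough sketch of how a consumer could opt into either via aws.Config.Retryer; this is illustrative only, assumes the usual session setup, and is not something this provider itself does:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Tuned retryer: any delay field left at zero is back-filled from the
	// DefaultRetryerMin*/DefaultRetryerMax* constants by setRetryerDefaults.
	tuned := session.Must(session.NewSession(&aws.Config{
		Retryer: client.DefaultRetryer{
			NumMaxRetries: client.DefaultRetryerMaxNumRetries,
			MinRetryDelay: 50 * time.Millisecond,
			MaxRetryDelay: 10 * time.Second,
		},
	}))

	// NoOpRetryer: MaxRetries is 0 and ShouldRetry always reports false,
	// so every request gets exactly one attempt.
	oneShot := session.Must(session.NewSession(&aws.Config{
		Retryer: client.NoOpRetryer{},
	}))

	_, _ = tuned, oneShot
}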
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index dc82f4c3..4af59215 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -49,8 +49,11 @@ package credentials import ( + "fmt" "sync" "time" + + "github.com/aws/aws-sdk-go/aws/awserr" ) // AnonymousCredentials is an empty Credential object that can be used as @@ -81,6 +84,12 @@ type Value struct { ProviderName string } +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + // A Provider is the interface for any component which will provide credentials // Value. A provider is required to manage its own Expired state, and what to // be expired means. @@ -97,6 +106,14 @@ type Provider interface { IsExpired() bool } +// An Expirer is an interface that Providers can implement to expose the expiration +// time, if known. If the Provider cannot accurately provide this info, +// it should not implement this interface. +type Expirer interface { + // The time at which the credentials are no longer valid + ExpiresAt() time.Time +} + // An ErrorProvider is a stub credentials provider that always returns an error // this is used by the SDK when construction a known provider is not possible // due to an error. @@ -163,6 +180,11 @@ func (e *Expiry) IsExpired() bool { return e.expiration.Before(curTime()) } +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration +} + // A Credentials provides concurrency safe retrieval of AWS credentials Value. // Credentials will cache the credentials value until they expire. Once the value // expires the next Get will attempt to retrieve valid credentials. @@ -255,3 +277,23 @@ func (c *Credentials) IsExpired() bool { func (c *Credentials) isExpired() bool { return c.forceRefresh || c.provider.IsExpired() } + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. 
+func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), + nil) + } + if c.forceRefresh { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index 0ed791be..43d4ed38 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkuri" ) @@ -142,7 +143,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { } if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + return nil, awserr.New(request.ErrCodeSerialization, + "failed to read EC2 instance role from metadata service", err) } return credsList, nil @@ -164,7 +166,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred respCreds := ec2RoleCredRespBody{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { return ec2RoleCredRespBody{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index ace51313..1a7af53a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -39,6 +39,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" ) // ProviderName is the name of the credentials provider. @@ -97,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin return p } -// NewCredentialsClient returns a Credentials wrapper for retrieving credentials -// from an arbitrary endpoint concurrently. The client will request the +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. 
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) } @@ -174,7 +175,7 @@ func unmarshalHandler(r *request.Request) { out := r.Data.(*getCredentialsOutput) if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode endpoint credentials", err, ) @@ -185,11 +186,15 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() var errOut errorOutput - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { - r.Error = awserr.New("SerializationError", - "failed to decode endpoint credentials", - err, + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, ) + return } // Response body format is not consistent between metadata endpoints. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 00000000..1980c8c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,425 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. + + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. + svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. 
+ creds := processcreds.NewCredentialsTimeout( + "/path/to/command", + time.Duration(500) * time.Millisecond) + +If you need more control, you can set any configurable options in the +credentials using one or more option functions. For example, you can set a two +minute timeout, a credential duration of 60 minutes, and a maximum stdout +buffer size of 2k. + + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. + ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong version in process output (not 1)" + + // errMsgProcessProviderMissKey missing access key id in output + errMsgProcessProviderMissKey = "missing AccessKeyId in process output" + + // errMsgProcessProviderMissSecret missing secret acess key in output + errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" + + // errMsgProcessProviderPrepareCmd prepare of command failed + errMsgProcessProviderPrepareCmd = "failed to prepare command" + + // errMsgProcessProviderEmptyCmd command must not be empty + errMsgProcessProviderEmptyCmd = "command must not be empty" + + // errMsgProcessProviderPipe failed to initialize pipe + errMsgProcessProviderPipe = "failed to initialize pipe" + + // DefaultDuration is the default amount of time in minutes that the + // credentials will be valid for. + DefaultDuration = time.Duration(15) * time.Minute + + // DefaultBufSize limits buffer size from growing to an enormous + // amount due to a faulty process. + DefaultBufSize = 1024 + + // DefaultTimeout default limit on time a process can run. 
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+	staticCreds bool
+	credentials.Expiry
+	originalCommand []string
+
+	// Expiry duration of the credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause request to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// The os command (run via os/exec) that should return a JSON document
+	// with credential information on its stdout.
+	command *exec.Cmd
+
+	// MaxBufSize limits memory usage from growing to an enormous
+	// amount due to a faulty process.
+	MaxBufSize int
+
+	// Timeout limits the time a process can run.
+	Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
+	p := &ProcessProvider{
+		command:    exec.Command(command),
+		Duration:   DefaultDuration,
+		Timeout:    DefaultTimeout,
+		MaxBufSize: DefaultBufSize,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsTimeout returns a pointer to a new Credentials object with
+// the specified command and timeout, and default duration and max buffer size.
+func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
+	p := NewCredentials(command, func(opt *ProcessProvider) {
+		opt.Timeout = timeout
+	})
+
+	return p
+}
+
+// NewCredentialsCommand returns a pointer to a new Credentials object with
+// the specified command, and default timeout, duration and max buffer size.
+func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
+	p := &ProcessProvider{
+		command:    command,
+		Duration:   DefaultDuration,
+		Timeout:    DefaultTimeout,
+		MaxBufSize: DefaultBufSize,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+type credentialProcessResponse struct {
+	Version         int
+	AccessKeyID     string `json:"AccessKeyId"`
+	SecretAccessKey string
+	SessionToken    string
+	Expiration      *time.Time
+}
+
+// Retrieve executes the 'credential_process' and returns the credentials.
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. +func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. 
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 4108e433..2e528d13 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -80,16 +80,18 @@ package stscreds import ( "fmt" + "os" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkrand" "github.com/aws/aws-sdk-go/service/sts" ) -// StdinTokenProvider will prompt on stdout and read from stdin for a string value. +// StdinTokenProvider will prompt on stderr and read from stdin for a string value. // An error is returned if reading from stdin fails. // // Use this function go read MFA tokens from stdin. The function makes no attempt @@ -102,7 +104,7 @@ import ( // Will wait forever until something is provided on the stdin. 
 func StdinTokenProvider() (string, error) {
 	var v string
-	fmt.Printf("Assume Role MFA token code: ")
+	fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
 	_, err := fmt.Scanln(&v)
 
 	return v, err
@@ -193,6 +195,18 @@ type AssumeRoleProvider struct {
 	//
 	// If ExpiryWindow is 0 or less it will be ignored.
 	ExpiryWindow time.Duration
+
+	// MaxJitterFrac reduces the effective Duration of each credential requested
+	// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+	// have a value between 0 and 1. Any other value may lead to unexpected
+	// behavior. With a MaxJitterFrac value of 0 (the default), no jitter will
+	// be used.
+	//
+	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+	// AssumeRole call will be made with an arbitrary Duration between 27m and
+	// 30m.
+	//
+	// MaxJitterFrac should not be negative.
+	MaxJitterFrac float64
 }
 
 // NewCredentials returns a pointer to a new Credentials object wrapping the
@@ -244,7 +258,6 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*
 
 // Retrieve generates a new set of temporary credentials using STS.
 func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
-
 	// Apply defaults where parameters are not set.
 	if p.RoleSessionName == "" {
 		// Try to work out a role name that will hopefully end up unique.
@@ -254,8 +267,9 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
 		// Expire as often as AWS permits.
 		p.Duration = DefaultDuration
 	}
+	jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
 	input := &sts.AssumeRoleInput{
-		DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
+		DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
 		RoleArn:         aws.String(p.RoleARN),
 		RoleSessionName: aws.String(p.RoleSessionName),
 		ExternalId:      p.ExternalID,
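A quick editorial sanity check on the jitter arithmetic documented above (not part of the vendored file), using math/rand in place of the SDK's internal sdkrand package; with a Duration of 30m and a MaxJitterFrac of 0.1, the requested DurationSeconds always falls between 27 and 30 minutes:

	package main

	import (
		"fmt"
		"math/rand"
		"time"
	)

	func main() {
		duration := 30 * time.Minute
		maxJitterFrac := 0.1

		// rand.Float64 returns a value in [0, 1), so the jitter is at most
		// 10% of the duration and the effective duration stays above 27m.
		jitter := time.Duration(rand.Float64() * maxJitterFrac * float64(duration))
		fmt.Println("DurationSeconds:", int64((duration-jitter)/time.Second))
	}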
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 00000000..b20b6339
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,100 @@
+package stscreds
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/aws/aws-sdk-go/service/sts/stsiface"
+)
+
+const (
+	// ErrCodeWebIdentity will be used as an error code when constructing
+	// a new error to be returned during session creation or retrieval.
+	ErrCodeWebIdentity = "WebIdentityErr"
+
+	// WebIdentityProviderName is the web identity provider name
+	WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// now is used to return a time.Time object representing
+// the current time. This can be used to easily test and
+// compare test values.
+var now = time.Now
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+	credentials.Expiry
+
+	client       stsiface.STSAPI
+	ExpiryWindow time.Duration
+
+	tokenFilePath   string
+	roleARN         string
+	roleSessionName string
+}
+
+// NewWebIdentityCredentials will return a new set of credentials with a given
+// configuration, role arn, and token file path.
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
+	svc := sts.New(c)
+	p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
+	return credentials.NewCredentials(p)
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI
+func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+	return &WebIdentityRoleProvider{
+		client:          svc,
+		tokenFilePath:   path,
+		roleARN:         roleARN,
+		roleSessionName: roleSessionName,
+	}
+}
+
+// Retrieve will attempt to assume a role using the token located at the
+// 'WebIdentityTokenFilePath' destination; if that path is empty an error
+// will be returned.
+func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
+	b, err := ioutil.ReadFile(p.tokenFilePath)
+	if err != nil {
+		errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
+	}
+
+	sessionName := p.roleSessionName
+	if len(sessionName) == 0 {
+		// session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(now().UnixNano(), 10)
+	}
+	req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
+		RoleArn:          &p.roleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+	})
+	// InvalidIdentityToken error is a temporary error that can occur
+	// when assuming a role with a JWT web identity token.
+	req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
+	if err := req.Send(); err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
+	}
+
+	p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
+
+	value := credentials.Value{
+		AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
+		SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
+		SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
+		ProviderName:    WebIdentityProviderName,
+	}
+	return value, nil
+}
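As an editorial aside (not part of the vendored file), the constructors above can be wired into any service client. A minimal sketch, assuming a default session and using placeholder values for the role ARN, session name, and token file path:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/sts"
	)

	func main() {
		sess := session.Must(session.NewSession())

		// Role ARN, session name, and token file path are placeholders.
		creds := stscreds.NewWebIdentityCredentials(
			sess,
			"arn:aws:iam::123456789012:role/example-role",
			"example-session",
			"/path/to/oidc/token",
		)

		// Any service client can consume the credentials; STS shown here.
		svc := sts.New(sess, &aws.Config{Credentials: creds})
		out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
		if err != nil {
			panic(err)
		}
		fmt.Println(aws.StringValue(out.Arn))
	}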
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
index 152d785b..25a66d1d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -1,30 +1,61 @@
-// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
-// via UDP connection. Using the Start function will enable the reporting of
-// metrics on a given port. If Start is called, with different parameters, again,
-// a panic will occur.
+// Package csm provides the Client Side Monitoring (CSM) client which enables
+// sending metrics via UDP connection to the CSM agent. This package provides
+// control options, and configuration for the CSM client. The client can be
+// controlled manually, or automatically via the SDK's Session configuration.
 //
-// Pause can be called to pause any metrics publishing on a given port. Sessions
-// that have had their handlers modified via InjectHandlers may still be used.
-// However, the handlers will act as a no-op meaning no metrics will be published.
+// Enabling CSM client via SDK's Session configuration
+//
+// The CSM client can be enabled automatically via SDK's Session configuration.
+// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
+// environment variable is set to a non-empty value.
+//
+// The configuration options for the CSM client via the SDK's session
+// configuration are:
+//
+//	* AWS_CSM_PORT=
+//	The port number the CSM agent will receive metrics on.
+//
+//	* AWS_CSM_HOST=
+//	The hostname, or IP address the CSM agent will receive metrics on.
+//	Without port number.
+//
+// Manually enabling the CSM client
+//
+// The CSM client can be started, paused, and resumed manually. The Start
+// function will enable the CSM client to publish metrics to the CSM agent. It
+// is safe to call Start concurrently, but if Start is called additional times
+// with different ClientID or address it will panic.
 //
-// Example:
 //	r, err := csm.Start("clientID", ":31000")
 //	if err != nil {
 //		panic(fmt.Errorf("failed starting CSM:  %v", err))
 //	}
 //
+// When controlling the CSM client manually, you must also inject its request
+// handlers into the SDK's Session configuration for the SDK's API clients to
+// publish metrics.
+//
 //	sess, err := session.NewSession(&aws.Config{})
 //	if err != nil {
 //		panic(fmt.Errorf("failed loading session: %v", err))
 //	}
 //
+//	// Add CSM client's metric publishing request handlers to the SDK's
+//	// Session Configuration.
 //	r.InjectHandlers(&sess.Handlers)
 //
-//	client := s3.New(sess)
-//	resp, err := client.GetObject(&s3.GetObjectInput{
-//		Bucket: aws.String("bucket"),
-//		Key: aws.String("key"),
-//	})
+// Controlling CSM client
+//
+// Once the CSM client has been enabled the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent. If the Get function is called before the reporter is enabled via the
+// Start function or the SDK's Session configuration, nil will be returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+//	// Get the CSM client Reporter.
+//	r := csm.Get()
+//
 //	// Will pause monitoring
 //	r.Pause()
@@ -35,12 +66,4 @@
 //
 //	// Resume monitoring
 //	r.Continue()
-//
-// Start returns a Reporter that is used to enable or disable monitoring. If
-// access to the Reporter is required later, calling Get will return the Reporter
-// singleton.
-//
-// Example:
-//	r := csm.Get()
-//	r.Continue()
 package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
index 2f0c6eac..4b19e280 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -2,6 +2,7 @@ package csm
 
 import (
 	"fmt"
+	"strings"
 	"sync"
 )
 
@@ -9,19 +10,40 @@ var (
 	lock sync.Mutex
 )
 
-// Client side metric handler names
 const (
-	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
-	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+	// DefaultPort is used when no port is specified.
+	DefaultPort = "31000"
+
+	// DefaultHost is the host that will be used when none is specified.
+	DefaultHost = "127.0.0.1"
 )
 
-// Start will start the a long running go routine to capture
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string { + if len(host) == 0 || strings.EqualFold(host, "localhost") { + host = DefaultHost + } + + if len(port) == 0 { + port = DefaultPort + } + + // Only IP6 host can contain a colon + if strings.Contains(host, ":") { + return "[" + host + "]:" + port + } + + return host + ":" + port +} + +// Start will start a long running go routine to capture // client side metrics. Calling start multiple time will only // start the metric listener once and will panic if a different // client ID or port is passed in. // -// Example: -// r, err := csm.Start("clientID", "127.0.0.1:8094") +// r, err := csm.Start("clientID", "127.0.0.1:31000") // if err != nil { // panic(fmt.Errorf("expected no error, but received %v", err)) // } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go index 6f57024d..5bacc791 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -3,6 +3,8 @@ package csm import ( "strconv" "time" + + "github.com/aws/aws-sdk-go/aws" ) type metricTime time.Time @@ -39,6 +41,12 @@ type metric struct { SDKException *string `json:"SdkException,omitempty"` SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + DestinationIP *string `json:"DestinationIp,omitempty"` ConnectionReused *int `json:"ConnectionReused,omitempty"` @@ -51,3 +59,51 @@ type metric struct { MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` } + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go index 514fc373..82a3e345 100644 --- 
a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -16,25 +16,26 @@ var ( type metricChan struct { ch chan metric - paused int64 + paused *int64 } func newMetricChan(size int) metricChan { return metricChan{ - ch: make(chan metric, size), + ch: make(chan metric, size), + paused: new(int64), } } func (ch *metricChan) Pause() { - atomic.StoreInt64(&ch.paused, pausedEnum) + atomic.StoreInt64(ch.paused, pausedEnum) } func (ch *metricChan) Continue() { - atomic.StoreInt64(&ch.paused, runningEnum) + atomic.StoreInt64(ch.paused, runningEnum) } func (ch *metricChan) IsPaused() bool { - v := atomic.LoadInt64(&ch.paused) + v := atomic.LoadInt64(ch.paused) return v == pausedEnum } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 00000000..54a99280 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go index 11861844..c7008d8c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -10,11 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) -const ( - // DefaultPort is used when no port is specified - DefaultPort = "31000" -) - // Reporter will gather metrics of API requests made and // send those metrics to the CSM endpoint. 
 type Reporter struct {
@@ -82,26 +77,29 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
 
 	if r.Error != nil {
 		if awserr, ok := r.Error.(awserr.Error); ok {
-			setError(&m, awserr)
+			m.SetException(getMetricException(awserr))
 		}
 	}
 
+	m.TruncateFields()
 	rep.metricsCh.Push(m)
 }
 
-func setError(m *metric, err awserr.Error) {
+func getMetricException(err awserr.Error) metricException {
 	msg := err.Error()
 	code := err.Code()
 
 	switch code {
 	case "RequestError",
-		"SerializationError",
+		request.ErrCodeSerialization,
 		request.CanceledErrorCode:
-		m.SDKException = &code
-		m.SDKExceptionMessage = &msg
+		return sdkException{
+			requestException{exception: code, message: msg},
+		}
 	default:
-		m.AWSException = &code
-		m.AWSExceptionMessage = &msg
+		return awsException{
+			requestException{exception: code, message: msg},
+		}
 	}
 }
 
@@ -116,14 +114,27 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
 		API:                aws.String(r.Operation.Name),
 		Service:            aws.String(r.ClientInfo.ServiceID),
 		Timestamp:          (*metricTime)(&now),
+		UserAgent:          aws.String(r.HTTPRequest.Header.Get("User-Agent")),
 		Type:               aws.String("ApiCall"),
 		AttemptCount:       aws.Int(r.RetryCount + 1),
 		Region:             r.Config.Region,
-		Latency:            aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
+		Latency:            aws.Int(int(time.Since(r.Time) / time.Millisecond)),
 		XAmzRequestID:      aws.String(r.RequestID),
 		MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
 	}
 
+	if r.HTTPResponse != nil {
+		m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
+	}
+
+	if r.Error != nil {
+		if awserr, ok := r.Error.(awserr.Error); ok {
+			m.SetFinalException(getMetricException(awserr))
+		}
+	}
+
+	m.TruncateFields()
+
 	// TODO: Probably want to figure something out for logging dropped
 	// metrics
 	rep.metricsCh.Push(m)
@@ -174,8 +185,9 @@ func (rep *Reporter) start() {
 	}
 }
 
-// Pause will pause the metric channel preventing any new metrics from
-// being added.
+// Pause will pause the metric channel preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but if
+// called concurrently with Continue can lead to unexpected state.
 func (rep *Reporter) Pause() {
 	lock.Lock()
 	defer lock.Unlock()
@@ -187,8 +199,9 @@ func (rep *Reporter) Pause() {
 	rep.close()
 }
 
-// Continue will reopen the metric channel and allow for monitoring
-// to be resumed.
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// if called concurrently with Pause can lead to unexpected state.
 func (rep *Reporter) Continue() {
 	lock.Lock()
 	defer lock.Unlock()
@@ -203,10 +216,18 @@ func (rep *Reporter) Continue() {
 	rep.metricsCh.Continue()
 }
 
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
 // InjectHandlers will enable client side metrics and inject the proper
 // handlers to handle how metrics are sent.
 //
-// Example:
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
+// // // Start must be called in order to inject the correct handlers // r, err := csm.Start("clientID", "127.0.0.1:8094") // if err != nil { @@ -223,13 +244,15 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { return } - apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric} - apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric} - - handlers.Complete.PushFrontNamed(apiCallHandler) - handlers.Complete.PushFrontNamed(apiCallAttemptHandler) + handlers.Complete.PushFrontNamed(request.NamedHandler{ + Name: APICallMetricHandlerName, + Fn: rep.sendAPICallMetric, + }) - handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler) + handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ + Name: APICallAttemptMetricHandlerName, + Fn: rep.sendAPICallAttemptMetric, + }) } // boolIntValue return 1 for true and 0 for false. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index c215cd3f..d126764c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -24,8 +24,9 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) + err := req.Send() - return output.Content, req.Send() + return output.Content, err } // GetUserData returns the userdata that was configured for the service. If @@ -45,8 +46,9 @@ func (c *EC2Metadata) GetUserData() (string, error) { r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) } }) + err := req.Send() - return output.Content, req.Send() + return output.Content, err } // GetDynamicData uses the path provided to request information from the EC2 @@ -61,8 +63,9 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) + err := req.Send() - return output.Content, req.Send() + return output.Content, err } // GetInstanceIdentityDocument retrieves an identity document describing an @@ -79,7 +82,7 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument doc := EC2InstanceIdentityDocument{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 instance identity document", err) } @@ -98,7 +101,7 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { info := EC2IAMInfo{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { return EC2IAMInfo{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 IAM info", err) } @@ -118,6 +121,10 @@ func (c *EC2Metadata) Region() (string, error) { return "", err } + if len(resp) == 0 { + return "", awserr.New("EC2MetadataError", "invalid Region response", nil) + } + // returns region without the suffix. 
Eg: us-west-2a becomes us-west-2 return resp[:len(resp)-1], nil } @@ -145,18 +152,19 @@ type EC2IAMInfo struct { // An EC2InstanceIdentityDocument provides the shape for unmarshaling // an instance identity document type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index 53457cac..4c5636e3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -4,7 +4,7 @@ // This package's client can be disabled completely by setting the environment // variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to // true instructs the SDK to disable the EC2 Metadata client. The client cannot -// be used while the environemnt variable is set to true, (case insensitive). +// be used while the environment variable is set to true, (case insensitive). 
package ec2metadata import ( @@ -92,6 +92,9 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio svc.Handlers.Send.SwapNamed(request.NamedHandler{ Name: corehandlers.SendHandler.Name, Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } r.Error = awserr.New( request.CanceledErrorCode, "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", @@ -120,7 +123,7 @@ func unmarshalHandler(r *request.Request) { defer r.HTTPResponse.Body.Close() b := &bytes.Buffer{} if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err) return } @@ -133,7 +136,7 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() b := &bytes.Buffer{} if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 1ddeae10..87b9ff3f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -85,6 +85,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol custAddS3DualStack(p) custRmIotDataService(p) custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) } return ps, nil @@ -149,6 +150,33 @@ func custFixAppAutoscalingChina(p *partition) { p.Services[serviceName] = s } +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) + return + } + + s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + type decodeModelError struct { awsError } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index c01c9018..452cefda 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -11,10 +11,13 @@ const ( AwsPartitionID = "aws" // AWS Standard partition. AwsCnPartitionID = "aws-cn" // AWS China partition. AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. ) // AWS Standard partition's regions. const ( + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). 
@@ -22,9 +25,11 @@ const ( ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). CaCentral1RegionID = "ca-central-1" // Canada (Central). EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuNorth1RegionID = "eu-north-1" // EU (Stockholm). EuWest1RegionID = "eu-west-1" // EU (Ireland). EuWest2RegionID = "eu-west-2" // EU (London). EuWest3RegionID = "eu-west-3" // EU (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -44,143 +49,18 @@ const ( UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). ) -// Service identifiers +// AWS ISO (US) partition's regions. const ( - A4bServiceID = "a4b" // A4b. - AcmServiceID = "acm" // Acm. - AcmPcaServiceID = "acm-pca" // AcmPca. - ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. - ApiPricingServiceID = "api.pricing" // ApiPricing. - ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AppsyncServiceID = "appsync" // Appsync. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - CeServiceID = "ce" // Ce. - ChimeServiceID = "chime" // Chime. - Cloud9ServiceID = "cloud9" // Cloud9. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ComprehendServiceID = "comprehend" // Comprehend. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DaxServiceID = "dax" // Dax. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. 
- EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - FmsServiceID = "fms" // Fms. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GlueServiceID = "glue" // Glue. - GreengrassServiceID = "greengrass" // Greengrass. - GuarddutyServiceID = "guardduty" // Guardduty. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - IotanalyticsServiceID = "iotanalytics" // Iotanalytics. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MediaconvertServiceID = "mediaconvert" // Mediaconvert. - MedialiveServiceID = "medialive" // Medialive. - MediapackageServiceID = "mediapackage" // Mediapackage. - MediastoreServiceID = "mediastore" // Mediastore. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MghServiceID = "mgh" // Mgh. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - NeptuneServiceID = "neptune" // Neptune. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - ResourceGroupsServiceID = "resource-groups" // ResourceGroups. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. - S3ServiceID = "s3" // S3. - S3ControlServiceID = "s3-control" // S3Control. - SdbServiceID = "sdb" // Sdb. - SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. - ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - TranslateServiceID = "translate" // Translate. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. 
- WorkdocsServiceID = "workdocs" // Workdocs. - WorkmailServiceID = "workmail" // Workmail. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). ) // DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). // // Use DefaultPartitions() to get the list of the default partitions. func DefaultResolver() Resolver { @@ -188,7 +68,7 @@ func DefaultResolver() Resolver { } // DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). // // partitions := endpoints.DefaultPartitions // for _, p := range partitions { @@ -202,6 +82,8 @@ var defaultPartitions = partitions{ awsPartition, awscnPartition, awsusgovPartition, + awsisoPartition, + awsisobPartition, } // AwsPartition returns the Resolver for AWS Standard. @@ -215,7 +97,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") return reg }(), }, @@ -225,6 +107,9 @@ var awsPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, "ap-northeast-1": region{ Description: "Asia Pacific (Tokyo)", }, @@ -246,6 +131,9 @@ var awsPartition = partition{ "eu-central-1": region{ Description: "EU (Frankfurt)", }, + "eu-north-1": region{ + Description: "EU (Stockholm)", + }, "eu-west-1": region{ Description: "EU (Ireland)", }, @@ -255,6 +143,9 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "EU (Paris)", }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, "sa-east-1": region{ Description: "South America (Sao Paulo)", }, @@ -281,6 +172,7 @@ var awsPartition = partition{ "acm": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -288,9 +180,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -303,26 +197,147 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "api.ecr": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "api.mediatailor": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "api.pricing": service{ @@ -339,6 +354,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -346,17 +362,45 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", 
+ }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "apigateway": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -364,9 +408,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -383,6 +429,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -390,9 +437,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -400,6 +449,24 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "appmesh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "appstream2": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -409,6 +476,10 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, @@ -424,6 +495,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -432,12 +504,15 @@ var awsPartition = partition{ "athena": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -450,6 +525,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -457,9 +533,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, 
"eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -476,16 +554,42 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, "batch": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -493,8 +597,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -548,7 +655,9 @@ var awsPartition = partition{ "cloud9": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -560,6 +669,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -571,6 +681,7 @@ var awsPartition = partition{ "cloudformation": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -578,9 +689,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -624,6 +737,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -631,9 +745,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -658,6 +774,7 @@ var awsPartition = partition{ "cloudtrail": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -665,9 +782,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, 
"eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -678,6 +797,7 @@ var awsPartition = partition{ "codebuild": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -685,9 +805,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -729,6 +851,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -748,6 +871,7 @@ var awsPartition = partition{ "codedeploy": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -755,9 +879,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -799,6 +925,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -880,9 +1007,25 @@ var awsPartition = partition{ Defaults: endpoint{ Protocols: []string{"https"}, }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehendmedical": service{ + Endpoints: endpoints{ "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -891,6 +1034,7 @@ var awsPartition = partition{ "config": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -898,9 +1042,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -908,12 +1054,35 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "connect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cur": service{ Endpoints: endpoints{ "us-east-1": endpoint{}, }, }, + "data.mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + 
}, + }, "datapipeline": service{ Endpoints: endpoints{ @@ -924,6 +1093,46 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "datasync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "dax": service{ Endpoints: endpoints{ @@ -931,6 +1140,7 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, @@ -948,6 +1158,7 @@ var awsPartition = partition{ "directconnect": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -955,9 +1166,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -974,6 +1187,7 @@ var awsPartition = partition{ "dms": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -981,9 +1195,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -991,18 +1207,78 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "ds": service{ + "docdb": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + 
Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1015,16 +1291,24 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -1032,11 +1316,36 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "ec2": service{ @@ -1044,6 +1353,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1051,9 +1361,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1072,29 +1384,10 @@ var awsPartition = partition{ }, }, }, - "ecr": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, 
- "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, "ecs": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1102,9 +1395,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1115,6 +1410,7 @@ var awsPartition = partition{ "elasticache": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1122,6 +1418,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1131,16 +1428,18 @@ var awsPartition = partition{ Region: "us-west-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticbeanstalk": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1148,9 +1447,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1163,10 +1464,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1178,6 +1483,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1185,9 +1491,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1201,6 +1509,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1210,10 +1519,12 @@ var awsPartition = partition{ "eu-central-1": endpoint{ SSLCommonName: 
"{service}.{region}.{dnsSuffix}", }, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", }, @@ -1238,9 +1549,12 @@ var awsPartition = partition{ "email": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "entitlement.marketplace": service{ @@ -1256,6 +1570,7 @@ var awsPartition = partition{ "es": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1263,19 +1578,28 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "events": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1283,9 +1607,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1296,6 +1622,7 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1303,6 +1630,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1318,9 +1646,33 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "gamelift": service{ @@ -1347,6 +1699,7 @@ var awsPartition = 
partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1354,9 +1707,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1367,6 +1722,7 @@ var awsPartition = partition{ "glue": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1374,10 +1730,15 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1388,19 +1749,32 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, + "groundstation": service{ + + Endpoints: endpoints{ + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "guardduty": service{ IsRegionalized: boxedTrue, Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1408,14 +1782,40 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "health": service{ @@ -1460,7 +1860,9 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1474,16 +1876,23 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": 
endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1498,58 +1907,165 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "kinesis": service{ + "iotevents": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisvideo": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "kms": service{ + "ioteventsdata": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + 
"ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1557,9 +2073,43 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1567,9 +2117,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1598,6 +2150,7 @@ var awsPartition = partition{ "logs": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1605,9 +2158,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1628,6 +2183,26 @@ var 
awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "mediaconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "mediaconvert": service{ Endpoints: endpoints{ @@ -1640,6 +2215,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1656,6 +2232,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, @@ -1672,6 +2249,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, @@ -1686,6 +2264,7 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, @@ -1698,6 +2277,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1705,9 +2285,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1744,6 +2326,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1751,9 +2334,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1761,6 +2346,25 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "mq": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "mturk-requester": service{ IsRegionalized: boxedFalse, @@ -1774,12 +2378,48 @@ var awsPartition = partition{ "neptune": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: 
"rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, "eu-central-1": endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, "eu-west-1": endpoint{ Hostname: "rds.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1866,8 +2506,12 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "polly": service{ @@ -1880,6 +2524,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1890,29 +2535,29 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "rds": service{ + "projects.iot1click": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, - "redshift": service{ + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1922,30 +2567,85 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "rekognition": service{ + "rds": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": 
service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, "resource-groups": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1953,13 +2653,51 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1982,6 +2720,27 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "runtime.lex": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -1997,6 +2756,7 @@ var awsPartition = partition{ "runtime.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2004,12 +2764,39 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, 
"eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "s3": service{ @@ -2023,6 +2810,7 @@ var awsPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{ Hostname: "s3.ap-northeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -2039,12 +2827,14 @@ var awsPartition = partition{ }, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -2129,6 +2919,13 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, "eu-west-1": endpoint{ Hostname: "s3-control.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, @@ -2243,6 +3040,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2277,6 +3075,28 @@ var awsPartition = partition{ }, }, }, + "securityhub": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "serverlessrepo": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -2303,12 +3123,18 @@ var awsPartition = partition{ "eu-central-1": endpoint{ Protocols: []string{"https"}, }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, "eu-west-1": endpoint{ Protocols: []string{"https"}, }, "eu-west-2": endpoint{ Protocols: []string{"https"}, }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, "sa-east-1": endpoint{ Protocols: []string{"https"}, }, @@ -2336,6 +3162,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": 
endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2373,6 +3200,7 @@ var awsPartition = partition{ "servicediscovery": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2380,9 +3208,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2390,10 +3220,20 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "session.qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "shield": service{ IsRegionalized: boxedFalse, Defaults: endpoint{ - SSLCommonName: "Shield.us-east-1.amazonaws.com", + SSLCommonName: "shield.us-east-1.amazonaws.com", Protocols: []string{"https"}, }, Endpoints: endpoints{ @@ -2403,6 +3243,7 @@ var awsPartition = partition{ "sms": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2410,9 +3251,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2424,6 +3267,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2444,6 +3288,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2451,9 +3296,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2467,6 +3314,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2474,6 +3322,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2501,7 +3350,8 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", }, @@ -2513,6 +3363,7 @@ var awsPartition = partition{ "ssm": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2520,9 +3371,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": 
endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2533,6 +3386,7 @@ var awsPartition = partition{ "states": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2540,8 +3394,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -2551,6 +3409,7 @@ var awsPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2558,9 +3417,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2582,10 +3443,17 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -2593,11 +3461,36 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "sts": service{ @@ -2609,6 +3502,12 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "sts.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{ Hostname: "sts.ap-northeast-2.amazonaws.com", @@ -2622,11 +3521,18 @@ var awsPartition = partition{ "aws-global": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + 
"me-south-1": endpoint{ + Hostname: "sts.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "sts-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2657,14 +3563,21 @@ var awsPartition = partition{ }, }, "support": service{ + PartitionEndpoint: "aws-global", Endpoints: endpoints{ - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, "swf": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2672,9 +3585,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2684,6 +3599,29 @@ var awsPartition = partition{ }, "tagging": service{ + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2692,6 +3630,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2707,8 +3646,14 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "translate-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2748,9 +3693,17 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -2797,6 +3750,7 @@ var awsPartition = partition{ "xray": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2804,9 +3758,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": 
endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2846,7 +3802,24 @@ var awscnPartition = partition{ }, }, Services: services{ - "apigateway": service{ + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "apigateway": service{ Endpoints: endpoints{ "cn-north-1": endpoint{}, @@ -2882,6 +3855,20 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "cloudtrail": service{ Endpoints: endpoints{ @@ -2966,13 +3953,6 @@ var awscnPartition = partition{ }, }, }, - "ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, "ecs": service{ Endpoints: endpoints{ @@ -3026,6 +4006,19 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "glacier": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -3035,6 +4028,15 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -3055,7 +4057,8 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "kinesis": service{ @@ -3065,6 +4068,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -3072,6 +4082,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "logs": service{ Endpoints: endpoints{ @@ -3079,6 +4096,17 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "monitoring": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -3088,6 +4116,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ @@ -3173,6 +4207,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, 
+ }, + }, "storagegateway": service{ Endpoints: endpoints{ @@ -3198,6 +4239,18 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, "swf": service{ Endpoints: endpoints{ @@ -3251,6 +4304,32 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "api.sagemaker": service{ Endpoints: endpoints{ @@ -3265,6 +4344,18 @@ var awsusgovPartition = partition{ }, }, "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, @@ -3306,6 +4397,7 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -3316,10 +4408,30 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "codedeploy": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", @@ -3329,6 +4441,14 @@ var awsusgovPartition = partition{ }, }, }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -3336,6 +4456,18 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "directconnect": service{ Endpoints: endpoints{ @@ -3350,10 +4482,23 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "ds": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "dynamodb": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: 
"dynamodb.us-gov-west-1.amazonaws.com", @@ -3381,13 +4526,6 @@ var awsusgovPartition = partition{ }, }, }, - "ecr": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, "ecs": service{ Endpoints: endpoints{ @@ -3415,6 +4553,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "elasticloadbalancing": service{ Endpoints: endpoints{ @@ -3436,6 +4580,13 @@ var awsusgovPartition = partition{ "es": service{ Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -3446,6 +4597,13 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "firehose": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "glacier": service{ Endpoints: endpoints{ @@ -3455,6 +4613,22 @@ var awsusgovPartition = partition{ }, }, }, + "glue": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "guardduty": service{ IsRegionalized: boxedTrue, Defaults: endpoint{ @@ -3464,6 +4638,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "health": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -3504,6 +4684,12 @@ var awsusgovPartition = partition{ "kms": service{ Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -3515,6 +4701,13 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "license-manager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "logs": service{ Endpoints: endpoints{ @@ -3522,6 +4715,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "metering.marketplace": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -3529,6 +4728,7 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -3539,9 +4739,46 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, - "polly": service{ + "neptune": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: 
endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -3565,6 +4802,19 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "runtime.sagemaker": service{ Endpoints: endpoints{ @@ -3628,6 +4878,43 @@ var awsusgovPartition = partition{ }, }, }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "sms": service{ Endpoints: endpoints{ @@ -3638,6 +4925,7 @@ var awsusgovPartition = partition{ "snowball": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -3688,6 +4976,12 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "dynamodb.us-gov-west-1.amazonaws.com", @@ -3732,5 +5026,626 @@ var awsusgovPartition = partition{ }, }, }, + "waf-regional": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoPartition returns the Resolver for AWS ISO (US). 
+func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ 
+ "ProdFips": endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). 
+func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: 
endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 00000000..ca8fc828 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. 
+ CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. 
+ Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index e29c0951..9c936be6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -35,7 +35,7 @@ type Options struct { // // If resolving an endpoint on the partition list the provided region will // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unkonwn and resolving + // endpoint ID with. If both the service and region are unknown and resolving // the endpoint on partition list an UnknownEndpointError error will be returned. // // If resolving an endpoint on a partition specific resolver that partition's @@ -170,10 +170,13 @@ func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { // A Partition provides the ability to enumerate the partition's regions // and services. type Partition struct { - id string - p *partition + id, dnsSuffix string + p *partition } +// DNSSuffix returns the base domain name of the partition. +func (p Partition) DNSSuffix() string { return p.dnsSuffix } +
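For reference, a minimal sketch (not part of the vendored patch) of how the new Partition.DNSSuffix accessor combines with the existing resolver API once this change is vendored; the partition, service, and region below are illustrative choices:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// AwsCnPartition is one of the generated partition resolvers;
	// DNSSuffix is the accessor added in the hunk above.
	p := endpoints.AwsCnPartition()
	fmt.Println(p.ID(), p.DNSSuffix()) // aws-cn amazonaws.com.cn

	// EndpointFor resolves a concrete endpoint within the partition.
	ep, err := p.EndpointFor("s3", "cn-north-1")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(ep.URL) // e.g. https://s3.cn-north-1.amazonaws.com.cn
}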
// ID returns the identifier of the partition. func (p Partition) ID() string { return p.id } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index ff6f76db..523ad79a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -54,8 +54,9 @@ type partition struct { func (p partition) Partition() Partition { return Partition{ - id: p.ID, - p: &p, + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go index 05e92df2..0fdfcc56 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -16,6 +16,10 @@ import ( type CodeGenOptions struct { // Options for how the model will be decoded. DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool } // Set combines all of the option functions together @@ -39,8 +43,16 @@ func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGe return err } + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { return fmt.Errorf("failed to execute template, %v", err) } @@ -166,15 +178,17 @@ import ( "regexp" ) - {{ template "partition consts" . }} + {{ template "partition consts" $.Resolver }} - {{ range $_, $partition := . }} + {{ range $_, $partition := $.Resolver }} {{ template "partition region consts" $partition }} {{ end }} - {{ template "service consts" . }} + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} - {{ template "endpoint resolvers" .
}} + {{ template "endpoint resolvers" $.Resolver }} {{- end }} {{ define "partition consts" }} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go index 271da432..d9b37f4d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -1,18 +1,17 @@ -// +build !appengine,!plan9 - package request import ( - "net" - "os" - "syscall" + "strings" ) func isErrConnectionReset(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - return sysErr.Err == syscall.ECONNRESET - } + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true } return false diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go deleted file mode 100644 index daf9eca4..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine plan9 - -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - return strings.Contains(err.Error(), "connection reset") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 605a72d3..185b0731 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -19,10 +19,11 @@ type Handlers struct { UnmarshalError HandlerList Retry HandlerList AfterRetry HandlerList + CompleteAttempt HandlerList Complete HandlerList } -// Copy returns of this handler's lists. +// Copy returns a copy of this handler's lists. func (h *Handlers) Copy() Handlers { return Handlers{ Validate: h.Validate.copy(), @@ -36,11 +37,12 @@ func (h *Handlers) Copy() Handlers { UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), Complete: h.Complete.copy(), } } -// Clear removes callback functions for all handlers +// Clear removes callback functions for all handlers. func (h *Handlers) Clear() { h.Validate.Clear() h.Build.Clear() @@ -53,9 +55,55 @@ func (h *Handlers) Clear() { h.ValidateResponse.Clear() h.Retry.Clear() h.AfterRetry.Clear() + h.CompleteAttempt.Clear() h.Complete.Clear() } +// IsEmpty returns if there are no handlers in any of the handlerlists. 
+func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + // A HandlerListRunItem represents an entry in the HandlerList which // is being run. type HandlerListRunItem struct { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index b0c2ef4f..9370fa50 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -15,12 +15,15 @@ type offsetReader struct { closed bool } -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { reader := &offsetReader{} - buf.Seek(offset, sdkio.SeekStart) + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } reader.buf = buf - return reader + return reader, nil } // Close will close the instance of the offset reader's access to @@ -54,7 +57,9 @@ func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { // CloseAndCopy will return a new offsetReader with a copy of the old buffer // and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } return newOffsetReader(o.buf, offset) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 63e7f71c..8e332cce 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "net" "net/http" "net/url" "reflect" @@ -65,6 +64,15 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + // A value greater than 0 instructs the request to be signed as Presigned URL // You should not set this field directly. Instead use Request's // Presign or PresignRequest methods. 
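The RetryErrorCodes and ThrottleErrorCodes fields added to Request above are per-request opt-ins consulted by IsErrorRetryable and IsErrorThrottle. A minimal sketch of wiring an extra retryable code through a client's handler list; the service client, region, and error code are assumptions for illustration:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// Opt an extra, hypothetical API error code into the retry logic for
	// every request this client builds.
	svc.Handlers.Build.PushBack(func(r *request.Request) {
		r.RetryErrorCodes = append(r.RetryErrorCodes, "MyTransientError")
	})

	_, err := svc.ListBuckets(&s3.ListBucketsInput{})
	fmt.Println(err)
}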
@@ -122,7 +130,6 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, Handlers: handlers.Copy(), Retryer: retryer, - AttemptTime: time.Now(), Time: time.Now(), ExpireTime: 0, Operation: operation, @@ -233,6 +240,10 @@ func (r *Request) WillRetry() bool { return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + // ParamsFilled returns if the request's parameters have been populated // and the parameters are valid. False is returned if no parameters are // provided or invalid. @@ -261,14 +272,25 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader - r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } r.ResetBody() } // Presign returns the request's signed URL. Error will be returned -// if the signing fails. The expire parameter is only used for presigned Amazon -// S3 API requests. All other AWS services will use a fixed expriation -// time of 15 minutes. +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. // // It is invalid to create a presigned URL with a expire duration 0 or less. An // error is returned if expire duration is 0 or less. @@ -287,7 +309,7 @@ func (r *Request) Presign(expire time.Duration) (string, error) { // PresignRequest behaves just like presign, with the addition of returning a // set of headers that were signed. The expire parameter is only used for // presigned Amazon S3 API requests. All other AWS services will use a fixed -// expriation time of 15 minutes. +// expiration time of 15 minutes. // // It is invalid to create a presigned URL with a expire duration 0 or less. An // error is returned if expire duration is 0 or less. 
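A short usage sketch for the Presign behavior documented in the hunk above; the bucket and key are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})

	// Only presigned S3 requests honor the expire argument; other services
	// use the fixed 15-minute expiration described above.
	url, err := req.Presign(15 * time.Minute)
	if err != nil {
		fmt.Println("presign failed:", err)
		return
	}
	fmt.Println(url)
}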
@@ -332,16 +354,15 @@ func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, err return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } -func debugLogReqError(r *Request, stage string, retrying bool, err error) { +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { return } - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) } @@ -360,12 +381,12 @@ func (r *Request) Build() error { if !r.built { r.Handlers.Validate.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) + debugLogReqError(r, "Validate Request", notRetrying, r.Error) return r.Error } r.Handlers.Build.Run(r) if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } r.built = true @@ -381,7 +402,7 @@ func (r *Request) Build() error { func (r *Request) Sign() error { r.Build() if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } @@ -389,12 +410,16 @@ func (r *Request) Sign() error { return r.Error } -func (r *Request) getNextRequestBody() (io.ReadCloser, error) { +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { if r.safeBody != nil { r.safeBody.Close() } - r.safeBody = newOffsetReader(r.Body, r.BodyStart) + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } // Go 1.8 tightened and clarified the rules code needs to use when building // requests with the http package. Go 1.8 removed the automatic detection @@ -411,10 +436,10 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // Related golang/go#18257 l, err := aws.SeekerLen(r.Body) if err != nil { - return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) } - var body io.ReadCloser if l == 0 { body = NoBody } else if l > 0 { @@ -466,80 +491,90 @@ func (r *Request) Send() error { r.Handlers.Complete.Run(r) }() + if err := r.Error; err != nil { + return err + } + for { + r.Error = nil r.AttemptTime = time.Now() - if aws.BoolValue(r.Retryable) { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - - // Closing response body to ensure that no response body is leaked - // between retry attempts. 
- if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - } - r.Sign() - if r.Error != nil { - return r.Error + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err } - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - if !shouldRetryCancel(r) { - return r.Error - } - - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", false, err) - return r.Error - } - debugLogReqError(r, "Send Request", true, err) - continue + if err := r.sendRequest(); err == nil { + return nil } - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - err := r.Error - - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Response", false, err) - return r.Error - } - debugLogReqError(r, "Validate Response", true, err) - continue + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error } - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, err) - return r.Error - } - debugLogReqError(r, "Unmarshal Response", true, err) - continue + if err := r.prepareRetry(); err != nil { + r.Error = err + return err } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. 
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } - break + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error } return nil @@ -565,32 +600,6 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } -func shouldRetryCancel(r *Request) bool { - awsErr, ok := r.Error.(awserr.Error) - timeoutErr := false - errStr := r.Error.Error() - if ok { - if awsErr.Code() == CanceledErrorCode { - return false - } - err := awsErr.OrigErr() - netErr, netOK := err.(net.Error) - timeoutErr = netOK && netErr.Temporary() - if urlErr, ok := err.(*url.Error); !timeoutErr && ok { - errStr = urlErr.Err.Error() - } - } - - // There can be two types of canceled errors here. - // The first being a net.Error and the other being an error. - // If the request was timed out, we want to continue the retry - // process. Otherwise, return the canceled error. - return timeoutErr || - (errStr != "net/http: request canceled" && - errStr != "net/http: request canceled while waiting for connection") - -} - // SanitizeHostForHeader removes default port from host and updates request.Host func SanitizeHostForHeader(r *http.Request) { host := getHost(r) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index 7c6a8000..de1292f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -4,6 +4,8 @@ package request import ( "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" ) // NoBody is a http.NoBody reader instructing Go HTTP client to not include @@ -24,7 +26,8 @@ var NoBody = http.NoBody func (r *Request) ResetBody() { body, err := r.getNextRequestBody() if err != nil { - r.Error = err + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index a633ed5a..f093fc54 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -146,7 +146,7 @@ func (r *Request) nextPageTokens() []interface{} { return nil } case bool: - if v == false { + if !v { return nil } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 7d527029..e84084da 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -1,23 +1,41 @@ package request import ( + "net" + "net/url" + "strings" "time" "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/aws/awserr" ) -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the client.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. +// Retryer provides the interface drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determine if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. type Retryer interface { + // RetryRules return the retry delay that should be used by the SDK before + // making another request attempt for the failed request. RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts a request are made. ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. MaxRetries() int } -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { cfg.Retryer = retryer return cfg @@ -38,8 +56,10 @@ var throttleCodes = map[string]struct{}{ "ThrottlingException": {}, "RequestLimitExceeded": {}, "RequestThrottled": {}, + "RequestThrottledException": {}, "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, } // credsExpiredCodes is a collection of error codes which signify the credentials @@ -74,10 +94,6 @@ var validParentCodes = map[string]struct{}{ ErrCodeRead: {}, } -type temporaryError interface { - Temporary() bool -} - func isNestedErrorRetryable(parentErr awserr.Error) bool { if parentErr == nil { return false @@ -96,7 +112,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { return isCodeRetryable(aerr.Code()) } - if t, ok := err.(temporaryError); ok { + if t, ok := err.(temporary); ok { return t.Temporary() || isErrConnectionReset(err) } @@ -106,32 +122,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil. 
@@ -106,32 +122,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil. func IsErrorRetryable(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == "RequestError" && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false } + // here we don't know the error; so we allow a retry. + return true } - return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. // Returns false if error is nil. func IsErrorThrottle(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeThrottle(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) } return false } -// IsErrorExpiredCreds returns whether the error code is a credential expiry error. -// Returns false if error is nil. +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. func IsErrorExpiredCreds(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeExpiredCreds(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) } return false } @@ -141,17 +215,58 @@ func IsErrorExpiredCreds(err error) bool { // // Alias for the utility function IsErrorRetryable func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } return IsErrorRetryable(r.Error) } -// IsErrorThrottle returns whether the error is to be throttled based on its code.
-// Returns false if the request has no Error set +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. // // Alias for the utility function IsErrorThrottle func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + return IsErrorThrottle(r.Error) } +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + // IsErrorExpired returns whether the error code is a credential expiry error. // Returns false if the request has no Error set. // diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go index bcfd947a..8630683f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -17,6 +17,8 @@ const ( ParamMinValueErrCode = "ParamMinValueError" // ParamMinLenErrCode is the error code for fields without enough elements. ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. + ParamMaxLenErrCode = "ParamMaxLenError" // ParamFormatErrCode is the error code for a field with invalid // format or characters. @@ -237,6 +239,29 @@ func (e *ErrParamMinLen) MinLen() int { return e.min } +// An ErrParamMaxLen represents a maximum length parameter error. +type ErrParamMaxLen struct { + errInvalidParam + max int +} + +// NewErrParamMaxLen creates a new maximum length parameter error. +func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { + return &ErrParamMaxLen{ + errInvalidParam: errInvalidParam{ + code: ParamMaxLenErrCode, + field: field, + msg: fmt.Sprintf("maximum size of %v, %v", max, value), + }, + max: max, + } +} + +// MaxLen returns the field's required maximum length. +func (e *ErrParamMaxLen) MaxLen() int { + return e.max +} + // An ErrParamFormat represents an invalid format parameter error. type ErrParamFormat struct { errInvalidParam diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go new file mode 100644 index 00000000..ea9ebb6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go @@ -0,0 +1,26 @@ +// +build go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) +
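The build-tagged transports that follow back the session's custom CA bundle support. A usage sketch, assuming the long-standing session.Options.CustomCABundle field; the bundle path is a placeholder:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Supplying a CA bundle makes the session install a transport like
	// getCABundleTransport below, with the bundle loaded into its TLS config.
	f, err := os.Open("/path/to/ca-bundle.pem")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	sess, err := session.NewSessionWithOptions(session.Options{
		CustomCABundle: f,
	})
	fmt.Println(sess != nil, err)
}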
+// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go new file mode 100644 index 00000000..fec39dfc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go new file mode 100644 index 00000000..1c5a5391 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go @@ -0,0 +1,23 @@ +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 00000000..7713ccfc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,259 @@ +package session + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided a Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set.
+	return assumeWebIdentity(cfg, handlers,
+		envCfg.WebIdentityTokenFilePath,
+		envCfg.RoleARN,
+		envCfg.RoleSessionName,
+	)
+
+	default:
+		// Fallback to the "default" credential resolution chain.
+		return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+	}
+}
+
+// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but
+// 'AWS_ROLE_ARN' was not set.
+var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil)
+
+// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but
+// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set.
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
+
+func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
+	filepath string,
+	roleARN, sessionName string,
+) (*credentials.Credentials, error) {
+
+	if len(filepath) == 0 {
+		return nil, WebIdentityEmptyTokenFilePathErr
+	}
+
+	if len(roleARN) == 0 {
+		return nil, WebIdentityEmptyRoleARNErr
+	}
+
+	creds := stscreds.NewWebIdentityCredentials(
+		&Session{
+			Config:   cfg,
+			Handlers: handlers.Copy(),
+		},
+		roleARN,
+		sessionName,
+		filepath,
+	)
+
+	return creds, nil
+}
+
+func resolveCredsFromProfile(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+	switch {
+	case sharedCfg.SourceProfile != nil:
+		// Assume IAM role with credentials source from a different profile.
+		creds, err = resolveCredsFromProfile(cfg, envCfg,
+			*sharedCfg.SourceProfile, handlers, sessOpts,
+		)
+
+	case sharedCfg.Creds.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		creds = credentials.NewStaticCredentialsFromCreds(
+			sharedCfg.Creds,
+		)
+
+	case len(sharedCfg.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+
+	case len(sharedCfg.CredentialSource) != 0:
+		creds, err = resolveCredsFromSource(cfg, envCfg,
+			sharedCfg, handlers, sessOpts,
+		)
+
+	case len(sharedCfg.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
+		return assumeWebIdentity(cfg, handlers,
+			sharedCfg.WebIdentityTokenFile,
+			sharedCfg.RoleARN,
+			sharedCfg.RoleSessionName,
+		)
+
+	default:
+		// Fallback to default credentials provider, include mock errors for
+		// the credential chain so user can identify why credentials failed to
+		// be retrieved.
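Taken together, resolveCredentials and resolveCredsFromProfile above define a strict lookup order: an explicitly requested profile wins, then complete static environment credentials, then web identity (which must pair a token file with a role ARN), and finally the shared-profile chain. A self-contained sketch of that ordering; sessionEnv and pickCredSource are illustrative stand-ins, not SDK types:

    package main

    import (
        "errors"
        "fmt"
    )

    type sessionEnv struct {
        Profile       string
        AccessKeyID   string
        SecretKey     string
        TokenFilePath string
        RoleARN       string
    }

    // pickCredSource mirrors the ordering in resolveCredentials.
    func pickCredSource(env sessionEnv) (string, error) {
        switch {
        case env.Profile != "":
            return "shared profile: " + env.Profile, nil
        case env.AccessKeyID != "" && env.SecretKey != "":
            return "static environment credentials", nil
        case env.TokenFilePath != "":
            // Web identity requires the role ARN alongside the token file.
            if env.RoleARN == "" {
                return "", errors.New("web identity: role ARN is not set")
            }
            return "web identity assume role", nil
        default:
            return "default credential chain", nil
        }
    }

    func main() {
        src, err := pickCredSource(sessionEnv{TokenFilePath: "/var/run/token"})
        fmt.Println(src, err)
    }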
+ creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + if len(sharedCfg.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = creds + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + + return creds, nil +} + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch sharedCfg.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + + if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.RoleSessionName + opt.Duration = sessOpts.AssumeRoleDuration + + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set when shared config is configured +// load assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. 
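The default branch at the top of this hunk builds a chain whose first links exist only to report why the environment and the shared profile were skipped, so a failed lookup explains itself. A toy version of that verbose-fallback idea; provider, errProvider, staticProvider and firstOf are illustrative, not SDK types:

    package main

    import (
        "errors"
        "fmt"
    )

    // provider is a trimmed stand-in for credentials.Provider.
    type provider interface {
        retrieve() (string, error)
    }

    type errProvider struct{ err error }

    func (p errProvider) retrieve() (string, error) { return "", p.err }

    type staticProvider struct{ creds string }

    func (p staticProvider) retrieve() (string, error) { return p.creds, nil }

    // firstOf walks the chain in order, collecting each failure so the
    // caller can see why earlier sources were skipped, the same idea as
    // ChainProvider with VerboseErrors enabled.
    func firstOf(chain ...provider) (string, []error) {
        var errs []error
        for _, p := range chain {
            creds, err := p.retrieve()
            if err == nil {
                return creds, nil
            }
            errs = append(errs, err)
        }
        return "", errs
    }

    func main() {
        creds, errs := firstOf(
            errProvider{errors.New("EnvAccessKeyNotFound")},
            errProvider{errors.New("SharedCredsLoad: profile missing")},
            staticProvider{"AKID:SECRET"},
        )
        fmt.Println(creds, errs)
    }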
+func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 98d420fd..7ec66e7e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -1,97 +1,93 @@ /* -Package session provides configuration for the SDK's service clients. - -Sessions can be shared across all service clients that share the same base -configuration. The Session is built from the SDK's default configuration and -request handlers. - -Sessions should be cached when possible, because creating a new Session will -load all configuration values from the environment, and config files each time -the Session is created. Sharing the Session value across all of your service -clients will ensure the configuration is loaded the fewest number of times possible. - -Concurrency +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. Sessions are safe to use concurrently as long as the Session is not being -modified. The SDK will not modify the Session once the Session has been created. -Creating service clients concurrently from a shared Session is safe. - -Sessions from Shared Config - -Sessions can be created using the method above that will only load the -additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. -Alternatively you can explicitly create a Session with shared config enabled. -To do this you can use NewSessionWithOptions to configure how the Session will -be created. Using the NewSessionWithOptions with SharedConfigState set to -SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG -environment variable was set. +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. -Creating Sessions - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. +Sessions options from Shared Config By default NewSession will only load credentials from the shared credentials file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value the Session will be created from the configuration values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. See the section Sessions from Shared Config for -more information. +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. -Create a Session with the default config and request handlers. With credentials -region, and profile loaded from the environment and shared config automatically. 
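AssumeRoleTokenProviderNotSetError above is returned when a profile configures mfa_serial but the session was not given a token provider. Following the pattern the session docs themselves use, a minimal sketch of wiring one in; error handling is shortened for brevity:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Without AssumeRoleTokenProvider, a shared-config profile that
        // sets mfa_serial fails to load with
        // AssumeRoleTokenProviderNotSetError.
        sess, err := session.NewSessionWithOptions(session.Options{
            SharedConfigState:       session.SharedConfigEnable,
            AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }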
-Requires the AWS_PROFILE to be set, or "default" is used. +Credential and config loading order - // Create Session - sess := session.Must(session.NewSession()) +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. The order +configuration is loaded in is: - // Create a Session with a custom region - sess := session.Must(session.NewSession(&aws.Config{ - Region: aws.String("us-east-1"), - })) + * Environment Variables + * Shared Credentials file + * Shared Configuration file (if SharedConfig is enabled) + * EC2 Instance Metadata (credentials only) - // Create a S3 client instance from a session - sess := session.Must(session.NewSession()) +The Environment variables for credentials will have precedence over shared +config even if SharedConfig is enabled. To override this behavior, and use +shared config credentials instead specify the session.Options.Profile, (e.g. +when using credential_source=Environment to assume a role). + + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "myProfile", + }) - svc := s3.New(sess) +Creating Sessions -Create Session With Option Overrides +Creating a Session without additional options will load credentials region, and +profile loaded from the environment and shared config automatically. See, +"Environment Variables" section for information on environment variables used +by Session. -In addition to NewSession, Sessions can be created using NewSessionWithOptions. -This func allows you to control and override how the Session will be created -through code instead of being driven by environment variables only. + // Create Session + sess, err := session.NewSession() -Use NewSessionWithOptions when you want to provide the config profile, or -override the shared config state (AWS_SDK_LOAD_CONFIG). + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded, config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + }) + +Use NewSessionWithOptions to provide additional configuration driving how the +Session's configuration will be loaded. Such as, specifying shared config +profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). // Equivalent to session.NewSession() - sess := session.Must(session.NewSessionWithOptions(session.Options{ + sess, err := session.NewSessionWithOptions(session.Options{ // Options - })) + }) - // Specify profile to load for the session's config - sess := session.Must(session.NewSessionWithOptions(session.Options{ - Profile: "profile_name", - })) + sess, err := session.NewSessionWithOptions(session.Options{ + // Specify profile to load for the session's config + Profile: "profile_name", - // Specify profile for config and region for requests - sess := session.Must(session.NewSessionWithOptions(session.Options{ - Config: aws.Config{Region: aws.String("us-east-1")}, - Profile: "profile_name", - })) + // Provide SDK Config options, such as Region. 
+ Config: aws.Config{ + Region: aws.String("us-west-2"), + }, - // Force enable Shared Config support - sess := session.Must(session.NewSessionWithOptions(session.Options{ + // Force enable Shared Config support SharedConfigState: session.SharedConfigEnable, - })) + }) Adding Handlers -You can add handlers to a session for processing HTTP requests. All service -clients that use the session inherit the handlers. For example, the following -handler logs every request and its payload made by a service client: +You can add handlers to a session to decorate API operation, (e.g. adding HTTP +headers). All clients that use the Session receive a copy of the Session's +handlers. For example, the following request handler added to the Session logs +every requests made. // Create a session, and add additional handlers for all service // clients created with the Session to inherit. Adds logging handler. @@ -99,22 +95,15 @@ handler logs every request and its payload made by a service client: sess.Handlers.Send.PushFront(func(r *request.Request) { // Log every request made and its payload - logger.Println("Request: %s/%s, Payload: %s", + logger.Printf("Request: %s/%s, Params: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) }) -Deprecated "New" function - -The New session function has been deprecated because it does not provide good -way to return errors that occur when loading the configuration files and values. -Because of this, NewSession was created so errors can be retrieved when -creating a session fails. - Shared Config Fields -By default the SDK will only load the shared credentials file's (~/.aws/credentials) -credentials values, and all other config is provided by the environment variables, -SDK defaults, and user provided aws.Config values. +By default the SDK will only load the shared credentials file's +(~/.aws/credentials) credentials values, and all other config is provided by +the environment variables, SDK defaults, and user provided aws.Config values. If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable option is used to create the Session the full shared config values will be @@ -125,24 +114,31 @@ files have the same format. If both config files are present the configuration from both files will be read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). +credentials file (~/.aws/credentials) over those in the shared config file +(~/.aws/config). -Credentials are the values the SDK should use for authenticating requests with -AWS Services. They are from a configuration file will need to include both -aws_access_key_id and aws_secret_access_key must be provided together in the -same file to be considered valid. The values will be ignored if not a complete -group. aws_session_token is an optional field that can be provided if both of -the other two fields are also provided. +Credentials are the values the SDK uses to authenticating requests with AWS +Services. When specified in a file, both aws_access_key_id and +aws_secret_access_key must be provided together in the same file to be +considered valid. They will be ignored if both are not present. +aws_session_token is an optional field that can be provided in addition to the +other two fields. 
aws_access_key_id = AKID aws_secret_access_key = SECRET aws_session_token = TOKEN -Assume Role values allow you to configure the SDK to assume an IAM role using -a set of credentials provided in a config file via the source_profile field. -Both "role_arn" and "source_profile" are required. The SDK supports assuming -a role with MFA token if the session option AssumeRoleTokenProvider -is set. + ; region only supported if SharedConfigEnabled. + region = us-east-1 + +Assume Role configuration + +The role_arn field allows you to configure the SDK to assume an IAM role using +a set of credentials from another source. Such as when paired with static +credentials, "profile_source", "credential_process", or "credential_source" +fields. If "role_arn" is provided, a source of credentials must also be +specified, such as "source_profile", "credential_source", or +"credential_process". role_arn = arn:aws:iam:::role/ source_profile = profile_with_creds @@ -150,40 +146,16 @@ is set. mfa_serial = role_session_name = session_name -Region is the region the SDK should use for looking up AWS service endpoints -and signing requests. - - region = us-east-1 - -Assume Role with MFA token -To create a session with support for assuming an IAM role with MFA set the -session option AssumeRoleTokenProvider to a function that will prompt for the -MFA token code when the SDK assumes the role and refreshes the role's credentials. -This allows you to configure the SDK via the shared config to assumea role -with MFA tokens. - -In order for the SDK to assume a role with MFA the SharedConfigState -session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG -environment variable set. - -The shared configuration instructs the SDK to assume an IAM role with MFA -when the mfa_serial configuration field is set in the shared config -(~/.aws/config) or shared credentials (~/.aws/credentials) file. - -If mfa_serial is set in the configuration, the SDK will assume the role, and -the AssumeRoleTokenProvider session option is not set an an error will -be returned when creating the session. +The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you +must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +to load if the AssumeRoleTokenProvider is not specified. sess := session.Must(session.NewSessionWithOptions(session.Options{ AssumeRoleTokenProvider: stscreds.StdinTokenProvider, })) - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess) - -To setup assume role outside of a session see the stscrds.AssumeRoleProvider +To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider documentation. Environment Variables diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index c94d0fb9..60a6f9ce 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -80,7 +80,7 @@ type envConfig struct { // AWS_CONFIG_FILE=$HOME/my_shared_config SharedConfigFile string - // Sets the path to a custom Credentials Authroity (CA) Bundle PEM file + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file // that the SDK will use instead of the system's root CA bundle. // Only use this if you want to configure the SDK to use a custom set // of CAs. 
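The custom CA bundle documented here can also be supplied programmatically through session.Options (the io.Reader field shown in the session.go changes further down). A sketch with a hypothetical bundle path; AWS_CA_BUNDLE in the environment would achieve the same effect:

    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Hypothetical bundle path, used only for illustration.
        f, err := os.Open("/etc/pki/custom-ca.pem")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // The reader's certificates replace the system root CA bundle
        // for this session's TLS requests.
        sess, err := session.NewSessionWithOptions(session.Options{
            CustomCABundle: f,
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }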
@@ -99,21 +99,41 @@ type envConfig struct { CustomCABundle string csmEnabled string - CSMEnabled bool + CSMEnabled *bool CSMPort string + CSMHost string CSMClientID string - enableEndpointDiscovery string // Enables endpoint discovery via environment variables. // // AWS_ENABLE_ENDPOINT_DISCOVERY=true EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string } var ( csmEnabledEnvKey = []string{ "AWS_CSM_ENABLED", } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } csmPortEnvKey = []string{ "AWS_CSM_PORT", } @@ -150,6 +170,15 @@ var ( sharedConfigFileEnvKey = []string{ "AWS_CONFIG_FILE", } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -178,21 +207,33 @@ func envConfigLoad(enableSharedConfig bool) envConfig { cfg.EnableSharedConfig = enableSharedConfig - setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) // CSM environment variables setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - cfg.CSMEnabled = len(cfg.csmEnabled) > 0 - // Require logical grouping of credentials - if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { - cfg.Creds = credentials.Value{} - } else { - cfg.Creds.ProviderName = EnvProviderName + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v } regionKeys := regionEnvKeys diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 9b1ad609..7b0a942e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -8,18 +8,17 @@ import ( "io/ioutil" "net/http" "os" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" 
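envConfigLoad above resolves each field from an ordered list of environment keys and only keeps static credentials when they form a complete group. A standalone sketch of both behaviors; the helper mirrors the SDK's unexported setFromEnvVal but is not the SDK's code:

    package main

    import (
        "fmt"
        "os"
    )

    // setFromEnvVal copies the first non-empty environment value from
    // keys into dst, the same lookup pattern envConfigLoad applies to
    // each field.
    func setFromEnvVal(dst *string, keys []string) {
        for _, k := range keys {
            if v := os.Getenv(k); len(v) != 0 {
                *dst = v
                break
            }
        }
    }

    func main() {
        var akid, secret string
        setFromEnvVal(&akid, []string{"AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY"})
        setFromEnvVal(&secret, []string{"AWS_SECRET_ACCESS_KEY", "AWS_SECRET_KEY"})

        // Credentials are only honored as a logical group: a lone access
        // key id without a secret is discarded.
        if akid == "" || secret == "" {
            akid, secret = "", ""
        }
        fmt.Println("have static creds:", akid != "")
    }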
"github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" ) const ( @@ -105,8 +104,20 @@ func New(cfgs ...*aws.Config) *Session { } s := deprecatedNewSession(cfgs...) - if envCfg.CSMEnabled { - enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + err = fmt.Errorf("failed to enable CSM, %v", err) + s.Config.Logger.Log("ERROR:", err.Error()) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } } return s @@ -125,7 +136,7 @@ func New(cfgs ...*aws.Config) *Session { // to be built with retrieving credentials with AssumeRole set in the config. // // See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created. Such as specifying the +// control through code how the Session will be created, such as specifying the // config profile, and controlling if shared config is enabled or not. func NewSession(cfgs ...*aws.Config) (*Session, error) { opts := Options{} @@ -209,6 +220,12 @@ type Options struct { // the config enables assume role wit MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) + // When the SDK's shared config is configured to assume a role this option + // may be provided to set the expiry duration of the STS credentials. + // Defaults to 15 minutes if not set as documented in the + // stscreds.AssumeRoleProvider. + AssumeRoleDuration time.Duration + // Reader for a custom Credentials Authority (CA) bundle in PEM format that // the SDK will use instead of the default system's root CA bundle. Use this // only if you want to replace the CA bundle the SDK uses for TLS requests. @@ -223,6 +240,12 @@ type Options struct { // to also enable this feature. CustomCABundle session option field has priority // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. CustomCABundle io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. 
+ Handlers request.Handlers } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -262,7 +285,7 @@ func NewSessionWithOptions(opts Options) (*Session, error) { envCfg = loadEnvConfig() } - if len(opts.Profile) > 0 { + if len(opts.Profile) != 0 { envCfg.Profile = opts.Profile } @@ -328,27 +351,33 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { return s } -func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { - logger.Log("Enabling CSM") - if len(port) == 0 { - port = csm.DefaultPort +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { + if logger != nil { + logger.Log("Enabling CSM") } - r, err := csm.Start(clientID, "127.0.0.1:"+port) + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) if err != nil { - return + return err } r.InjectHandlers(handlers) + + return nil } func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() - handlers := defaults.Handlers() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } // Get a merged version of the user provided config to determine if // credentials were. userCfg := &aws.Config{} userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) // Ordered config files will be loaded in with later files overwriting // previous config file values. @@ -365,9 +394,17 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) if err != nil { - return nil, err + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. 
Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } } if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { @@ -380,8 +417,16 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) - if envCfg.CSMEnabled { - enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + return nil, err + } } // Setup HTTP client with custom cert bundle if enabled @@ -394,6 +439,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, return s, nil } +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + func loadCustomCABundle(s *Session, bundle io.Reader) error { var t *http.Transport switch v := s.Config.HTTPClient.Transport.(type) { @@ -406,7 +491,10 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error { } } if t == nil { - t = &http.Transport{} + // Nil transport implies `http.DefaultTransport` should be used. Since + // the SDK cannot modify, nor copy the `DefaultTransport` specifying + // the values the next closest behavior. + t = getCABundleTransport() } p, err := loadCertPool(bundle) @@ -439,9 +527,11 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) { return p, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { - // Merge in user provided configuration - cfg.MergeIn(userCfg) +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { // Region if not already set by user if len(aws.StringValue(cfg.Region)) == 0 { @@ -452,7 +542,7 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share } } - if aws.BoolValue(envCfg.EnableEndpointDiscovery) { + if cfg.EnableEndpointDiscovery == nil { if envCfg.EnableEndpointDiscovery != nil { cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { @@ -460,160 +550,19 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share } } - // Configure credentials if not already set + // Configure credentials if not already set by the user when creating the + // Session. 
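loadCSMConfig above gives the environment the final word: an explicit false disables CSM even when the aws_csm shared profile enables it, and the profile is only consulted when the environment is silent. A condensed sketch of that precedence, with types trimmed to the relevant fields:

    package main

    import "fmt"

    type csmConfig struct {
        Enabled  bool
        Host     string
        Port     string
        ClientID string
    }

    // resolveCSM mirrors loadCSMConfig's precedence: a non-nil
    // environment setting wins outright, otherwise the aws_csm profile
    // decides.
    func resolveCSM(envEnabled *bool, envCfg, profileCfg csmConfig) csmConfig {
        if envEnabled != nil {
            if *envEnabled {
                return envCfg
            }
            return csmConfig{} // explicitly disabled
        }
        if profileCfg.Enabled {
            return profileCfg
        }
        return csmConfig{}
    }

    func main() {
        off := false
        // Environment explicitly disables CSM, so the profile is ignored.
        fmt.Printf("%+v\n", resolveCSM(&off,
            csmConfig{},
            csmConfig{Enabled: true, Port: "31000"}))
    }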
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - - // inspect the profile to see if a credential source has been specified. - if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { - - // if both credential_source and source_profile have been set, return an error - // as this is undefined behavior. - if len(sharedCfg.AssumeRole.SourceProfile) > 0 { - return ErrSharedConfigSourceCollision - } - - // valid credential source values - const ( - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" - ) - - switch sharedCfg.AssumeRole.CredentialSource { - case credSourceEc2Metadata: - cfgCp := *cfg - p := defaults.RemoteCredProvider(cfgCp, handlers) - cfgCp.Credentials = credentials.NewCredentials(p) - - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return AssumeRoleTokenProviderNotSetError{} - } - - cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) - case credSourceEnvironment: - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - case credSourceECSContainer: - if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - return ErrSharedConfigECSContainerEnvVarEmpty - } - - cfgCp := *cfg - p := defaults.RemoteCredProvider(cfgCp, handlers) - creds := credentials.NewCredentials(p) - - cfg.Credentials = creds - default: - return ErrSharedConfigInvalidCredSource - } - - return nil - } - - if len(envCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { - cfgCp := *cfg - cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.AssumeRoleSource.Creds, - ) - - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return AssumeRoleTokenProviderNotSetError{} - } - - cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) - } else if len(sharedCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - } else { - // Fallback to default credentials provider, include mock errors - // for the credential chain so user can identify why credentials - // failed to be retrieved. 
- cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, - &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err } + cfg.Credentials = creds } return nil } -func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials { - return stscreds.NewCredentials( - &Session{ - Config: &cfg, - Handlers: handlers.Copy(), - }, - sharedCfg.AssumeRole.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName - - // Assume role with external ID - if len(sharedCfg.AssumeRole.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.AssumeRole.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ) -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the -// MFAToken option is not set when shared config is configured load assume a -// role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error -} - -var emptyCreds = credentials.Value{} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true -} - func initHandlers(s *Session) { // Add the Validate parameter handler if it is not disabled. s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) @@ -622,7 +571,7 @@ func initHandlers(s *Session) { } } -// Copy creates and returns a copy of the current Session, coping the config +// Copy creates and returns a copy of the current Session, copying the config // and handlers. If any additional configs are provided they will be merged // on top of the Session's copied config. 
// diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 427b8a4e..d91ac93a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -5,7 +5,6 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/internal/ini" ) @@ -23,45 +22,57 @@ const ( mfaSerialKey = `mfa_serial` // optional roleSessionNameKey = `role_session_name` // optional + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + // Additional Config fields regionKey = `region` // endpoint discovery group enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + // External Credential Process + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name // is not provided. DefaultSharedConfigProfile = `default` ) -type assumeRoleConfig struct { - RoleARN string - SourceProfile string - CredentialSource string - ExternalID string - MFASerial string - RoleSessionName string -} - // sharedConfig represents the configuration fields of the SDK config files. type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. // // aws_access_key_id // aws_secret_access_key // aws_session_token Creds credentials.Value - AssumeRole assumeRoleConfig - AssumeRoleSource *sharedConfig + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string - // Region is the region the SDK should use for looking up AWS service endpoints - // and signing requests. + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + + SourceProfileName string + SourceProfile *sharedConfig + + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. // // region Region string @@ -71,6 +82,12 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string } type sharedConfigFile struct { @@ -78,17 +95,18 @@ type sharedConfigFile struct { IniData ini.Sections } -// loadSharedConfig retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine // precedence. 
Values in subsequent files will overwrite values defined in // earlier files. // // For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. +// of the files are A then B, B's credential values will be used instead of +// A's. // // See sharedConfig.setFromFile for information how the config files // will be loaded. -func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { +func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { if len(profile) == 0 { profile = DefaultSharedConfigProfile } @@ -99,16 +117,11 @@ func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) } cfg := sharedConfig{} - if err = cfg.setFromIniFiles(profile, files); err != nil { + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { return sharedConfig{}, err } - if len(cfg.AssumeRole.SourceProfile) > 0 { - if err := cfg.setAssumeRoleSource(profile, files); err != nil { - return sharedConfig{}, err - } - } - return cfg, nil } @@ -132,60 +145,88 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { return files, nil } -func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { - var assumeRoleSrc sharedConfig - - if len(cfg.AssumeRole.CredentialSource) > 0 { - // setAssumeRoleSource is only called when source_profile is found. - // If both source_profile and credential_source are set, then - // ErrSharedConfigSourceCollision will be returned - return ErrSharedConfigSourceCollision +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + // Trim files from the list that don't exist. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. + profileNotFoundErr = err + skippedFiles++ + continue + } + return err + } + } + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr } - // Multiple level assume role chains are not support - if cfg.AssumeRole.SourceProfile == origProfile { - assumeRoleSrc = *cfg - assumeRoleSrc.AssumeRole = assumeRoleConfig{} + if _, ok := profiles[profile]; ok { + // if this is the second instance of the profile the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. The self linked instance of the + // profile only have credential provider options. + cfg.clearAssumeRoleOptions() } else { - err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) - if err != nil { + // First time a profile has been seen, It must either be a assume role + // or credentials. Assert if the credential type requires a role ARN, + // the ARN is also set. 
+ if err := cfg.validateCredentialsRequireARN(profile); err != nil { return err } } + profiles[profile] = struct{}{} - if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { - return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + if err := cfg.validateCredentialType(); err != nil { + return err } - cfg.AssumeRoleSource = &assumeRoleSrc + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. + cfg.clearCredentialOptions() - return nil -} - -func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { - // Trim files from the list that don't exist. - for _, f := range files { - if err := cfg.setFromIniFile(profile, f); err != nil { + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { + // SourceProfile that doesn't exist is an error in configuration. if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore proviles missings - continue + err = SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } } return err } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + + cfg.SourceProfile = srcCfg } return nil } -// setFromFile loads the configuration from the file using -// the profile provided. A sharedConfig pointer type value is used so that -// multiple config file loadings can be chained. +// setFromFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. // // Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For example -// if a config file only includes aws_access_key_id but no aws_secret_access_key -// the aws_access_key_id will be ignored. -func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. 
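setFromIniFiles above tracks profiles it has already visited, so a profile that names itself via source_profile degrades to a credentials-only lookup instead of recursing forever. An illustrative model of that walk; the profile struct and resolveChain are stand-ins, not the SDK's representation:

    package main

    import (
        "errors"
        "fmt"
    )

    type profile struct {
        name          string
        sourceProfile string
    }

    // resolveChain follows source_profile links while recording visited
    // names; a second visit is treated as terminal, the way revisited
    // profiles have their assume-role options cleared.
    func resolveChain(profiles map[string]profile, name string, seen map[string]bool) ([]string, error) {
        p, ok := profiles[name]
        if !ok {
            return nil, errors.New("profile not found: " + name)
        }
        if seen[name] {
            return []string{name + " (credentials only)"}, nil
        }
        seen[name] = true

        chain := []string{name}
        if p.sourceProfile != "" {
            rest, err := resolveChain(profiles, p.sourceProfile, seen)
            if err != nil {
                return nil, err
            }
            chain = append(chain, rest...)
        }
        return chain, nil
    }

    func main() {
        profiles := map[string]profile{
            "app":  {name: "app", sourceProfile: "base"},
            "base": {name: "base", sourceProfile: "base"}, // self-linked
        }
        chain, err := resolveChain(profiles, "app", map[string]bool{})
        fmt.Println(chain, err)
    }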
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { section, ok := file.IniData.GetSection(profile) if !ok { // Fallback to to alternate profile name: profile @@ -195,48 +236,143 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e } } + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + + updateString(&cfg.Region, section, regionKey) + } + + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + // Shared Credentials - akid := section.String(accessKeyIDKey) - secret := section.String(secretAccessKey) - if len(akid) > 0 && len(secret) > 0 { - cfg.Creds = credentials.Value{ - AccessKeyID: akid, - SecretAccessKey: secret, - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), - } + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds } - // Assume Role - roleArn := section.String(roleArnKey) - srcProfile := section.String(sourceProfileKey) - credentialSource := section.String(credentialSourceKey) - hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 - if len(roleArn) > 0 && hasSource { - cfg.AssumeRole = assumeRoleConfig{ - RoleARN: roleArn, - SourceProfile: srcProfile, - CredentialSource: credentialSource, - ExternalID: section.String(externalIDKey), - MFASerial: section.String(mfaSerialKey), - RoleSessionName: section.String(roleSessionNameKey), - } + // Endpoint discovery + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey } - // Region - if v := section.String(regionKey); len(v) > 0 { - cfg.Region = v + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } } - // Endpoint discovery - if section.Has(enableEndpointDiscoveryKey) { - v := section.Bool(enableEndpointDiscoveryKey) - cfg.EnableEndpointDiscovery = &v + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. 
+ if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision } return nil } +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + // SharedConfigLoadError is an error for the shared config file failed to load. type SharedConfigLoadError struct { Filename string @@ -294,7 +430,8 @@ func (e SharedConfigProfileNotExistsError) Error() string { // profile contains assume role information, but that information is invalid // or not complete. type SharedConfigAssumeRoleError struct { - RoleARN string + RoleARN string + SourceProfile string } // Code is the short id of the error. @@ -304,8 +441,10 @@ func (e SharedConfigAssumeRoleError) Code() string { // Message is the description of the error func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", - e.RoleARN) + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) } // OrigErr is the underlying error that caused the failure. @@ -317,3 +456,36 @@ func (e SharedConfigAssumeRoleError) OrigErr() error { func (e SharedConfigAssumeRoleError) Error() string { return awserr.SprintError(e.Code(), e.Message(), "", nil) } + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. +func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. 
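oneOrNone above enforces that a profile declares at most one credential type. A sketch of the same validation applied to the four fields validateCredentialType checks; validateProfile is illustrative:

    package main

    import (
        "errors"
        "fmt"
    )

    // oneOrNone reports whether at most one flag is true, the check
    // applied to source_profile, credential_source, credential_process
    // and web_identity_token_file.
    func oneOrNone(bs ...bool) bool {
        count := 0
        for _, b := range bs {
            if b {
                count++
            }
        }
        return count <= 1
    }

    func validateProfile(sourceProfile, credSource, credProcess, tokenFile string) error {
        if !oneOrNone(
            sourceProfile != "",
            credSource != "",
            credProcess != "",
            tokenFile != "",
        ) {
            return errors.New("only one credential type may be set per profile")
        }
        return nil
    }

    func main() {
        // Collision: both source_profile and credential_source are set.
        err := validateProfile("dev", "Environment", "", "")
        fmt.Println(err)
    }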
+func (e CredentialRequiresARNError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 155645d6..8104793a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -98,25 +98,25 @@ var ignoredHeaders = rules{ var requiredSignedHeaders = rules{ whitelist{ mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, @@ -134,7 +134,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Tagging": struct{}{}, + "X-Amz-Tagging": struct{}{}, "X-Amz-Website-Redirect-Location": struct{}{}, "X-Amz-Content-Sha256": struct{}{}, }, @@ -182,7 +182,7 @@ type Signer struct { // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html DisableURIPathEscaping bool - // Disales the automatical setting of the HTTP request's Body field with the + // Disables the automatical setting of the HTTP request's Body field with the // io.ReadSeeker passed in to the signer. This is useful if you're using a // custom wrapper around the body for the io.ReadSeeker and want to preserve // the Body value on the Request.Body. @@ -422,7 +422,7 @@ var SignRequestHandler = request.NamedHandler{ // If the credentials of the request's config are set to // credentials.AnonymousCredentials the request will not be signed. func SignSDKRequest(req *request.Request) { - signSDKRequestWithCurrTime(req, time.Now) + SignSDKRequestWithCurrentTime(req, time.Now) } // BuildNamedHandler will build a generic handler for signing. 
@@ -430,12 +430,15 @@ func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler return request.NamedHandler{ Name: name, Fn: func(req *request.Request) { - signSDKRequestWithCurrTime(req, time.Now, opts...) + SignSDKRequestWithCurrentTime(req, time.Now, opts...) }, } } -func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { +// SignSDKRequestWithCurrentTime will sign the SDK's request using the time +// function passed in. Behaves the same as SignSDKRequest with the exception +// the request is signed with the value returned by the current time function. +func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { // If the request does not need to be signed ignore the signing of the // request if the AnonymousCredentials object is used. if req.Config.Credentials == credentials.AnonymousCredentials { @@ -471,13 +474,9 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time opt(v4) } - signingTime := req.Time - if !req.LastSignedAt.IsZero() { - signingTime = req.LastSignedAt - } - + curTime := curTimeFn() signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, req.ExpireTime > 0, signingTime, + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, ) if err != nil { req.Error = err @@ -486,7 +485,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time } req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTimeFn() + req.LastSignedAt = curTime } const logSignInfoMsg = `DEBUG: Request Signature: @@ -688,7 +687,11 @@ func (ctx *signingCtx) buildBodyDigest() error { if !aws.IsReaderSeekable(ctx.Body) { return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) } - hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) } if includeSHA256Header { @@ -735,10 +738,16 @@ func makeSha256(data []byte) []byte { return hash.Sum(nil) } -func makeSha256Reader(reader io.ReadSeeker) []byte { +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { hash := sha256.New() - start, _ := reader.Seek(0, sdkio.SeekCurrent) - defer reader.Seek(start, sdkio.SeekStart) + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. + _, err = reader.Seek(start, sdkio.SeekStart) + }() // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. @@ -749,13 +758,13 @@ func makeSha256Reader(reader io.ReadSeeker) []byte { io.CopyN(hash, reader, size) } - return hash.Sum(nil) + return hash.Sum(nil), nil } const doubleSpace = " " // stripExcessSpaces will rewrite the passed in slice's string values to not -// contain muliple side-by-side spaces. +// contain multiple side-by-side spaces. 
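The reworked makeSha256Reader records the reader's position, hashes the payload, and seeks back, surfacing any seek failure instead of silently ignoring it. The same contract in a standalone sketch; hashSeekable is illustrative and uses io.Copy rather than the SDK's CopyN sizing optimization:

    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
        "strings"
    )

    // hashSeekable hashes a payload and seeks back to where it started,
    // so the body can still be sent after signing.
    func hashSeekable(r io.ReadSeeker) (string, error) {
        start, err := r.Seek(0, io.SeekCurrent)
        if err != nil {
            return "", err
        }
        h := sha256.New()
        if _, err := io.Copy(h, r); err != nil {
            return "", err
        }
        // Restore the original position; a failure here must be reported
        // or the request body would be sent from the wrong offset.
        if _, err := r.Seek(start, io.SeekStart); err != nil {
            return "", err
        }
        return hex.EncodeToString(h.Sum(nil)), nil
    }

    func main() {
        body := strings.NewReader(`{"hello":"world"}`)
        sum, err := hashSeekable(body)
        fmt.Println(sum, err)
    }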
func stripExcessSpaces(vals []string) { var j, k, l, m, spaces int for i, str := range vals { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 8b6f2342..45509154 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -7,13 +7,18 @@ import ( "github.com/aws/aws-sdk-go/internal/sdkio" ) -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should -// only be used with an io.Reader that is also an io.Seeker. Doing so may -// cause request signature errors, or request body's not sent for GET, HEAD -// and DELETE HTTP methods. +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. // -// Deprecated: Should only be used with io.ReadSeeker. If using for -// S3 PutObject to stream content use s3manager.Uploader instead. +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. +// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { return ReaderSeekerCloser{r} } @@ -43,7 +48,8 @@ func IsReaderSeekable(r io.Reader) bool { // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned. // -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. 
// // Performs the same functionality as io.Reader Read func (r ReaderSeekerCloser) Read(p []byte) (int, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 64e80aca..d1548ebd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.15.78" +const SDKVersion = "1.25.3" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go index 84b50c2e..e56dcee2 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -304,7 +304,9 @@ loop: stmt := newCommentStatement(tok) stack.Push(stmt) default: - return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok)) + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) } if len(tokens) > 0 { @@ -314,10 +316,10 @@ loop: // this occurs when a statement has not been completed if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container)) + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) } - // returns a sublist which exludes the start symbol + // returns a sublist which excludes the start symbol return stack.List(), nil } @@ -335,13 +337,12 @@ func trimSpaces(k AST) AST { } // trim right hand side of spaces - for i := len(k.Root.raw) - 1; i > 0; i-- { + for i := len(k.Root.raw) - 1; i >= 0; i-- { if !isWhitespace(k.Root.raw[i]) { break } k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] - i-- } return k diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go index ba0af01b..18f3fe89 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -15,7 +15,7 @@ func newExprStatement(ast AST) AST { return newAST(ASTKindExprStatement, ast) } -// CommentStatement represents a comment in the ini defintion. +// CommentStatement represents a comment in the ini definition. // // grammar: // comment -> #comment' | ;comment' diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go new file mode 100644 index 00000000..6c443988 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go new file mode 100644 index 00000000..44898eed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package sdkmath + +import "math" + +// Round returns the nearest integer, rounding half away from zero. 
+// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + return math.Round(x) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go new file mode 100644 index 00000000..810ec7f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go @@ -0,0 +1,56 @@ +// +build !go1.10 + +package sdkmath + +import "math" + +// Copied from the Go standard library's (Go 1.12) math/floor.go for use in +// Go version prior to Go 1.10. +const ( + uvone = 0x3FF0000000000000 + mask = 0x7FF + shift = 64 - 11 - 1 + bias = 1023 + signMask = 1 << 63 + fracMask = 1<<shift - 1 +) + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + // Round is a faster implementation of: + // + // func Round(x float64) float64 { + // t := Trunc(x) + // if Abs(x-t) >= 0.5 { + // return t + Copysign(1, x) + // } + // return t + // } + bits := math.Float64bits(x) + e := uint(bits>>shift) & mask + if e < bias { + // Round abs(x) < 1 including denormals. + bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go new file mode 100644 index 00000000..f4651da2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,11 @@ +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read provides the stub for math.Rand.Read method support for go version's +// 1.6 and greater. +func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go new file mode 100644 index 00000000..b1d93a33 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go @@ -0,0 +1,24 @@ +// +build !go1.6 + +package sdkrand + +import "math/rand" + +// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 +func Read(r *rand.Rand, p []byte) (n int, err error) { + // Copy of Go standard libraries math package's read function not added to + // standard library until Go 1.6. + var pos int8 + var val int64 + for n = 0; n < len(p); n++ { + if pos == 0 { + val = r.Int63() + pos = 7 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + + return n, err +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go index b63e4c26..7da8a49c 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -7,6 +7,6 @@ const ( ) // ECSContainerCredentialsURI is the endpoint to retrieve container -// credentials. This can be overriden to test to ensure the credential process +// credentials. This can be overridden to test to ensure the credential process // is behaving correctly.
var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go index f06f44ee..d7d42db0 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -1,7 +1,54 @@ package protocol -// ValidHostLabel returns if the label is a valid RFC 1123 Section 2.1 domain -// host label name. +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. +var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. func ValidHostLabel(label string) bool { if l := len(label); l == 0 || l > 63 { return false diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 00000000..915b0fca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. 
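+//
+// Illustrative note (an assumption for this document, not part of the
+// vendored file): with Prefix "{AccountId}." and a LabelsFn returning
+// {"AccountId": "123456789012"}, a host of "service.us-east-1.amazonaws.com"
+// is rewritten to "123456789012.service.us-east-1.amazonaws.com". No
+// expansion happens when aws.Config.DisableEndpointHostPrefix is set.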
+func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 00000000..864fb670 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,296 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
+ field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
+ enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 00000000..ea0da79a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,250 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. The value to unmarshal the json document into must be a pointer to the +// type. +func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the results in object v. 
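+//
+// Hypothetical usage sketch (assumed, not from the vendored file):
+//
+//	var out struct {
+//		Name *string `locationName:"Name"`
+//	}
+//	err := UnmarshalJSON(&out, strings.NewReader(`{"Name":"abc"}`))
+//
+// leaves *out.Name == "abc"; struct fields are matched against JSON keys,
+// with the locationName tag taking precedence over the Go field name.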
+func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshalAny(reflect.ValueOf(v), out, "") +} + +func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return unmarshalStruct(value, data, tag) + case "list": + return unmarshalList(value, data, tag) + case "map": + return unmarshalMap(value, data, tag) + default: + return unmarshalScalar(value, data, tag) + } +} + +func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + member := value.FieldByIndex(field.Index) + err := unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch 
value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. + v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + // Time unmarshaled from a float64 can only be epoch seconds + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 60e5b09d..0cb99eb5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -21,7 +21,7 @@ func Build(r *request.Request) { "Version": {r.ClientInfo.APIVersion}, } if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index 3495c730..f69c1efc 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -24,7 +24,7 @@ func Unmarshal(r *request.Request) { err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding Query response", err), + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index 46d354e8..831b0110 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -2,73 +2,68 @@ package query import ( "encoding/xml" - "io/ioutil" + "fmt" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) +// UnmarshalErrorHandler is a name request handler to unmarshal request 
errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + type xmlErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` } -type xmlServiceUnavailableResponse struct { - XMLName xml.Name `xml:"ServiceUnavailableException"` +type xmlResponseError struct { + xmlErrorResponse } -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} // UnmarshalError unmarshals an error response for an AWS Query service. func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to read from query HTTP response body", err), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), r.HTTPResponse.StatusCode, r.RequestID, ) return } - // First check for specific error - resp := xmlErrorResponse{} - decodeErr := xml.Unmarshal(bodyBytes, &resp) - if decodeErr == nil { - reqID := resp.RequestID - if reqID == "" { - reqID = r.RequestID - } - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) - return - } - - // Check for unhandled error - servUnavailResp := xmlServiceUnavailableResponse{} - unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) - if unavailErr == nil { - r.Error = awserr.NewRequestFailure( - awserr.New("ServiceUnavailableException", "service is unavailable", nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID } - // Failed to retrieve any error message from the response body r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr), + awserr.New(respErr.Code, respErr.Message, nil), r.HTTPResponse.StatusCode, - r.RequestID, + reqID, ) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index b34f5258..1301b149 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -25,6 +25,8 @@ var noEscape [256]bool var errValueNotSet = fmt.Errorf("value not set") +var byteSliceType = reflect.TypeOf([]byte{}) + func init() { for i := 0; i < len(noEscape); i++ { // AWS expects every character except these to be escaped @@ -94,6 +96,14 @@ func buildLocationElements(r *request.Request, v 
reflect.Value, buildGETQuery bo continue } + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey is modeled as stirng but + // required to be base64 encoded in request. + if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + var err error switch field.Tag.Get("location") { case "headers": // header maps @@ -137,7 +147,7 @@ func buildBody(r *request.Request, v reflect.Value) { case string: r.SetStringBody(reader) default: - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to encode REST request", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -152,9 +162,12 @@ func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect. if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + header.Add(name, str) return nil @@ -167,11 +180,13 @@ func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) if err == errValueNotSet { continue } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) - header.Add(prefix+key.String(), str) + header.Add(prefix+keyStr, str) } return nil } @@ -181,7 +196,7 @@ func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) e if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) @@ -214,7 +229,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string, tag reflec if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } query.Set(name, str) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 33fd53b1..74e361e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -57,7 +57,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } else { payload.Set(reflect.ValueOf(b)) } @@ -65,7 +65,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } else { str := string(b) 
payload.Set(reflect.ValueOf(&str)) @@ -77,7 +77,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { case "io.ReadSeeker": b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to read response body", err) return } @@ -85,7 +85,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { default: io.Copy(ioutil.Discard, r.HTTPResponse.Body) defer r.HTTPResponse.Body.Close() - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -115,14 +115,14 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { case "header": err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) break } case "headers": prefix := field.Tag.Get("locationName") err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) break } } @@ -146,6 +146,9 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) { } func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + if len(headers) == 0 { + return nil + } switch r.Interface().(type) { case map[string]*string: // we only support string map value types out := map[string]*string{} @@ -155,19 +158,28 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err out[k[len(prefix):]] = &v[0] } } - r.Set(reflect.ValueOf(out)) + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + } return nil } func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - isJSONValue := tag.Get("type") == "jsonvalue" - if isJSONValue { + switch tag.Get("type") { + case "jsonvalue": if len(header) == 0 { return nil } - } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } } switch v.Interface().(type) { @@ -178,7 +190,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro if err != nil { return err } - v.Set(reflect.ValueOf(&b)) + v.Set(reflect.ValueOf(b)) case *bool: b, err := strconv.ParseBool(header) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index b0f4e245..07a6187e 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -37,8 +37,9 @@ func Build(r *request.Request) { err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to encode rest XML request", err), - r.HTTPResponse.StatusCode, + awserr.New(request.ErrCodeSerialization, + "failed to encode rest XML request", err), + 0, r.RequestID, ) return @@ -55,7 +56,8 @@ func Unmarshal(r *request.Request) { err := 
xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to decode REST XML response", err), + awserr.New(request.ErrCodeSerialization, + "failed to decode REST XML response", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index b7ed6c6f..05d4ff51 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -1,8 +1,11 @@ package protocol import ( + "math" "strconv" "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" ) // Names of time formats supported by the SDK @@ -13,12 +16,19 @@ const ( ) // Time formats supported by the SDK +// Output time is intended to not contain decimals const ( // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + // This format is used for output time without seconds precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05Z" + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output time without seconds precision + ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" ) // IsKnownTimestampFormat returns if the timestamp format name @@ -42,9 +52,9 @@ func FormatTime(name string, t time.Time) string { switch name { case RFC822TimeFormatName: - return t.Format(RFC822TimeFormat) + return t.Format(RFC822OutputTimeFormat) case ISO8601TimeFormatName: - return t.Format(ISO8601TimeFormat) + return t.Format(ISO8601OutputTimeFormat) case UnixTimeFormatName: return strconv.FormatInt(t.Unix(), 10) default: @@ -62,10 +72,12 @@ func ParseTime(formatName, value string) (time.Time, error) { return time.Parse(ISO8601TimeFormat, value) case UnixTimeFormatName: v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 if err != nil { return time.Time{}, err } - return time.Unix(int64(v), 0), nil + return time.Unix(int64(v), int64(dec*(1e9))), nil default: panic("unknown timestamp format name, " + formatName) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 1bfe45f6..cf981fe9 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -87,7 +87,7 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle } } -// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested // types are converted to XMLNodes also. 
func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { if !value.IsValid() { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 00000000..c1a51185 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index ff1ef683..7108d380 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -1,6 +1,7 @@ package xmlutil import ( + "bytes" "encoding/base64" "encoding/xml" "fmt" @@ -10,9 +11,27 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/private/protocol" ) +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + // UnmarshalXML deserializes an xml.Decoder into the container v. V // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail. diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 515ce152..42f71648 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -119,7 +119,18 @@ func (n *XMLNode) findElem(name string) (string, bool) { // StructToXML writes an XMLNode to a xml.Encoder as tokens. 
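//
// When sorted is true the attributes are ordered by namespace, then local
// name, then value (via xmlAttrSlice above), giving a canonical attribute
// order so that repeated marshals of the same node produce identical bytes.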
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) if node.Text != "" { e.EncodeToken(xml.CharData([]byte(node.Text))) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index d67cfde7..b4a4e8c4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -460,8 +460,7 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request output = &DeleteBucketOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -537,8 +536,7 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt output = &DeleteBucketAnalyticsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -547,6 +545,10 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // Deletes an analytics configuration for the bucket (specified by the analytics // configuration ID). // +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
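The DeleteBucket* request constructors in this file now replace the REST XML unmarshal handler with protocol.UnmarshalDiscardBodyHandler in a single Swap call rather than a Remove followed by PushBackNamed. A minimal sketch of the Swap semantics on a standalone handler list, assuming the request.HandlerList API vendored above; the handler names and print statements are illustrative only:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var l request.HandlerList

	l.PushBackNamed(request.NamedHandler{
		Name: "example.ParseBody",
		Fn:   func(r *request.Request) { fmt.Println("parse body") },
	})

	// Swap replaces the handler registered under the given name in place,
	// keeping its position in the list; Remove followed by PushBackNamed
	// would instead append the replacement at the end.
	swapped := l.Swap("example.ParseBody", request.NamedHandler{
		Name: "example.DiscardBody",
		Fn:   func(r *request.Request) { fmt.Println("discard body") },
	})
	fmt.Println("swapped:", swapped) // swapped: true

	l.Run(nil) // prints "discard body"
}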
@@ -614,8 +616,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request output = &DeleteBucketCorsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -690,8 +691,7 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( output = &DeleteBucketEncryptionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -766,8 +766,7 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent output = &DeleteBucketInventoryConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -843,8 +842,7 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re output = &DeleteBucketLifecycleOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -919,8 +917,7 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC output = &DeleteBucketMetricsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -996,8 +993,7 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req output = &DeleteBucketPolicyOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -1072,15 +1068,14 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) output = &DeleteBucketReplicationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // DeleteBucketReplication API operation for Amazon Simple Storage Service. // // Deletes the replication configuration from the bucket. For information about -// replication configuration, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) // in the Amazon S3 Developer Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -1150,8 +1145,7 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r output = &DeleteBucketTaggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -1226,8 +1220,7 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r output = &DeleteBucketWebsiteOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -1527,14 +1520,13 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) output = &DeletePublicAccessBlockOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // DeletePublicAccessBlock API operation for Amazon Simple Storage Service. // -// Removes the Public Access Block configuration for an Amazon S3 bucket. +// Removes the PublicAccessBlock configuration from an Amazon S3 bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2059,7 +2051,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // GetBucketLifecycle API operation for Amazon Simple Storage Service. // -// Deprecated, see the GetBucketLifecycleConfiguration operation. +// No longer used, see the GetBucketLifecycleConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2440,7 +2432,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // GetBucketNotification API operation for Amazon Simple Storage Service. // -// Deprecated, see the GetBucketNotificationConfiguration operation. +// No longer used, see the GetBucketNotificationConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3229,6 +3221,230 @@ func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, return out, req.Send() } +const opGetObjectLegalHold = "GetObjectLegalHold" + +// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLegalHold for more information on using the GetObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectLegalHoldRequest method. +// req, resp := client.GetObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opGetObjectLegalHold, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &GetObjectLegalHoldInput{} + } + + output = &GetObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLegalHold API operation for Amazon Simple Storage Service. +// +// Gets an object's current Legal Hold status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + return out, req.Send() +} + +// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectLockConfiguration = "GetObjectLockConfiguration" + +// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectLockConfigurationRequest method. 
+// req, resp := client.GetObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opGetObjectLockConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &GetObjectLockConfigurationInput{} + } + + output = &GetObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the object lock configuration for a bucket. The rule specified in the +// object lock configuration will be applied by default to every new object +// placed in the specified bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectRetention = "GetObjectRetention" + +// GetObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectRetention for more information on using the GetObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectRetentionRequest method. 
+// req, resp := client.GetObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { + op := &request.Operation{ + Name: opGetObjectRetention, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &GetObjectRetentionInput{} + } + + output = &GetObjectRetentionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectRetention API operation for Amazon Simple Storage Service. +// +// Retrieves an object's retention settings. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + return out, req.Send() +} + +// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetObjectTagging = "GetObjectTagging" // GetObjectTaggingRequest generates a "aws/request.Request" representing the @@ -3421,7 +3637,7 @@ func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req // GetPublicAccessBlock API operation for Amazon Simple Storage Service. // -// Retrieves the Public Access Block configuration for an Amazon S3 bucket. +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3490,8 +3706,7 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou output = &HeadBucketOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -3999,7 +4214,7 @@ func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipa // // Example iterating over at most 3 pages of a ListMultipartUploads operation. 
// pageNum := 0 // err := client.ListMultipartUploadsPages(params, -// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4129,7 +4344,7 @@ func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVer // // Example iterating over at most 3 pages of a ListObjectVersions operation. // pageNum := 0 // err := client.ListObjectVersionsPages(params, -// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4266,7 +4481,7 @@ func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, op // // Example iterating over at most 3 pages of a ListObjects operation. // pageNum := 0 // err := client.ListObjectsPages(params, -// func(page *ListObjectsOutput, lastPage bool) bool { +// func(page *s3.ListObjectsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4404,7 +4619,7 @@ func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input // // Example iterating over at most 3 pages of a ListObjectsV2 operation. // pageNum := 0 // err := client.ListObjectsV2Pages(params, -// func(page *ListObjectsV2Output, lastPage bool) bool { +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4534,7 +4749,7 @@ func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts . // // Example iterating over at most 3 pages of a ListParts operation. // pageNum := 0 // err := client.ListPartsPages(params, -// func(page *ListPartsOutput, lastPage bool) bool { +// func(page *s3.ListPartsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4612,8 +4827,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC output = &PutBucketAccelerateConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4688,8 +4902,7 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request output = &PutBucketAclOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4764,8 +4977,7 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon output = &PutBucketAnalyticsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4841,8 +5053,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque output = &PutBucketCorsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + 
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4917,8 +5128,7 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r output = &PutBucketEncryptionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4994,8 +5204,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon output = &PutBucketInventoryConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5076,14 +5285,13 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req output = &PutBucketLifecycleOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketLifecycle API operation for Amazon Simple Storage Service. // -// Deprecated, see the PutBucketLifecycleConfiguration operation. +// No longer used, see the PutBucketLifecycleConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5156,8 +5364,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon output = &PutBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5233,8 +5440,7 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request output = &PutBucketLoggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5311,8 +5517,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu output = &PutBucketMetricsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5393,14 +5598,13 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re output = &PutBucketNotificationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketNotification API operation for Amazon Simple Storage Service. 
// -// Deprecated, see the PutBucketNotificationConfiguraiton operation. +// No longer used, see the PutBucketNotificationConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5473,8 +5677,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat output = &PutBucketNotificationConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5549,15 +5752,13 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R output = &PutBucketPolicyOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketPolicy API operation for Amazon Simple Storage Service. // -// Replaces a policy on a bucket. If the bucket already has a policy, the one -// in this request completely replaces it. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5626,15 +5827,14 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req output = &PutBucketReplicationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketReplication API operation for Amazon Simple Storage Service. // // Creates a replication configuration or replaces an existing one. For more -// information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) // in the Amazon S3 Developer Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -5704,8 +5904,7 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) output = &PutBucketRequestPaymentOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5784,8 +5983,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request output = &PutBucketTaggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5860,8 +6058,7 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r output = &PutBucketVersioningOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5937,8 +6134,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request output = &PutBucketWebsiteOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -6128,274 +6324,497 @@ func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, return out, req.Send() } -const opPutObjectTagging = "PutObjectTagging" +const opPutObjectLegalHold = "PutObjectLegalHold" -// PutObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectTagging operation. The "output" return +// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLegalHold operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutObjectTagging for more information on using the PutObjectTagging +// See PutObjectLegalHold for more information on using the PutObjectLegalHold // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutObjectTaggingRequest method. -// req, resp := client.PutObjectTaggingRequest(params) +// // Example sending a request using the PutObjectLegalHoldRequest method. 
+// req, resp := client.PutObjectLegalHoldRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { op := &request.Operation{ - Name: opPutObjectTagging, + Name: opPutObjectLegalHold, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?tagging", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", } if input == nil { - input = &PutObjectTaggingInput{} + input = &PutObjectLegalHoldInput{} } - output = &PutObjectTaggingOutput{} + output = &PutObjectLegalHoldOutput{} req = c.newRequest(op, input, output) return } -// PutObjectTagging API operation for Amazon Simple Storage Service. +// PutObjectLegalHold API operation for Amazon Simple Storage Service. // -// Sets the supplied tag-set to an object that already exists in a bucket +// Applies a Legal Hold configuration to the specified object. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) +// API operation PutObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) return out, req.Send() } -// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of // the ability to pass a context and additional request options. // -// See PutObjectTagging for details on how to use this API operation. +// See PutObjectLegalHold for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) +func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutPublicAccessBlock = "PutPublicAccessBlock" +const opPutObjectLockConfiguration = "PutObjectLockConfiguration" -// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the PutPublicAccessBlock operation. 
The "output" return +// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLockConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutPublicAccessBlockRequest method. -// req, resp := client.PutPublicAccessBlockRequest(params) +// // Example sending a request using the PutObjectLockConfigurationRequest method. +// req, resp := client.PutObjectLockConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock -func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { op := &request.Operation{ - Name: opPutPublicAccessBlock, + Name: opPutObjectLockConfiguration, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?publicAccessBlock", + HTTPPath: "/{Bucket}?object-lock", } if input == nil { - input = &PutPublicAccessBlockInput{} + input = &PutObjectLockConfigurationInput{} } - output = &PutPublicAccessBlockOutput{} + output = &PutObjectLockConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Creates or modifies the Public Access Block configuration for an Amazon S3 -// bucket. +// Places an object lock configuration on the specified bucket. The rule specified +// in the object lock configuration will be applied by default to every new +// object placed in the specified bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutPublicAccessBlock for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock -func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { - req, out := c.PutPublicAccessBlockRequest(input) +// API operation PutObjectLockConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) return out, req.Send() } -// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of // the ability to pass a context and additional request options. // -// See PutPublicAccessBlock for details on how to use this API operation. +// See PutObjectLockConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { - req, out := c.PutPublicAccessBlockRequest(input) +func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRestoreObject = "RestoreObject" +const opPutObjectRetention = "PutObjectRetention" -// RestoreObjectRequest generates a "aws/request.Request" representing the -// client's request for the RestoreObject operation. The "output" return +// PutObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectRetention operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RestoreObject for more information on using the RestoreObject +// See PutObjectRetention for more information on using the PutObjectRetention // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RestoreObjectRequest method. -// req, resp := client.RestoreObjectRequest(params) +// // Example sending a request using the PutObjectRetentionRequest method. 
+// req, resp := client.PutObjectRetentionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { op := &request.Operation{ - Name: opRestoreObject, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?restore", + Name: opPutObjectRetention, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?retention", } if input == nil { - input = &RestoreObjectInput{} + input = &PutObjectRetentionInput{} } - output = &RestoreObjectOutput{} + output = &PutObjectRetentionOutput{} req = c.newRequest(op, input, output) return } -// RestoreObject API operation for Amazon Simple Storage Service. +// PutObjectRetention API operation for Amazon Simple Storage Service. // -// Restores an archived copy of an object back into Amazon S3 +// Places an Object Retention configuration on an object. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation RestoreObject for usage and error information. -// -// Returned Error Codes: -// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) +// API operation PutObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) return out, req.Send() } -// RestoreObjectWithContext is the same as RestoreObject with the addition of +// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of // the ability to pass a context and additional request options. // -// See RestoreObject for details on how to use this API operation. +// See PutObjectRetention for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) +func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opSelectObjectContent = "SelectObjectContent" +const opPutObjectTagging = "PutObjectTagging" -// SelectObjectContentRequest generates a "aws/request.Request" representing the -// client's request for the SelectObjectContent operation. The "output" return +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See SelectObjectContent for more information on using the SelectObjectContent +// See PutObjectTagging for more information on using the PutObjectTagging // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SelectObjectContentRequest method. -// req, resp := client.SelectObjectContentRequest(params) +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent -func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { op := &request.Operation{ - Name: opSelectObjectContent, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", } if input == nil { - input = &SelectObjectContentInput{} + input = &PutObjectTaggingInput{} } - output = &SelectObjectContentOutput{} + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// Sets the supplied tag-set to an object that already exists in a bucket +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) {
+	req, out := c.PutObjectTaggingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutPublicAccessBlock = "PutPublicAccessBlock"
+
+// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the
+// client's request for the PutPublicAccessBlock operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the PutPublicAccessBlockRequest method.
+//    req, resp := client.PutPublicAccessBlockRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock
+func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) {
+	op := &request.Operation{
+		Name:       opPutPublicAccessBlock,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?publicAccessBlock",
+	}
+
+	if input == nil {
+		input = &PutPublicAccessBlockInput{}
+	}
+
+	output = &PutPublicAccessBlockOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutPublicAccessBlock API operation for Amazon Simple Storage Service.
+//
+// Creates or modifies the PublicAccessBlock configuration for an Amazon S3
+// bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutPublicAccessBlock for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock
+func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) {
+	req, out := c.PutPublicAccessBlockRequest(input)
+	return out, req.Send()
+}
+
+// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutPublicAccessBlock for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) {
+	req, out := c.PutPublicAccessBlockRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates a "aws/request.Request" representing the
+// client's request for the RestoreObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See RestoreObject for more information on using the RestoreObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the RestoreObjectRequest method.
+//    req, resp := client.RestoreObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+	op := &request.Operation{
+		Name:       opRestoreObject,
+		HTTPMethod: "POST",
+		HTTPPath:   "/{Bucket}/{Key+}?restore",
+	}
+
+	if input == nil {
+		input = &RestoreObjectInput{}
+	}
+
+	output = &RestoreObjectOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// RestoreObject API operation for Amazon Simple Storage Service.
+//
+// Restores an archived copy of an object back into Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
+//   This operation is not allowed against this storage tier.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+	req, out := c.RestoreObjectRequest(input)
+	return out, req.Send()
+}
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
+	req, out := c.RestoreObjectRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opSelectObjectContent = "SelectObjectContent"
+
+// SelectObjectContentRequest generates a "aws/request.Request" representing the
+// client's request for the SelectObjectContent operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} req = c.newRequest(op, input, output) req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) @@ -6594,13 +7013,16 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp return out, req.Send() } -// Specifies the days since the initiation of an Incomplete Multipart Upload -// that Lifecycle will wait before permanently removing all parts of the upload. +// Specifies the days since the initiation of an incomplete multipart upload +// that Amazon S3 will wait before permanently removing all parts of the upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon Simple Storage Service Developer Guide. type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` - // Indicates the number of days that must pass since initiation for Lifecycle - // to abort an Incomplete Multipart Upload. + // Specifies the number of days after which Amazon S3 aborts an incomplete multipart + // upload. DaysAfterInitiation *int64 `type:"integer"` } @@ -6621,11 +7043,15 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI } type AbortMultipartUploadInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + // Name of the bucket to which the multipart upload was initiated. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Key of the object for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -6635,6 +7061,8 @@ type AbortMultipartUploadInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Upload ID that identifies the multipart upload. 
+ // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -6655,6 +7083,9 @@ func (s *AbortMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -6726,10 +7157,13 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart return s } +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. type AccelerateConfiguration struct { _ struct{} `type:"structure"` - // The accelerate configuration of the bucket. + // Specifies the transfer acceleration status of the bucket. Status *string `type:"string" enum:"BucketAccelerateStatus"` } @@ -6749,12 +7183,14 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { return s } +// Contains the elements that set the ACL permissions for an object per grantee. type AccessControlPolicy struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` } @@ -6804,7 +7240,9 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { type AccessControlTranslation struct { _ struct{} `type:"structure"` - // The override value for the owner of the replica object. + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon Simple Storage Service API Reference. // // Owner is a required field Owner *string `type:"string" required:"true" enum:"OwnerOverride"` @@ -6839,10 +7277,14 @@ func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation return s } +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any combination, +// and an object must match all of the predicates for the filter to apply. type AnalyticsAndOperator struct { _ struct{} `type:"structure"` - // The prefix to use when evaluating an AND predicate. + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. Prefix *string `type:"string"` // The list of tags to use when evaluating an AND predicate. @@ -6891,6 +7333,11 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { return s } +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. +// +// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html) +// in the Amazon Simple Storage Service API Reference. type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -6899,13 +7346,13 @@ type AnalyticsConfiguration struct { // If no filter is provided, all objects will be considered in any analysis. Filter *AnalyticsFilter `type:"structure"` - // The identifier used to represent an analytics configuration. 
+ // The ID that identifies the analytics configuration. // // Id is a required field Id *string `type:"string" required:"true"` - // If present, it indicates that data related to access patterns will be collected - // and made available to analyze the tradeoffs between different storage classes. + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. // // StorageClassAnalysis is a required field StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` @@ -6965,6 +7412,7 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis return s } +// Where to publish the analytics results. type AnalyticsExportDestination struct { _ struct{} `type:"structure"` @@ -7073,7 +7521,7 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` - // The Amazon resource name (ARN) of the bucket to which data is exported. + // The Amazon Resource Name (ARN) of the bucket to which data is exported. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -7082,13 +7530,12 @@ type AnalyticsS3BucketDestination struct { // the owner will not be validated prior to exporting data. BucketAccountId *string `type:"string"` - // The file format used when exporting data to Amazon S3. + // Specifies the file format used when exporting data to Amazon S3. // // Format is a required field Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` - // The prefix to use when exporting data. The exported data begins with this - // prefix. + // The prefix to use when exporting data. The prefix is prepended to all results. Prefix *string `type:"string"` } @@ -7181,9 +7628,14 @@ func (s *Bucket) SetName(v string) *Bucket { return s } +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // // Rules is a required field Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } @@ -7230,9 +7682,10 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec type BucketLoggingStatus struct { _ struct{} `type:"structure"` - // Container for logging information. Presence of this element indicates that - // logging is enabled. Parameters TargetBucket and TargetPrefix are required - // in this case. + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -7267,9 +7720,15 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin return s } +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. 
type CORSConfiguration struct { _ struct{} `type:"structure"` + // A set of allowed origins and methods. + // // CORSRules is a required field CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` } @@ -7313,14 +7772,18 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { return s } +// Specifies a cross-origin access rule for an Amazon S3 bucket. type CORSRule struct { _ struct{} `type:"structure"` - // Specifies which headers are allowed in a pre-flight OPTIONS request. + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - // Identifies HTTP methods that the domain/origin specified in the rule is allowed - // to execute. + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. // // AllowedMethods is a required field AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` @@ -7621,7 +8084,7 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { } type CompleteMultipartUploadInput struct { - _ struct{} `type:"structure" payload:"MultipartUpload"` + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -7657,6 +8120,9 @@ func (s *CompleteMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -7732,7 +8198,7 @@ type CompleteMultipartUploadOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -7868,6 +8334,7 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { return s } +// Specifies a condition that must be met for a redirect to apply. type Condition struct { _ struct{} `type:"structure"` @@ -7937,7 +8404,7 @@ func (s *ContinuationEvent) UnmarshalEvent( } type CopyObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"CopyObjectRequest" type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` @@ -7987,7 +8454,7 @@ type CopyObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. 
- CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -8019,6 +8486,15 @@ type CopyObjectInput struct { // with metadata provided in the request. MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + // Specifies whether you want to apply a Legal Hold to the copied object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The object lock mode that you want to apply to the copied object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want the copied object's object lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. // Documentation on downloading objects from requester pays buckets can be found @@ -8033,18 +8509,23 @@ type CopyObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). 
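The hunks above add object-lock fields (ObjectLockMode, ObjectLockRetainUntilDate, ObjectLockLegalHoldStatus) and an SSE-KMS encryption context to CopyObjectInput. As a minimal usage sketch, not part of the patch itself: the snippet below copies an object and retains the copy under GOVERNANCE mode for 30 days via the new fields. It assumes region and credentials are resolved from the environment, that the destination bucket was created with object lock enabled, and that the s3.ObjectLockModeGovernance constant ships with this same SDK update; bucket and key names are placeholders.

    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	// Region and credentials come from the environment/shared config.
    	svc := s3.New(session.Must(session.NewSession()))

    	// Copy an object and place the copy under GOVERNANCE-mode retention
    	// for 30 days, using the ObjectLock* fields added in this patch.
    	out, err := svc.CopyObject(&s3.CopyObjectInput{
    		Bucket:                    aws.String("dst-bucket"),  // placeholder
    		Key:                       aws.String("report.csv"),  // placeholder
    		CopySource:                aws.String("src-bucket/report.csv"),
    		ObjectLockMode:            aws.String(s3.ObjectLockModeGovernance),
    		ObjectLockRetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(out)
    }

The same three fields appear on CreateMultipartUploadInput later in this patch, so the pattern carries over to multipart uploads unchanged.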
@@ -8084,6 +8565,9 @@ func (s *CopyObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.CopySource == nil { invalidParams.Add(request.NewErrParamRequired("CopySource")) } @@ -8252,6 +8736,24 @@ func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetRequestPayer sets the RequestPayer field's value. func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { s.RequestPayer = &v @@ -8283,6 +8785,12 @@ func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { s.SSEKMSKeyId = &v @@ -8343,9 +8851,14 @@ type CopyObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -8401,6 +8914,12 @@ func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { s.SSEKMSKeyId = &v @@ -8485,7 +9004,7 @@ type CreateBucketConfiguration struct { _ struct{} `type:"structure"` // Specifies the region where the bucket will be created. If you don't specify - // a region, the bucket will be created in US Standard. + // a region, the bucket is created in US East (N. Virginia) Region (us-east-1). 
LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -8506,7 +9025,7 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke } type CreateBucketInput struct { - _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` @@ -8531,6 +9050,10 @@ type CreateBucketInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Specifies whether you want Amazon S3 object lock to be enabled for the new + // bucket. + ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` } // String returns the string representation @@ -8549,6 +9072,9 @@ func (s *CreateBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8611,6 +9137,12 @@ func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { return s } +// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. +func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { + s.ObjectLockEnabledForBucket = &v + return s +} + type CreateBucketOutput struct { _ struct{} `type:"structure"` @@ -8634,7 +9166,7 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { } type CreateMultipartUploadInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` @@ -8680,6 +9212,15 @@ type CreateMultipartUploadInput struct { // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + // Specifies whether you want to apply a Legal Hold to the uploaded object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // Specifies the object lock mode that you want to apply to the uploaded object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // Specifies the date and time when you want the object lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. // Documentation on downloading objects from requester pays buckets can be found @@ -8694,18 +9235,23 @@ type CreateMultipartUploadInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. 
- SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -8739,6 +9285,9 @@ func (s *CreateMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -8831,15 +9380,33 @@ func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipart return s } -// SetKey sets the Key field's value. -func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { - s.Key = &v +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { + s.ObjectLockMode = &v return s } -// SetMetadata sets the Metadata field's value. -func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { - s.Metadata = v +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. 
+func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { + s.ObjectLockRetainUntilDate = &v return s } @@ -8874,6 +9441,12 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMulti return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { s.SSEKMSKeyId = &v @@ -8934,9 +9507,14 @@ type CreateMultipartUploadOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -9005,6 +9583,12 @@ func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMult return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { s.SSEKMSKeyId = &v @@ -9023,6 +9607,50 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } +// The container element for specifying the default object lock retention settings +// for new objects placed in the specified bucket. +type DefaultRetention struct { + _ struct{} `type:"structure"` + + // The number of days that you want to specify for the default retention period. + Days *int64 `type:"integer"` + + // The default object lock retention mode you want to apply to new objects placed + // in the specified bucket. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The number of years that you want to specify for the default retention period. + Years *int64 `type:"integer"` +} + +// String returns the string representation +func (s DefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultRetention) GoString() string { + return s.String() +} + +// SetDays sets the Days field's value. +func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { + s.Days = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *DefaultRetention) SetMode(v string) *DefaultRetention { + s.Mode = &v + return s +} + +// SetYears sets the Years field's value. 
+func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { + s.Years = &v + return s +} + type Delete struct { _ struct{} `type:"structure"` @@ -9080,14 +9708,14 @@ func (s *Delete) SetQuiet(v bool) *Delete { } type DeleteBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` // The name of the bucket from which an analytics configuration is deleted. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -9109,6 +9737,9 @@ func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -9153,7 +9784,7 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { } type DeleteBucketCorsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9175,6 +9806,9 @@ func (s *DeleteBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9210,7 +9844,7 @@ func (s DeleteBucketCorsOutput) GoString() string { } type DeleteBucketEncryptionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` // The name of the bucket containing the server-side encryption configuration // to delete. @@ -9235,6 +9869,9 @@ func (s *DeleteBucketEncryptionInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9270,7 +9907,7 @@ func (s DeleteBucketEncryptionOutput) GoString() string { } type DeleteBucketInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9292,6 +9929,9 @@ func (s *DeleteBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9313,7 +9953,7 @@ func (s *DeleteBucketInput) getBucket() (v string) { } type DeleteBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` // The name of the bucket containing the inventory configuration to delete. 
// @@ -9342,6 +9982,9 @@ func (s *DeleteBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -9386,7 +10029,7 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { } type DeleteBucketLifecycleInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9408,6 +10051,9 @@ func (s *DeleteBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9443,7 +10089,7 @@ func (s DeleteBucketLifecycleOutput) GoString() string { } type DeleteBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` // The name of the bucket containing the metrics configuration to delete. // @@ -9472,6 +10118,9 @@ func (s *DeleteBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -9530,7 +10179,7 @@ func (s DeleteBucketOutput) GoString() string { } type DeleteBucketPolicyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9552,6 +10201,9 @@ func (s *DeleteBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9587,7 +10239,7 @@ func (s DeleteBucketPolicyOutput) GoString() string { } type DeleteBucketReplicationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` // The bucket name. 
// @@ -9614,6 +10266,9 @@ func (s *DeleteBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9649,7 +10304,7 @@ func (s DeleteBucketReplicationOutput) GoString() string { } type DeleteBucketTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9671,6 +10326,9 @@ func (s *DeleteBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9706,7 +10364,7 @@ func (s DeleteBucketTaggingOutput) GoString() string { } type DeleteBucketWebsiteInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9728,6 +10386,9 @@ func (s *DeleteBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9849,11 +10510,15 @@ func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { } type DeleteObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions + // to process this operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -9887,6 +10552,9 @@ func (s *DeleteObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -9913,6 +10581,12 @@ func (s *DeleteObjectInput) getBucket() (v string) { return *s.Bucket } +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { + s.BypassGovernanceRetention = &v + return s +} + // SetKey sets the Key field's value. 
func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { s.Key = &v @@ -9982,7 +10656,7 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { } type DeleteObjectTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10010,6 +10684,9 @@ func (s *DeleteObjectTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -10072,11 +10749,16 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO } type DeleteObjectsInput struct { - _ struct{} `type:"structure" payload:"Delete"` + _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies whether you want to delete this object even if it has a Governance-type + // object lock in place. You must have sufficient permissions to perform this + // operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -10107,6 +10789,9 @@ func (s *DeleteObjectsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Delete == nil { invalidParams.Add(request.NewErrParamRequired("Delete")) } @@ -10135,6 +10820,12 @@ func (s *DeleteObjectsInput) getBucket() (v string) { return *s.Bucket } +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { + s.BypassGovernanceRetention = &v + return s +} + // SetDelete sets the Delete field's value. func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { s.Delete = v @@ -10194,10 +10885,9 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { } type DeletePublicAccessBlockInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` - // The Amazon S3 bucket whose Public Access Block configuration you want to - // delete. + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10219,6 +10909,9 @@ func (s *DeletePublicAccessBlockInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10299,33 +10992,33 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { return s } -// A container for information about the replication destination. 
+// Specifies information about where to publish analysis or configuration results +// for an Amazon S3 bucket. type Destination struct { _ struct{} `type:"structure"` - // A container for information about access control for replicas. - // - // Use this element only in a cross-account scenario where source and destination - // bucket owners are not the same to change replica ownership to the AWS account - // that owns the destination bucket. If you don't add this element to the replication - // configuration, the replicas are owned by same AWS account that owns the source - // object. + // Specify this only in a cross-account scenario (where source and destination + // bucket owners are not the same), and you want to change replica ownership + // to the AWS account that owns the destination bucket. If this is not specified + // in the replication configuration, the replicas are owned by same AWS account + // that owns the source object. AccessControlTranslation *AccessControlTranslation `type:"structure"` - // The account ID of the destination bucket. Currently, Amazon S3 verifies this - // value only if Access Control Translation is enabled. - // - // In a cross-account scenario, if you change replica ownership to the AWS account - // that owns the destination bucket by adding the AccessControlTranslation element, - // this is the account ID of the owner of the destination bucket. + // Destination bucket owner account ID. In a cross-account scenario, if you + // direct Amazon S3 to change replica ownership to the AWS account that owns + // the destination bucket by specifying the AccessControlTranslation property, + // this is the account ID of the destination bucket owner. For more information, + // see Cross-Region Replication Additional Configuration: Change Replica Owner + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in + // the Amazon Simple Storage Service Developer Guide. Account *string `type:"string"` // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to // store replicas of the object identified by the rule. // - // If there are multiple rules in your replication configuration, all rules - // must specify the same bucket as the destination. A replication configuration - // can replicate objects to only one destination bucket. + // A replication configuration can replicate objects to only one destination + // bucket. If there are multiple rules in your replication configuration, all + // rules must specify the same destination bucket. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -10334,8 +11027,13 @@ type Destination struct { // is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // The class of storage used to store the object. By default Amazon S3 uses - // storage class of the source object when creating a replica. + // The storage class to use when replicating objects, such as standard or reduced + // redundancy. By default, Amazon S3 uses the storage class of the source object + // to create the object replica. + // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon Simple Storage Service API Reference. 
StorageClass *string `type:"string" enum:"StorageClass"` } @@ -10421,7 +11119,7 @@ type Encryption struct { // If the encryption type is aws:kms, this optional value specifies the AWS // KMS key ID to use for encryption of job results. - KMSKeyId *string `type:"string"` + KMSKeyId *string `type:"string" sensitive:"true"` } // String returns the string representation @@ -10465,13 +11163,13 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption { return s } -// A container for information about the encryption-based configuration for -// replicas. +// Specifies encryption-related information for an Amazon S3 bucket that is +// a destination for replicated objects. type EncryptionConfiguration struct { _ struct{} `type:"structure"` - // The ID of the AWS KMS key for the AWS Region where the destination bucket - // resides. Amazon S3 uses this key to encrypt the replica object. + // Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket. + // Amazon S3 uses this key to encrypt replica objects. ReplicaKmsKeyID *string `type:"string"` } @@ -10604,18 +11302,19 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { return s } -// A container for a key value pair that defines the criteria for the filter -// rule. +// Specifies the Amazon S3 object key name to filter on and whether to filter +// on the suffix or prefix of the key name. type FilterRule struct { _ struct{} `type:"structure"` // The object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. The maximum prefix length is 1,024 characters. - // Overlapping prefixes and suffixes are not supported. For more information, - // see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string" enum:"FilterRuleName"` + // The value that the filter searches for in object key names. Value *string `type:"string"` } @@ -10642,7 +11341,7 @@ func (s *FilterRule) SetValue(v string) *FilterRule { } type GetBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` // Name of the bucket for which the accelerate configuration is retrieved. 
// @@ -10666,6 +11365,9 @@ func (s *GetBucketAccelerateConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10710,7 +11412,7 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA } type GetBucketAclInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10732,6 +11434,9 @@ func (s *GetBucketAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10784,14 +11489,14 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { } type GetBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` // The name of the bucket from which an analytics configuration is retrieved. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -10813,6 +11518,9 @@ func (s *GetBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -10866,7 +11574,7 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana } type GetBucketCorsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10888,6 +11596,9 @@ func (s *GetBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10931,7 +11642,7 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { } type GetBucketEncryptionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` // The name of the bucket from which the server-side encryption configuration // is retrieved. 
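A recurring change in the hunks above and below is the new minimum-length check on Bucket. For illustration only — a minimal sketch assuming nothing beyond the generated Validate methods shown in this diff — this is how the check surfaces to a caller before any request is sent:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// A non-nil but empty bucket name now trips the added
	// len(*s.Bucket) < 1 check instead of reaching the service.
	input := &s3.GetBucketEncryptionInput{
		Bucket: aws.String(""),
	}
	if err := input.Validate(); err != nil {
		// Reports a "minimum field size of 1" validation error for Bucket.
		fmt.Println(err)
	}
}
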
@@ -10956,6 +11667,9 @@ func (s *GetBucketEncryptionInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10979,8 +11693,7 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) { type GetBucketEncryptionOutput struct { _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` - // Container for server-side encryption configuration rules. Currently S3 supports - // one rule only. + // Specifies the default server-side-encryption configuration. ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` } @@ -11001,7 +11714,7 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv } type GetBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` // The name of the bucket containing the inventory configuration to retrieve. // @@ -11030,6 +11743,9 @@ func (s *GetBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -11083,7 +11799,7 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv } type GetBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11105,6 +11821,9 @@ func (s *GetBucketLifecycleConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11148,7 +11867,7 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge } type GetBucketLifecycleInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11170,6 +11889,9 @@ func (s *GetBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11213,7 +11935,7 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput } type GetBucketLocationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11235,6 +11957,9 @@ func (s *GetBucketLocationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return 
invalidParams @@ -11278,7 +12003,7 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca } type GetBucketLoggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11300,6 +12025,9 @@ func (s *GetBucketLoggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11323,9 +12051,10 @@ func (s *GetBucketLoggingInput) getBucket() (v string) { type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` - // Container for logging information. Presence of this element indicates that - // logging is enabled. Parameters TargetBucket and TargetPrefix are required - // in this case. + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -11346,7 +12075,7 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket } type GetBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` // The name of the bucket containing the metrics configuration to retrieve. // @@ -11375,6 +12104,9 @@ func (s *GetBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -11428,7 +12160,7 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics } type GetBucketNotificationConfigurationRequest struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` // Name of the bucket to get the notification configuration for. 
// @@ -11452,6 +12184,9 @@ func (s *GetBucketNotificationConfigurationRequest) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11473,7 +12208,7 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { } type GetBucketPolicyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11495,6 +12230,9 @@ func (s *GetBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11539,9 +12277,9 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { } type GetBucketPolicyStatusInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` - // The name of the Amazon S3 bucket whose public-policy status you want to retrieve. + // The name of the Amazon S3 bucket whose policy status you want to retrieve. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11563,6 +12301,9 @@ func (s *GetBucketPolicyStatusInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11586,7 +12327,7 @@ func (s *GetBucketPolicyStatusInput) getBucket() (v string) { type GetBucketPolicyStatusOutput struct { _ struct{} `type:"structure" payload:"PolicyStatus"` - // The public-policy status for this bucket. + // The policy status for the specified bucket. 
PolicyStatus *PolicyStatus `type:"structure"` } @@ -11607,7 +12348,7 @@ func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucke } type GetBucketReplicationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11629,6 +12370,9 @@ func (s *GetBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11674,7 +12418,7 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC } type GetBucketRequestPaymentInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11696,6 +12440,9 @@ func (s *GetBucketRequestPaymentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11740,7 +12487,7 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym } type GetBucketTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11762,6 +12509,9 @@ func (s *GetBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11806,7 +12556,7 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { } type GetBucketVersioningInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11828,6 +12578,9 @@ func (s *GetBucketVersioningInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11883,7 +12636,7 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp } type GetBucketWebsiteInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11905,6 +12658,9 @@ func (s *GetBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11932,6 +12688,8 @@ type GetBucketWebsiteOutput struct { IndexDocument *IndexDocument 
`type:"structure"` + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` @@ -11972,7 +12730,7 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb } type GetObjectAclInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12006,6 +12764,9 @@ func (s *GetObjectAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -12092,7 +12853,7 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { } type GetObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12157,7 +12918,7 @@ type GetObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -12184,6 +12945,9 @@ func (s *GetObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -12282,46 +13046,226 @@ func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { return s } -// SetResponseContentType sets the ResponseContentType field's value. -func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { - s.ResponseContentType = &v - return s +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
+func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +type GetObjectLegalHoldInput struct { + _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` + + // The bucket containing the object whose Legal Hold status you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The key name for the object whose Legal Hold status you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object whose Legal Hold status you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { + s.VersionId = &v + return s +} + +type GetObjectLegalHoldOutput struct { + _ struct{} `type:"structure" payload:"LegalHold"` + + // The current Legal Hold status for the specified object. 
+ LegalHold *ObjectLockLegalHold `type:"structure"` +} + +// String returns the string representation +func (s GetObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetLegalHold sets the LegalHold field's value. +func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { + s.LegalHold = v + return s +} + +type GetObjectLockConfigurationInput struct { + _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` + + // The bucket whose object lock configuration you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) } -// SetResponseExpires sets the ResponseExpires field's value. -func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { - s.ResponseExpires = &v - return s +// GoString returns the string representation +func (s GetObjectLockConfigurationInput) GoString() string { + return s.String() } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { - s.SSECustomerAlgorithm = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { - s.SSECustomerKey = &v +// SetBucket sets the Bucket field's value. +func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { + s.Bucket = &v return s } -func (s *GetObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { +func (s *GetObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { return v } - return *s.SSECustomerKey + return *s.Bucket } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { - s.SSECustomerKeyMD5 = &v - return s +type GetObjectLockConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + + // The specified bucket's object lock configuration. + ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` } -// SetVersionId sets the VersionId field's value. -func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { - s.VersionId = &v +// String returns the string representation +func (s GetObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. 
+func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { + s.ObjectLockConfiguration = v return s } @@ -12385,6 +13329,16 @@ type GetObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + // Indicates whether this object has an active legal hold. This field is only + // returned if you have permission to view an object's legal hold status. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The object lock mode currently in place for this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when this object's object lock will expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` @@ -12410,7 +13364,7 @@ type GetObjectOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -12536,6 +13490,24 @@ func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetPartsCount sets the PartsCount field's value. func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { s.PartsCount = &v @@ -12608,8 +13580,117 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput return s } +type GetObjectRetentionInput struct { + _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` + + // The bucket containing the object whose retention settings you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The key name for the object whose retention settings you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID for the object whose retention settings you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *GetObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v + return s +} + +type GetObjectRetentionOutput struct { + _ struct{} `type:"structure" payload:"Retention"` + + // The container element for an object's retention settings. + Retention *ObjectLockRetention `type:"structure"` +} + +// String returns the string representation +func (s GetObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRetention sets the Retention field's value. 
+func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { + s.Retention = v + return s +} + type GetObjectTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12636,6 +13717,9 @@ func (s *GetObjectTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -12706,7 +13790,7 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput } type GetObjectTorrentInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12737,6 +13821,9 @@ func (s *GetObjectTorrentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -12808,10 +13895,10 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu } type GetPublicAccessBlockInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` - // The name of the Amazon S3 bucket whose Public Access Block configuration - // you want to retrieve. + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12833,6 +13920,9 @@ func (s *GetPublicAccessBlockInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -12856,8 +13946,8 @@ func (s *GetPublicAccessBlockInput) getBucket() (v string) { type GetPublicAccessBlockOutput struct { _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` - // The Public Access Block configuration currently in effect for this Amazon - // S3 bucket. + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. 
PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` } @@ -13036,7 +14126,7 @@ func (s *Grantee) SetURI(v string) *Grantee { } type HeadBucketInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13058,6 +14148,9 @@ func (s *HeadBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -13093,7 +14186,7 @@ func (s HeadBucketOutput) GoString() string { } type HeadObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"HeadObjectRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13141,7 +14234,7 @@ type HeadObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -13168,6 +14261,9 @@ func (s *HeadObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -13327,6 +14423,15 @@ type HeadObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + // The Legal Hold status for the specified object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The object lock mode currently in place for this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when this object's object lock expires. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` @@ -13352,7 +14457,7 @@ type HeadObjectOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). 
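The hunks above surface the new Object Lock headers (legal hold status, mode, retain-until date) on HeadObjectOutput. As a minimal usage sketch of how a caller might read them, outside this patch, assuming a configured default session; the bucket and key names are placeholders:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder name
		Key:    aws.String("example-key"),    // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}
	// The header-backed fields are pointers; nil means Object Lock is not
	// in effect (or the caller lacks permission to see the legal hold).
	if out.ObjectLockMode != nil {
		fmt.Println("object lock mode:", *out.ObjectLockMode)
	}
	if out.ObjectLockRetainUntilDate != nil {
		fmt.Println("retain until:", out.ObjectLockRetainUntilDate.Format(time.RFC3339))
	}
	if out.ObjectLockLegalHoldStatus != nil {
		fmt.Println("legal hold:", *out.ObjectLockLegalHoldStatus)
	}
}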
@@ -13463,6 +14568,24 @@ func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetPartsCount sets the PartsCount field's value. func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { s.PartsCount = &v @@ -13655,6 +14778,9 @@ func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { return s } +// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon Simple Storage Service API Reference. type InventoryConfiguration struct { _ struct{} `type:"structure"` @@ -13672,12 +14798,16 @@ type InventoryConfiguration struct { // Id is a required field Id *string `type:"string" required:"true"` - // Specifies which object version(s) to included in the inventory results. + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields VersionId, + // IsLatest, and DeleteMarker to the list. If set to Current, the list does + // not contain these version-related fields. // // IncludedObjectVersions is a required field IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` - // Specifies whether the inventory is enabled or disabled. + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. // // IsEnabled is a required field IsEnabled *bool `type:"boolean" required:"true"` @@ -14120,11 +15250,15 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { type LambdaFunctionConfiguration struct { _ struct{} `type:"structure"` + // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For + // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // A container for object key name filtering rules. For information about key - // name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -14132,8 +15266,8 @@ type LambdaFunctionConfiguration struct { // If you don't provide one, Amazon S3 will assign an ID. 
Id *string `type:"string"`

- // The Amazon Resource Name (ARN) of the Lambda cloud function that Amazon S3
- // can invoke when it detects events of the specified type.
+ // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
+ // invokes when the specified event type occurs.
 //
 // LambdaFunctionArn is a required field
 LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
@@ -14284,8 +15418,11 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp
 type LifecycleRule struct {
 _ struct{} `type:"structure"`

- // Specifies the days since the initiation of an Incomplete Multipart Upload
- // that Lifecycle will wait before permanently removing all parts of the upload.
+ // Specifies the days since the initiation of an incomplete multipart upload
+ // that Amazon S3 will wait before permanently removing all parts of the upload.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+ // in the Amazon Simple Storage Service Developer Guide.
 AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`

 Expiration *LifecycleExpiration `type:"structure"`
@@ -14307,7 +15444,7 @@ type LifecycleRule struct {
 NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`

 // Prefix identifying one or more objects to which the rule applies. This is
- // deprecated; use Filter instead.
+ // no longer used; use Filter instead.
 //
 // Deprecated: Prefix has been deprecated
 Prefix *string `deprecated:"true" type:"string"`
@@ -14524,7 +15661,7 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
 }

 type ListBucketAnalyticsConfigurationsInput struct {
- _ struct{} `type:"structure"`
+ _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"`

 // The name of the bucket from which analytics configurations are retrieved.
 //
@@ -14552,6 +15689,9 @@ func (s *ListBucketAnalyticsConfigurationsInput) Validate() error {
 if s.Bucket == nil {
 invalidParams.Add(request.NewErrParamRequired("Bucket"))
 }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }

 if invalidParams.Len() > 0 {
 return invalidParams
@@ -14633,7 +15773,7 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str
 }

 type ListBucketInventoryConfigurationsInput struct {
- _ struct{} `type:"structure"`
+ _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"`

 // The name of the bucket containing the inventory configurations to retrieve.
 //
@@ -14663,6 +15803,9 @@ func (s *ListBucketInventoryConfigurationsInput) Validate() error {
 if s.Bucket == nil {
 invalidParams.Add(request.NewErrParamRequired("Bucket"))
 }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }

 if invalidParams.Len() > 0 {
 return invalidParams
@@ -14744,7 +15887,7 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str
 }

 type ListBucketMetricsConfigurationsInput struct {
- _ struct{} `type:"structure"`
+ _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"`

 // The name of the bucket containing the metrics configurations to retrieve.
// @@ -14774,6 +15917,9 @@ func (s *ListBucketMetricsConfigurationsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14901,7 +16047,7 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { } type ListMultipartUploadsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14952,6 +16098,9 @@ func (s *ListMultipartUploadsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -15142,7 +16291,7 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti } type ListObjectVersionsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -15188,6 +16337,9 @@ func (s *ListObjectVersionsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -15372,7 +16524,7 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe } type ListObjectsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -15420,6 +16572,9 @@ func (s *ListObjectsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -15581,7 +16736,7 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { } type ListObjectsV2Input struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` // Name of the bucket to list. 
// @@ -15637,6 +16792,9 @@ func (s *ListObjectsV2Input) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -15839,7 +16997,7 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { } type ListPartsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListPartsRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -15882,6 +17040,9 @@ func (s *ListPartsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -16218,9 +17379,10 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { return s } -// Container for logging information. Presence of this element indicates that -// logging is enabled. Parameters TargetBucket and TargetPrefix are required -// in this case. +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon Simple Storage Service API Reference. type LoggingEnabled struct { _ struct{} `type:"structure"` @@ -16236,8 +17398,9 @@ type LoggingEnabled struct { TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - // This element lets you specify a prefix for the keys that the log files will - // be stored under. + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. // // TargetPrefix is a required field TargetPrefix *string `type:"string" required:"true"` @@ -16380,6 +17543,13 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { return s } +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PUT Bucket metrics +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) +// in the Amazon Simple Storage Service API Reference. type MetricsConfiguration struct { _ struct{} `type:"structure"` @@ -16575,8 +17745,8 @@ type NoncurrentVersionExpiration struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in - // the Amazon Simple Storage Service Developer Guide. + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. 
NoncurrentDays *int64 `type:"integer"` } @@ -16597,19 +17767,20 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers } // Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA, ONEZONE_IA, or GLACIER storage class. If your -// bucket is versioning-enabled (or versioning is suspended), you can set this -// action to request that Amazon S3 transition noncurrent object versions to -// the STANDARD_IA, ONEZONE_IA, or GLACIER storage class at a specific period -// in the object's lifetime. +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, +// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning +// is suspended), you can set this action to request that Amazon S3 transition +// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, +// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's +// lifetime. type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in - // the Amazon Simple Storage Service Developer Guide. + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // in the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` // The class of storage used to store the object. @@ -16643,10 +17814,16 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi type NotificationConfiguration struct { _ struct{} `type:"structure"` + // Describes the AWS Lambda functions to invoke and the events for which to + // invoke them. LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + // The topic to which notifications are sent and the events for which notifications + // are generated. TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` } @@ -16756,8 +17933,8 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf return s } -// A container for object key name filtering rules. For information about key -// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -16883,15 +18060,130 @@ func (s *ObjectIdentifier) Validate() error { return nil } -// SetKey sets the Key field's value. -func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { - s.Key = &v +// SetKey sets the Key field's value. +func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v + return s +} + +// The container element for object lock configuration parameters. +type ObjectLockConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether this bucket has an object lock configuration enabled. + ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` + + // The object lock rule in place for the specified object. + Rule *ObjectLockRule `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockConfiguration) GoString() string { + return s.String() +} + +// SetObjectLockEnabled sets the ObjectLockEnabled field's value. +func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { + s.ObjectLockEnabled = &v + return s +} + +// SetRule sets the Rule field's value. +func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { + s.Rule = v + return s +} + +// A Legal Hold configuration for an object. +type ObjectLockLegalHold struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object has a Legal Hold in place. + Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` +} + +// String returns the string representation +func (s ObjectLockLegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockLegalHold) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { + s.Status = &v + return s +} + +// A Retention configuration for an object. +type ObjectLockRetention struct { + _ struct{} `type:"structure"` + + // Indicates the Retention mode for the specified object. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The date on which this object lock retention expires. + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ObjectLockRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRetention) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. +func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { + s.Mode = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { - s.VersionId = &v +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { + s.RetainUntilDate = &v + return s +} + +// The container element for an object lock rule. +type ObjectLockRule struct { + _ struct{} `type:"structure"` + + // The default retention period that you want to apply to new objects placed + // in the specified bucket. + DefaultRetention *DefaultRetention `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRule) GoString() string { + return s.String() +} + +// SetDefaultRetention sets the DefaultRetention field's value. 
+func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { + s.DefaultRetention = v return s } @@ -17147,12 +18439,12 @@ func (s *Part) SetSize(v int64) *Part { return s } -// The container element for this bucket's public-policy status. +// The container element for a bucket's policy status. type PolicyStatus struct { _ struct{} `type:"structure"` - // The public-policy status for this bucket. TRUE indicates that this bucket - // is public. FALSE indicates that the bucket is not public. + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. IsPublic *bool `locationName:"IsPublic" type:"boolean"` } @@ -17253,75 +18545,45 @@ func (s *ProgressEvent) UnmarshalEvent( return nil } -// The container element for all Public Access Block configuration options. -// You can enable the configuration options in any combination. -// -// Amazon S3 considers a bucket policy public unless at least one of the following -// conditions is true: -// -// The policy limits access to a set of CIDRs using aws:SourceIp. For more information -// on CIDR, see http://www.rfc-editor.org/rfc/rfc4632.txt (http://www.rfc-editor.org/rfc/rfc4632.txt) -// -// The policy grants permissions, not including any "bad actions," to one of -// the following: -// -// A fixed AWS principal, user, role, or service principal -// -// A fixed aws:SourceArn -// -// A fixed aws:SourceVpc -// -// A fixed aws:SourceVpce -// -// A fixed aws:SourceOwner -// -// A fixed aws:SourceAccount -// -// A fixed value of s3:x-amz-server-side-encryption-aws-kms-key-id -// -// A fixed value of aws:userid outside the pattern "AROLEID:*" -// -// "Bad actions" are those that could expose the data inside a bucket to reads -// or writes by the public. These actions are s3:Get*, s3:List*, s3:AbortMultipartUpload, -// s3:Delete*, s3:Put*, and s3:RestoreObject. -// -// The star notation for bad actions indicates that all matching operations -// are considered bad actions. For example, because s3:Get* is a bad action, -// s3:GetObject, s3:GetObjectVersion, and s3:GetObjectAcl are all bad actions. +// Specifies the Block Public Access configuration for an Amazon S3 bucket. type PublicAccessBlockConfiguration struct { _ struct{} `type:"structure"` - // Specifies whether Amazon S3 should block public ACLs for this bucket. Setting - // this element to TRUEcauses the following behavior: + // Specifies whether Amazon S3 should block public access control lists (ACLs) + // for this bucket and objects in this bucket. Setting this element to TRUE + // causes the following behavior: + // + // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is + // public. // - // PUT Bucket acl and PUT Object acl calls will fail if the specified ACL allows - // public access. + // * PUT Object calls fail if the request includes a public ACL. // - // * PUT Object calls will fail if the request includes an object ACL. + // Enabling this setting doesn't affect existing policies or ACLs. BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` // Specifies whether Amazon S3 should block public bucket policies for this // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to // PUT Bucket policy if the specified bucket policy allows public access. // - // Note that enabling this setting doesn't affect existing bucket policies. + // Enabling this setting doesn't affect existing bucket policies. 
BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` - // Specifies whether Amazon S3 should ignore public ACLs for this bucket. Setting - // this element to TRUE causes Amazon S3 to ignore all public ACLs on this bucket - // and any objects that it contains. + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to + // ignore all public ACLs on this bucket and objects in this bucket. // - // Note that enabling this setting doesn't affect the persistence of any existing - // ACLs and doesn't prevent new public ACLs from being set. + // Enabling this setting doesn't affect the persistence of any existing ACLs + // and doesn't prevent new public ACLs from being set. IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` // Specifies whether Amazon S3 should restrict public bucket policies for this - // bucket. If this element is set to TRUE, then only the bucket owner and AWS - // Services can access this bucket if it has a public policy. + // bucket. Setting this element to TRUE restricts access to this bucket to only + // AWS services and authorized users within this account if the bucket has a + // public policy. // - // Note that enabling this setting doesn't affect previously stored bucket policies, - // except that public and cross-account access within any public bucket policy, - // including non-public delegation to specific accounts, is blocked. + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` } @@ -17360,7 +18622,7 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi } type PutBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure" payload:"AccelerateConfiguration"` + _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` // Specifies the Accelerate Configuration you want to set for the bucket. // @@ -17392,6 +18654,9 @@ func (s *PutBucketAccelerateConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -17433,11 +18698,12 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string { } type PutBucketAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` + _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + // Contains the elements that set the ACL permissions for an object per grantee. 
AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field @@ -17476,6 +18742,9 @@ func (s *PutBucketAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.AccessControlPolicy != nil { if err := s.AccessControlPolicy.Validate(); err != nil { invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) @@ -17558,7 +18827,7 @@ func (s PutBucketAclOutput) GoString() string { } type PutBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` // The configuration and any analyses for the analytics filter. // @@ -17570,7 +18839,7 @@ type PutBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -17595,6 +18864,9 @@ func (s *PutBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -17650,11 +18922,16 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { } type PutBucketCorsInput struct { - _ struct{} `type:"structure" payload:"CORSConfiguration"` + _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Describes the cross-origin access configuration for objects in an Amazon + // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // Simple Storage Service Developer Guide. + // // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -17675,6 +18952,9 @@ func (s *PutBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.CORSConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) } @@ -17724,16 +19004,18 @@ func (s PutBucketCorsOutput) GoString() string { } type PutBucketEncryptionInput struct { - _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` - // The name of the bucket for which the server-side encryption configuration - // is set. 
+ // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information + // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Container for server-side encryption configuration rules. Currently S3 supports - // one rule only. + // Specifies the default server-side-encryption configuration. // // ServerSideEncryptionConfiguration is a required field ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -17755,6 +19037,9 @@ func (s *PutBucketEncryptionInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.ServerSideEncryptionConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) } @@ -17804,7 +19089,7 @@ func (s PutBucketEncryptionOutput) GoString() string { } type PutBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` + _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` // The name of the bucket where the inventory configuration will be stored. // @@ -17838,6 +19123,9 @@ func (s *PutBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -17896,11 +19184,14 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string { } type PutBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. + // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) + // in the Amazon Simple Storage Service Developer Guide. 
LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -17920,6 +19211,9 @@ func (s *PutBucketLifecycleConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.LifecycleConfiguration != nil { if err := s.LifecycleConfiguration.Validate(); err != nil { invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) @@ -17966,7 +19260,7 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string { } type PutBucketLifecycleInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -17990,6 +19284,9 @@ func (s *PutBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.LifecycleConfiguration != nil { if err := s.LifecycleConfiguration.Validate(); err != nil { invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) @@ -18036,7 +19333,7 @@ func (s PutBucketLifecycleOutput) GoString() string { } type PutBucketLoggingInput struct { - _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18061,6 +19358,9 @@ func (s *PutBucketLoggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.BucketLoggingStatus == nil { invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) } @@ -18110,7 +19410,7 @@ func (s PutBucketLoggingOutput) GoString() string { } type PutBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure" payload:"MetricsConfiguration"` + _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` // The name of the bucket for which the metrics configuration is set. 
// @@ -18144,6 +19444,9 @@ func (s *PutBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -18202,7 +19505,7 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { } type PutBucketNotificationConfigurationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18230,6 +19533,9 @@ func (s *PutBucketNotificationConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.NotificationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) } @@ -18279,7 +19585,7 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { } type PutBucketNotificationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` + _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18304,6 +19610,9 @@ func (s *PutBucketNotificationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.NotificationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) } @@ -18348,7 +19657,7 @@ func (s PutBucketNotificationOutput) GoString() string { } type PutBucketPolicyInput struct { - _ struct{} `type:"structure" payload:"Policy"` + _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18379,6 +19688,9 @@ func (s *PutBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Policy == nil { invalidParams.Add(request.NewErrParamRequired("Policy")) } @@ -18429,7 +19741,7 @@ func (s PutBucketPolicyOutput) GoString() string { } type PutBucketReplicationInput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18439,6 +19751,9 @@ type PutBucketReplicationInput struct { // // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // A token that allows Amazon S3 object lock to be enabled for an existing bucket. 
+ Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } // String returns the string representation @@ -18457,6 +19772,9 @@ func (s *PutBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.ReplicationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) } @@ -18491,6 +19809,12 @@ func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationCo return s } +// SetToken sets the Token field's value. +func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { + s.Token = &v + return s +} + type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -18506,7 +19830,7 @@ func (s PutBucketReplicationOutput) GoString() string { } type PutBucketRequestPaymentInput struct { - _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18531,6 +19855,9 @@ func (s *PutBucketRequestPaymentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.RequestPaymentConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) } @@ -18580,7 +19907,7 @@ func (s PutBucketRequestPaymentOutput) GoString() string { } type PutBucketTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` + _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18605,6 +19932,9 @@ func (s *PutBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Tagging == nil { invalidParams.Add(request.NewErrParamRequired("Tagging")) } @@ -18654,7 +19984,7 @@ func (s PutBucketTaggingOutput) GoString() string { } type PutBucketVersioningInput struct { - _ struct{} `type:"structure" payload:"VersioningConfiguration"` + _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18663,6 +19993,10 @@ type PutBucketVersioningInput struct { // and the value that is displayed on your authentication device. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + // Describes the versioning state of an Amazon S3 bucket. For more information, + // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) + // in the Amazon Simple Storage Service API Reference. 
+ //
 // VersioningConfiguration is a required field
 VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
@@ -18683,6 +20017,9 @@ func (s *PutBucketVersioningInput) Validate() error {
 if s.Bucket == nil {
 invalidParams.Add(request.NewErrParamRequired("Bucket"))
 }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
 if s.VersioningConfiguration == nil {
 invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration"))
 }
@@ -18733,11 +20070,13 @@ func (s PutBucketVersioningOutput) GoString() string {
 }

 type PutBucketWebsiteInput struct {
- _ struct{} `type:"structure" payload:"WebsiteConfiguration"`
+ _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"`

 // Bucket is a required field
 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

+ // Specifies website configuration parameters for an Amazon S3 bucket.
+ //
 // WebsiteConfiguration is a required field
 WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
@@ -18758,6 +20097,9 @@ func (s *PutBucketWebsiteInput) Validate() error {
 if s.Bucket == nil {
 invalidParams.Add(request.NewErrParamRequired("Bucket"))
 }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
 if s.WebsiteConfiguration == nil {
 invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration"))
 }
@@ -18807,11 +20149,12 @@ func (s PutBucketWebsiteOutput) GoString() string {
 }

 type PutObjectAclInput struct {
- _ struct{} `type:"structure" payload:"AccessControlPolicy"`
+ _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"`

 // The canned ACL to apply to the object.
 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`

+ // Contains the elements that set the ACL permissions for an object per grantee.
 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

 // Bucket is a required field
@@ -18862,6 +20205,9 @@ func (s *PutObjectAclInput) Validate() error {
 if s.Bucket == nil {
 invalidParams.Add(request.NewErrParamRequired("Bucket"))
 }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
 if s.Key == nil {
 invalidParams.Add(request.NewErrParamRequired("Key"))
 }
@@ -18978,7 +20324,7 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
 }

 type PutObjectInput struct {
- _ struct{} `type:"structure" payload:"Body"`
+ _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`

 // The canned ACL to apply to the object.
 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
@@ -19009,7 +20355,9 @@ type PutObjectInput struct {
 // body cannot be determined automatically.
 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`

- // The base64-encoded 128-bit MD5 digest of the part data.
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // A standard MIME type describing the format of the object data. @@ -19038,6 +20386,15 @@ type PutObjectInput struct { // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + // The Legal Hold status that you want to apply to the specified object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The object lock mode that you want to apply to this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want this object's object lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. // Documentation on downloading objects from requester pays buckets can be found @@ -19052,18 +20409,23 @@ type PutObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). 
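The PutObjectInput hunks above add the Object Lock parameters on the write path alongside the now-sensitive SSE fields, and the ContentMD5 doc notes that a digest must be supplied once Object Lock parameters are set. A minimal sketch of how these compose, assuming a configured default session and the generated ObjectLockMode enum constants defined elsewhere in this file; bucket and key names are placeholders:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	body := []byte("hello, object lock")
	// Content-MD5 must be computed and sent explicitly when Object Lock
	// parameters are present on the request.
	sum := md5.Sum(body)
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:                    aws.String("example-bucket"), // placeholder name
		Key:                       aws.String("example-key"),    // placeholder name
		Body:                      bytes.NewReader(body),
		ContentMD5:                aws.String(base64.StdEncoding.EncodeToString(sum[:])),
		ObjectLockMode:            aws.String(s3.ObjectLockModeGovernance), // illustrative; COMPLIANCE is the other mode
		ObjectLockRetainUntilDate: aws.Time(time.Now().Add(24 * time.Hour)),
	})
	if err != nil {
		log.Fatal(err)
	}
}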
@@ -19098,6 +20460,9 @@ func (s *PutObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -19196,88 +20561,334 @@ func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { return s } -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { - s.GrantReadACP = &v - return s +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. 
+func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +type PutObjectLegalHoldInput struct { + _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` + + // The bucket containing the object that you want to place a Legal Hold on. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The key name for the object that you want to place a Legal Hold on. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Container element for the Legal Hold configuration you want to apply to the + // specified object. + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object that you want to place a Legal Hold on. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetLegalHold sets the LegalHold field's value. +func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { + s.LegalHold = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { + s.VersionId = &v + return s +} + +type PutObjectLegalHoldOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLegalHoldOutput) GoString() string { + return s.String() } -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { - s.GrantWriteACP = &v +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { + s.RequestCharged = &v return s } -// SetKey sets the Key field's value. -func (s *PutObjectInput) SetKey(v string) *PutObjectInput { - s.Key = &v - return s +type PutObjectLockConfigurationInput struct { + _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` + + // The bucket whose object lock configuration you want to create or replace. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The object lock configuration that you want to apply to the specified bucket. + ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // A token to allow Amazon S3 object lock to be enabled for an existing bucket. + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } -// SetMetadata sets the Metadata field's value. -func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { - s.Metadata = v - return s +// String returns the string representation +func (s PutObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) } -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { - s.RequestPayer = &v - return s +// GoString returns the string representation +func (s PutObjectLockConfigurationInput) GoString() string { + return s.String() } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { - s.SSECustomerAlgorithm = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { - s.SSECustomerKey = &v +// SetBucket sets the Bucket field's value. +func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { + s.Bucket = &v return s } -func (s *PutObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { +func (s *PutObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { return v } - return *s.SSECustomerKey + return *s.Bucket } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { - s.SSECustomerKeyMD5 = &v +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { + s.ObjectLockConfiguration = v return s } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { - s.SSEKMSKeyId = &v +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { + s.RequestPayer = &v return s } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { - s.ServerSideEncryption = &v +// SetToken sets the Token field's value. +func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { + s.Token = &v return s } -// SetStorageClass sets the StorageClass field's value. -func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { - s.StorageClass = &v - return s +type PutObjectLockConfigurationOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// SetTagging sets the Tagging field's value. -func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { - s.Tagging = &v - return s +// String returns the string representation +func (s PutObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) } -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { - s.WebsiteRedirectLocation = &v +// GoString returns the string representation +func (s PutObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { + s.RequestCharged = &v return s } @@ -19305,9 +20916,14 @@ type PutObjectOutput struct { // verification of the customer-provided encryption key. 
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -19357,6 +20973,12 @@ func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { s.SSEKMSKeyId = &v @@ -19375,8 +20997,139 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { return s } +type PutObjectRetentionInput struct { + _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` + + // The bucket that contains the object you want to apply this Object Retention + // configuration to. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates whether this operation should bypass Governance-mode restrictions. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // The key name for the object that you want to apply this Object Retention + // configuration to. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The container element for the Object Retention configuration. + Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The version ID for the object that you want to apply this Object Retention + // configuration to. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *PutObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetRetention sets the Retention field's value. +func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { + s.Retention = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { + s.VersionId = &v + return s +} + +type PutObjectRetentionOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. 
+func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { + s.RequestCharged = &v + return s +} + type PutObjectTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` + _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19406,6 +21159,9 @@ func (s *PutObjectTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -19481,16 +21237,19 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput } type PutPublicAccessBlockInput struct { - _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` - // The name of the Amazon S3 bucket whose Public Access Block configuration - // you want to set. + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to set. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The Public Access Block configuration that you want to apply to this Amazon - // S3 bucket. + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon Simple Storage Service Developer Guide. // // PublicAccessBlockConfiguration is a required field PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -19512,6 +21271,9 @@ func (s *PutPublicAccessBlockInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.PublicAccessBlockConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) } @@ -19555,17 +21317,16 @@ func (s PutPublicAccessBlockOutput) GoString() string { return s.String() } -// A container for specifying the configuration for publication of messages -// to an Amazon Simple Queue Service (Amazon SQS) queue.when Amazon S3 detects -// specified events. +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. type QueueConfiguration struct { _ struct{} `type:"structure"` // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // A container for object key name filtering rules. For information about key - // name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. 
For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -19574,7 +21335,7 @@ type QueueConfiguration struct { Id *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 - // will publish a message when it detects events of the specified type. + // publishes a message when it detects events of the specified type. // // QueueArn is a required field QueueArn *string `locationName:"Queue" type:"string" required:"true"` @@ -19720,6 +21481,8 @@ func (s *RecordsEvent) UnmarshalEvent( return nil } +// Specifies how requests are redirected. In the event of an error, you can +// specify a different error code to return. type Redirect struct { _ struct{} `type:"structure"` @@ -19730,8 +21493,8 @@ type Redirect struct { // siblings is present. HttpRedirectCode *string `type:"string"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. Protocol *string `type:"string" enum:"Protocol"` // The object key prefix to use in the redirect request. For example, to redirect @@ -19743,7 +21506,7 @@ type Redirect struct { ReplaceKeyPrefixWith *string `type:"string"` // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the sibling is present. Can + // request to error.html. Not required if one of the siblings is present. Can // be present only if ReplaceKeyPrefixWith is not provided. ReplaceKeyWith *string `type:"string"` } @@ -19788,16 +21551,18 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { return s } +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. type RedirectAllRequestsTo struct { _ struct{} `type:"structure"` - // Name of the host where requests will be redirected. + // Name of the host where requests are redirected. // // HostName is a required field HostName *string `type:"string" required:"true"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. Protocol *string `type:"string" enum:"Protocol"` } @@ -19842,7 +21607,9 @@ type ReplicationConfiguration struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) role that Amazon S3 can assume when replicating the objects. + // (IAM) role that Amazon S3 assumes when replicating objects. For more information, + // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html) + // in the Amazon Simple Storage Service Developer Guide. // // Role is a required field Role *string `type:"string" required:"true"` @@ -19902,7 +21669,7 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo return s } -// A container for information about a specific replication rule. +// Specifies which Amazon S3 objects to replicate and where to store the replicas. 
type ReplicationRule struct { _ struct{} `type:"structure"` @@ -19922,7 +21689,8 @@ type ReplicationRule struct { ID *string `type:"string"` // An object keyname prefix that identifies the object or objects to which the - // rule applies. The maximum prefix length is 1,024 characters. + // rule applies. The maximum prefix length is 1,024 characters. To include all + // objects in a bucket, specify an empty string. // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -19938,7 +21706,7 @@ type ReplicationRule struct { // * Same object qualify tag based filter criteria specified in multiple // rules // - // For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) + // For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) // in the Amazon S3 Developer Guide. Priority *int64 `type:"integer"` @@ -19947,12 +21715,9 @@ type ReplicationRule struct { // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using // an AWS KMS-Managed Key (SSE-KMS). - // - // If you want Amazon S3 to replicate objects created with server-side encryption - // using AWS KMS-Managed Keys. SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` - // If status isn't enabled, the rule is ignored. + // Specifies whether the rule is enabled. // // Status is a required field Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` @@ -20234,7 +21999,7 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { } type RestoreObjectInput struct { - _ struct{} `type:"structure" payload:"RestoreRequest"` + _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20270,6 +22035,9 @@ func (s *RestoreObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -20464,6 +22232,7 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { return s } +// Specifies the redirect behavior and when a redirect is applied. type RoutingRule struct { _ struct{} `type:"structure"` @@ -20516,16 +22285,22 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { return s } +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon Simple Storage Service API Reference. type Rule struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. 
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` Expiration *LifecycleExpiration `type:"structure"` - // Unique identifier for the rule. The value cannot be longer than 255 characters. + // Unique identifier for the rule. The value can't be longer than 255 characters. ID *string `type:"string"` // Specifies when noncurrent object versions expire. Upon expiration, Amazon @@ -20536,24 +22311,27 @@ type Rule struct { NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA, ONEZONE_IA, or GLACIER storage class. If your - // bucket is versioning-enabled (or versioning is suspended), you can set this - // action to request that Amazon S3 transition noncurrent object versions to - // the STANDARD_IA, ONEZONE_IA, or GLACIER storage class at a specific period - // in the object's lifetime. + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, + // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning + // is suspended), you can set this action to request that Amazon S3 transition + // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, + // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's + // lifetime. NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - // Prefix identifying one or more objects to which the rule applies. + // Object key prefix that identifies one or more objects to which this rule + // applies. // // Prefix is a required field Prefix *string `type:"string" required:"true"` - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. // // Status is a required field Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // Specifies when an object transitions to a specified storage class. Transition *Transition `type:"structure"` } @@ -20639,7 +22417,7 @@ type SSEKMS struct { // key to use for encrypting Inventory reports. // // KeyId is a required field - KeyId *string `type:"string" required:"true"` + KeyId *string `type:"string" required:"true" sensitive:"true"` } // String returns the string representation @@ -20788,7 +22566,7 @@ type SelectObjectContentEventStreamReader interface { // HTTP this will also close the HTTP connection. Close() error - // Returns any error that has occured while reading from the event stream. + // Returns any error that has occurred while reading from the event stream. Err() error } @@ -20912,7 +22690,7 @@ func (r *readSelectObjectContentEventStream) unmarshalerForEventType( // Amazon S3 uses this to parse object data into records. It returns only records // that match the specified SQL expression. You must also specify the data serialization // format for the response. For more information, see S3Select API Documentation -// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). 
type SelectObjectContentInput struct { _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -20949,16 +22727,16 @@ type SelectObjectContentInput struct { // Specifies if periodic request progress information should be enabled. RequestProgress *RequestProgress `type:"structure"` - // The SSE Algorithm used to encrypt the object. For more information, see - // Server-Side Encryption (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The SSE Algorithm used to encrypt the object. For more information, see Server-Side + // Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // The SSE Customer Key. For more information, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + // The SSE Customer Key. For more information, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - // The SSE Customer Key MD5. For more information, see Server-Side Encryption - // (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The SSE Customer Key MD5. For more information, see Server-Side Encryption + // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` } @@ -20978,6 +22756,9 @@ func (s *SelectObjectContentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Expression == nil { invalidParams.Add(request.NewErrParamRequired("Expression")) } @@ -21201,14 +22982,16 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec } // Describes the default server-side encryption to apply to new objects in the -// bucket. If Put Object request does not specify any server-side encryption, -// this default encryption will be applied. +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. For more information, see PUT Bucket +// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon Simple Storage Service API Reference. type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` // KMS master key ID to use for the default encryption. This parameter is allowed - // if SSEAlgorithm is aws:kms. - KMSMasterKeyID *string `type:"string"` + // if and only if SSEAlgorithm is set to aws:kms. 
+ KMSMasterKeyID *string `type:"string" sensitive:"true"` // Server-side encryption algorithm to use for the default encryption. // @@ -21251,8 +23034,7 @@ func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEnc return s } -// Container for server-side encryption configuration rules. Currently S3 supports -// one rule only. +// Specifies the default server-side-encryption configuration. type ServerSideEncryptionConfiguration struct { _ struct{} `type:"structure"` @@ -21302,13 +23084,12 @@ func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRu return s } -// Container for information about a particular server-side encryption configuration -// rule. +// Specifies the default server-side encryption configuration. type ServerSideEncryptionRule struct { _ struct{} `type:"structure"` - // Describes the default server-side encryption to apply to new objects in the - // bucket. If Put Object request does not specify any server-side encryption, + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, // this default encryption will be applied. ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` } @@ -21344,13 +23125,17 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv return s } -// A container for filters that define which source objects should be replicated. +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter +// that you can specify for objects created with server-side encryption using +// an AWS KMS-Managed Key (SSE-KMS). type SourceSelectionCriteria struct { _ struct{} `type:"structure"` - // A container for filter information for the selection of S3 objects encrypted - // with AWS KMS. If you include SourceSelectionCriteria in the replication configuration, - // this element is required. + // A container for filter information for the selection of Amazon S3 objects + // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication + // configuration, this element is required. SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` } @@ -21390,8 +23175,8 @@ func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedOb type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` - // If the status is not Enabled, replication for S3 objects encrypted with AWS - // KMS is disabled. + // Specifies whether Amazon S3 replicates objects created with server-side encryption + // using an AWS KMS-managed key. // // Status is a required field Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` @@ -21507,11 +23292,14 @@ func (s *StatsEvent) UnmarshalEvent( return nil } +// Specifies data related to access patterns to be collected and made available +// to analyze the tradeoffs between different storage classes for an Amazon +// S3 bucket. type StorageClassAnalysis struct { _ struct{} `type:"structure"` - // A container used to describe how data related to the storage class analysis - // should be exported. + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. 
DataExport *StorageClassAnalysisDataExport `type:"structure"` } @@ -21751,16 +23539,20 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { } // A container for specifying the configuration for publication of messages -// to an Amazon Simple Notification Service (Amazon SNS) topic.when Amazon S3 +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 // detects specified events. type TopicConfiguration struct { _ struct{} `type:"structure"` + // The Amazon S3 bucket event about which to send notifications. For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // A container for object key name filtering rules. For information about key - // name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -21769,7 +23561,7 @@ type TopicConfiguration struct { Id *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 - // will publish a message when it detects events of the specified type. + // publishes a message when it detects events of the specified type. // // TopicArn is a required field TopicArn *string `locationName:"Topic" type:"string" required:"true"` @@ -21878,18 +23670,19 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } +// Specifies when an object transitions to a specified storage class. type Transition struct { _ struct{} `type:"structure"` - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. Days *int64 `type:"integer"` - // The class of storage used to store the object. + // The storage class to which you want the object to transition. StorageClass *string `type:"string" enum:"TransitionStorageClass"` } @@ -21922,7 +23715,7 @@ func (s *Transition) SetStorageClass(v string) *Transition { } type UploadPartCopyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -21950,7 +23743,7 @@ type UploadPartCopyInput struct { // the form bytes=first-last, where the first and last are the zero-based byte // offsets to copy. For example, bytes=0-9 indicates that you want to copy the // first ten bytes of the source. You can copy a range only if the source object - // is greater than 5 GB. + // is greater than 5 MB. 
CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` // Specifies the algorithm to use when decrypting the source object (e.g., AES256). @@ -21959,7 +23752,7 @@ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -21990,7 +23783,7 @@ type UploadPartCopyInput struct { // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -22019,6 +23812,9 @@ func (s *UploadPartCopyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.CopySource == nil { invalidParams.Add(request.NewErrParamRequired("CopySource")) } @@ -22189,7 +23985,7 @@ type UploadPartCopyOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -22249,7 +24045,7 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { } type UploadPartInput struct { - _ struct{} `type:"structure" payload:"Body"` + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` // Object data. Body io.ReadSeeker `type:"blob"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. + // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // auto-populated when using the command from the CLI. This parameter is required + // if object lock parameters are specified. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // Object key for which the multipart upload was initiated. @@ -22292,7 +24090,7 @@ type UploadPartInput struct { // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header.
This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -22321,6 +24119,9 @@ func (s *UploadPartInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -22442,7 +24243,7 @@ type UploadPartOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -22495,6 +24296,9 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { return s } +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon Simple Storage Service API Reference. type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -22529,15 +24333,22 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { return s } +// Specifies website configuration parameters for an Amazon S3 bucket. type WebsiteConfiguration struct { _ struct{} `type:"structure"` + // The name of the error document for the website. ErrorDocument *ErrorDocument `type:"structure"` + // The name of the index document for the website. IndexDocument *IndexDocument `type:"structure"` + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + // Rules that define when a redirect is applied and the redirect behavior. 
RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -22749,6 +24560,12 @@ const ( // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + + // EventS3ObjectRestorePost is a Event enum value + EventS3ObjectRestorePost = "s3:ObjectRestore:Post" + + // EventS3ObjectRestoreCompleted is a Event enum value + EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" ) const ( @@ -22789,6 +24606,9 @@ const ( // InventoryFormatOrc is a InventoryFormat enum value InventoryFormatOrc = "ORC" + + // InventoryFormatParquet is a InventoryFormat enum value + InventoryFormatParquet = "Parquet" ) const ( @@ -22828,6 +24648,15 @@ const ( // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" + + // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" + + // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockMode = "ObjectLockMode" + + // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" ) const ( @@ -22885,6 +24714,35 @@ const ( ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" ) +const ( + // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value + ObjectLockEnabledEnabled = "Enabled" +) + +const ( + // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOn = "ON" + + // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOff = "OFF" +) + +const ( + // ObjectLockModeGovernance is a ObjectLockMode enum value + ObjectLockModeGovernance = "GOVERNANCE" + + // ObjectLockModeCompliance is a ObjectLockMode enum value + ObjectLockModeCompliance = "COMPLIANCE" +) + +const ( + // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeGovernance = "GOVERNANCE" + + // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeCompliance = "COMPLIANCE" +) + const ( // ObjectStorageClassStandard is a ObjectStorageClass enum value ObjectStorageClassStandard = "STANDARD" @@ -22900,6 +24758,12 @@ const ( // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value + ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( @@ -23024,6 +24888,15 @@ const ( // StorageClassOnezoneIa is a StorageClass enum value StorageClassOnezoneIa = "ONEZONE_IA" + + // StorageClassIntelligentTiering is a StorageClass enum value + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // StorageClassGlacier is a StorageClass enum value + StorageClassGlacier = "GLACIER" + + // StorageClassDeepArchive is a StorageClass enum value + StorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( @@ -23059,6 +24932,12 @@ const ( // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a 
TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go index bc68a46a..9ba8a788 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -80,7 +80,8 @@ func buildGetBucketLocation(r *request.Request) { out := r.Data.(*GetBucketLocationOutput) b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed reading response body", err) + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 6f560a40..23d386b1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -17,7 +17,8 @@ func defaultInitClientFn(c *client.Client) { // Require SSL when using SSE keys c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeys) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.Clear() @@ -33,6 +34,7 @@ func defaultInitRequestFn(r *request.Request) { switch r.Operation.Name { case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, + opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration, opPutBucketReplication: // These S3 operations require Content-MD5 to be set r.Handlers.Build.PushBack(contentMD5) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go index 39b912c2..4b65f715 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -63,6 +63,20 @@ // See the s3manager package's Downloader type documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader // +// Automatic URI cleaning +// +// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname) +// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct +// used by the service client. 
+// +// svc := s3.New(sess, &aws.Config{ +// DisableRestProtocolURICleaning: aws.Bool(true), +// }) +// out, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String("bucketname"), +// Key: aws.String("//foo//bar//moo"), +// }) +// // Get Bucket Region // // GetBucketRegion will attempt to get the region for a bucket using a region diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go index 8010c4fa..b71c835d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -3,6 +3,7 @@ package s3 import ( "crypto/md5" "encoding/base64" + "net/http" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -30,25 +31,54 @@ func validateSSERequiresSSL(r *request.Request) { } } -func computeSSEKeys(r *request.Request) { - headers := []string{ - "x-amz-server-side-encryption-customer-key", - "x-amz-copy-source-server-side-encryption-customer-key", +const ( + sseKeyHeader = "x-amz-server-side-encryption-customer-key" + sseKeyMD5Header = sseKeyHeader + "-md5" +) + +func computeSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(sseCustomerKeyGetter); ok { + key = g.getSSECustomerKey() + } + + computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) +} + +const ( + copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" + copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" +) + +func computeCopySourceSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + key = g.getCopySourceSSECustomerKey() } - for _, h := range headers { - md5h := h + "-md5" - if key := r.HTTPRequest.Header.Get(h); key != "" { - // Base64-encode the value - b64v := base64.StdEncoding.EncodeToString([]byte(key)) - r.HTTPRequest.Header.Set(h, b64v) - - // Add MD5 if it wasn't computed - if r.HTTPRequest.Header.Get(md5h) == "" { - sum := md5.Sum([]byte(key)) - b64sum := base64.StdEncoding.EncodeToString(sum[:]) - r.HTTPRequest.Header.Set(md5h, b64sum) - } + computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) +} + +func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { + if len(key) == 0 { + // Backwards compatibility where the user just set the header value instead + // of using the API parameter, or setting the header value for an + // operation without the parameters modeled. + key = r.Header.Get(keyHeader) + if len(key) == 0 { + return } + + // For backwards compatibility, the header's value is not base64 encoded, + // and needs to be encoded and updated by the SDK's customizations. + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + r.Header.Set(keyHeader, b64Key) + } + + // Only update Key's MD5 if not already set.
+ if len(r.Header.Get(keyMD5Header)) == 0 {
+ sum := md5.Sum([]byte(key))
+ keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
+ r.Header.Set(keyMD5Header, keyMD5)
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
index fde3050f..f6a69aed 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -14,7 +14,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.NewRequestFailure(
- awserr.New("SerializationError", "unable to read response body", err),
+ awserr.New(request.ErrCodeSerialization, "unable to read response body", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
@@ -31,7 +31,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
unmarshalError(r)
if err, ok := r.Error.(awserr.Error); ok && err != nil {
- if err.Code() == "SerializationError" {
+ if err.Code() == request.ErrCodeSerialization {
r.Error = nil
return
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
index 12c0612c..5b63fac7 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -11,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)
type xmlErrorResponse struct {
@@ -26,40 +27,50 @@ func unmarshalError(r *request.Request) {
// Bucket exists in a different region, and request needs
// to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+ msg := fmt.Sprintf(
+ "incorrect region, the bucket is not in '%s' region at endpoint '%s'",
+ aws.StringValue(r.Config.Region),
+ aws.StringValue(r.Config.Endpoint),
+ )
+ if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
+ msg += fmt.Sprintf(", bucket is in '%s' region", v)
+ }
r.Error = awserr.NewRequestFailure(
- awserr.New("BucketRegionError",
- fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
- aws.StringValue(r.Config.Region)),
- nil),
+ awserr.New("BucketRegionError", msg, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
- var errCode, errMsg string
- // Attempt to parse error from body if it is known
- resp := &xmlErrorResponse{}
- err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
- if err != nil && err != io.EOF {
- errCode = "SerializationError"
- errMsg = "failed to decode S3 XML error response"
- } else {
- errCode = resp.Code
- errMsg = resp.Message
+ var errResp xmlErrorResponse
+ err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
+ if err == io.EOF {
+ // Only capture the error if an unmarshal error occurs that is not EOF,
+ // because S3 might send an error without an error message, which causes
+ // the XML unmarshal to fail with EOF.
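+ // An empty body is treated as "no parsed error"; the status code
+ // fallback below then supplies the error code and message.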
err = nil } + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } // Fallback to status code converted to message if still no error code - if len(errCode) == 0 { + if len(errResp.Code) == 0 { statusText := http.StatusText(r.HTTPResponse.StatusCode) - errCode = strings.Replace(statusText, " ", "", -1) - errMsg = statusText + errResp.Code = strings.Replace(statusText, " ", "", -1) + errResp.Message = statusText } r.Error = awserr.NewRequestFailure( - awserr.New(errCode, errMsg, err), + awserr.New(errResp.Code, errResp.Message, err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index ee908f91..eb0a6a41 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -3,10 +3,12 @@ package sts import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" ) @@ -54,38 +56,26 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // AssumeRole API operation for AWS Security Token Service. // -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) that you can use to access -// AWS resources that you might not normally have access to. Typically, you -// use AssumeRole for cross-account access or federation. For a comparison of -// AssumeRole with the other APIs that produce temporary credentials, see Requesting -// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Returns a set of temporary security credentials that you can use to access +// AWS resources that you might not normally have access to. These temporary +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use AssumeRole within your account or for cross-account +// access. For a comparison of AssumeRole with other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// Important: You cannot call AssumeRole by using AWS root account credentials; -// access is denied. You must use credentials for an IAM user or an IAM role -// to call AssumeRole. +// You cannot use AWS account root user credentials to call AssumeRole. You +// must use credentials for an IAM user or an IAM role to call AssumeRole. // // For cross-account access, imagine that you own multiple accounts and need // to access resources in each account. You could create long-term credentials // in each account to access those resources. However, managing all those credentials // and remembering which one can access which account can be time consuming. 
-// Instead, you can create one set of long-term credentials in one account and -// then use temporary security credentials to access all the other accounts +// Instead, you can create one set of long-term credentials in one account. +// Then use temporary security credentials to access all the other accounts // by assuming roles in those accounts. For more information about roles, see -// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) -// in the IAM User Guide. -// -// For federation, you can, for example, grant single sign-on access to the -// AWS Management Console. If you already have an identity and authentication -// system in your corporate network, you don't have to recreate user identities -// in AWS in order to grant those user identities access to AWS. Instead, after -// a user has been authenticated, you call AssumeRole (and specify the role -// with the appropriate permissions) to get temporary security credentials for -// that user. With those temporary security credentials, you construct a sign-in -// URL that users can use to access the console. For more information, see Common -// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) // in the IAM User Guide. // // By default, the temporary security credentials created by AssumeRole last @@ -94,69 +84,73 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // seconds (15 minutes) up to the maximum session duration setting for the role. // This setting can have a value from 1 hour to 12 hours. To learn how to view // the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI operations but -// does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // // The temporary security credentials created by AssumeRole can be used to make -// API calls to any AWS service with the following exception: you cannot call -// the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. 
This gives you a way to further -// restrict the permissions for the resulting temporary security credentials. -// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. -// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// API calls to any AWS service with the following exception: You cannot call +// the AWS STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// To assume a role, your AWS account must be trusted by the role. The trust -// relationship is defined in the role's trust policy when the role is created. -// That trust policy states which accounts are allowed to delegate access to -// this account's role. -// -// The user who wants to access the role must also have permissions delegated -// from the role's administrator. If the user is in a different account than -// the role, then the user's administrator must attach a policy that allows -// the user to call AssumeRole on the ARN of the role in the other account. -// If the user is in the same account as the role, then you can either attach -// a policy to the user (identical to the previous different account user), -// or you can add the user as a principal directly in the role's trust policy. -// In this case, the trust policy acts as the only resource-based policy in -// IAM, and users in the same account as the role do not need explicit permission -// to assume the role. For more information about trust policies and resource-based -// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// To assume a role from a different account, your AWS account must be trusted +// by the role. The trust relationship is defined in the role's trust policy +// when the role is created. That trust policy states which accounts are allowed +// to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have permissions +// that are delegated from the user account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN +// of the role in the other account. 
If the user is in the same account as the +// role, then you can do either of the following: +// +// * Attach a policy to the user (identical to the previous user in a different +// account). +// +// * Add the user as a principal directly in the role's trust policy. +// +// In this case, the trust policy acts as an IAM resource-based policy. Users +// in the same account as the role do not need explicit permission to assume +// the role. For more information about trust policies and resource-based policies, +// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. // // Using MFA with AssumeRole // -// You can optionally include multi-factor authentication (MFA) information -// when you call AssumeRole. This is useful for cross-account scenarios in which -// you want to make sure that the user who is assuming the role has been authenticated -// using an AWS MFA device. In that scenario, the trust policy of the role being -// assumed includes a condition that tests for MFA authentication; if the caller -// does not include valid MFA information, the request to assume the role is -// denied. The condition in a trust policy that tests for MFA authentication -// might look like the following example. +// (Optional) You can include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios to ensure +// that the user that assumes the role has been authenticated with an AWS MFA +// device. In that scenario, the trust policy of the role being assumed includes +// a condition that tests for MFA authentication. If the caller does not include +// valid MFA information, the request to assume the role is denied. The condition +// in a trust policy that tests for MFA authentication might look like the following +// example. // // "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} // -// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) // in the IAM User Guide guide. // // To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode // parameters. The SerialNumber value identifies the user's hardware or virtual // MFA device. The TokenCode is the time-based one-time password (TOTP) that -// the MFA devices produces. +// the MFA device produces. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -179,7 +173,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
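+//
+// A minimal sketch of an MFA-protected AssumeRole call with this SDK (the
+// role ARN, MFA serial number, and token code below are placeholder values,
+// and sess is an existing session.Session):
+//
+//    svc := sts.New(sess)
+//    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//        RoleSessionName: aws.String("example-session"),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
+//        TokenCode:       aws.String("123456"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
+//    }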
// // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole @@ -243,6 +237,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re output = &AssumeRoleWithSAMLOutput{} req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials return } @@ -252,9 +247,9 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // via a SAML authentication response. This operation provides a mechanism for // tying an enterprise identity store or directory to role-based AWS access // without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML -// with the other APIs that produce temporary credentials, see Requesting Temporary -// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this operation consist of @@ -269,37 +264,36 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session // duration setting for the role. This setting can have a value from 1 hour // to 12 hours. To learn how to view the maximum value for your role, see View -// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI operations but -// does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any AWS service with the following exception: you cannot -// call the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by the intersection of both the access policy -// of the role that is being assumed, and the policy that you pass. 
This means -// that both policies must grant the permission for the action to be allowed. -// This gives you a way to further restrict the permissions for the resulting -// temporary security credentials. You cannot use the passed policy to grant -// permissions that are in excess of those allowed by the access policy of the -// role that is being assumed. For more information, see Permissions for AssumeRole, -// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Before your application can call AssumeRoleWithSAML, you must configure your // SAML identity provider (IdP) to issue the claims required by AWS. Additionally, // you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider, and create -// an IAM role that specifies this SAML provider in its trust policy. +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. // // Calling AssumeRoleWithSAML does not require the use of AWS security credentials. // The identity of the caller is validated by using keys in the metadata document @@ -313,16 +307,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // For more information, see the following resources: // -// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) // in the IAM User Guide. // -// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) // in the IAM User Guide. // -// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) // in the IAM User Guide. 
// -// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -361,7 +355,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML @@ -425,41 +419,42 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI output = &AssumeRoleWithWebIdentityOutput{} req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials return } // AssumeRoleWithWebIdentity API operation for AWS Security Token Service. // // Returns a set of temporary security credentials for users who have been authenticated -// in a mobile or web application with a web identity provider, such as Amazon -// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible -// identity provider. +// in a mobile or web application with a web identity provider. Example providers +// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID +// Connect-compatible identity provider. // // For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) -// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely -// identify a user and supply the user with a consistent identity throughout -// the lifetime of an application. -// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview -// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) // in the AWS SDK for iOS Developer Guide. // // Calling AssumeRoleWithWebIdentity does not require the use of AWS security // credentials. 
Therefore, you can distribute an application (for example, on // mobile devices) that requests temporary security credentials without including -// long-term AWS credentials in the application, and without deploying server-based -// proxy services that use long-term AWS credentials. Instead, the identity -// of the caller is validated by using a token from the web identity provider. -// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce -// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// long-term AWS credentials in the application. You also don't need to deploy +// server-based proxy services that use long-term AWS credentials. Instead, +// the identity of the caller is validated by using a token from the web identity +// provider. For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this API consist of an access // key ID, a secret access key, and a security token. Applications can use these -// temporary security credentials to sign calls to AWS service APIs. +// temporary security credentials to sign calls to AWS service API operations. // // By default, the temporary security credentials created by AssumeRoleWithWebIdentity // last for one hour. However, you can use the optional DurationSeconds parameter @@ -467,29 +462,29 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // seconds (15 minutes) up to the maximum session duration setting for the role. // This setting can have a value from 1 hour to 12 hours. To learn how to view // the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI operations but -// does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: -// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. 
If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to further -// restrict the permissions for the resulting temporary security credentials. -// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. -// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Before your application can call AssumeRoleWithWebIdentity, you must have @@ -508,21 +503,19 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // For more information about how to use web identity federation and the AssumeRoleWithWebIdentity // API, see the following resources: // -// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to AWS. // -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). 
-// This interactive website lets you walk through the process of authenticating -// via Login with Amazon, Facebook, or Google, getting temporary security -// credentials, and then using those credentials to make a request to AWS. -// -// -// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android -// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample -// apps that show how to invoke the identity providers, and then how to use -// the information from these providers to get and use temporary security -// credentials. +// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and +// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers, and then how to use the information from these providers to +// get and use temporary security credentials. // // * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). // This article discusses web identity federation and shows an example of @@ -572,7 +565,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity @@ -644,17 +637,17 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // Decodes additional information about the authorization status of a request // from an encoded message returned in response to an AWS request. // -// For example, if a user is not authorized to perform an action that he or -// she has requested, the request returns a Client.UnauthorizedOperation response -// (an HTTP 403 response). Some AWS actions additionally return an encoded message -// that can provide details about this authorization failure. +// For example, if a user is not authorized to perform an operation that he +// or she has requested, the request returns a Client.UnauthorizedOperation +// response (an HTTP 403 response). Some AWS operations additionally return +// an encoded message that can provide details about this authorization failure. // -// Only certain AWS actions return an encoded authorization message. The documentation -// for an individual action indicates whether that action returns an encoded -// message in addition to returning an HTTP code. +// Only certain AWS operations return an encoded authorization message. The +// documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. // // The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the action +// constitute privileged information that the user who requested the operation // should not see. 
To decode an authorization status message, a user must be // granted permissions via an IAM policy to request the DecodeAuthorizationMessage // (sts:DecodeAuthorizationMessage) action. @@ -663,7 +656,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // // * Whether the request was denied due to an explicit deny or due to the // absence of an explicit allow. For more information, see Determining Whether -// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) // in the IAM User Guide. // // * The principal who made the request. @@ -709,6 +702,102 @@ func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *Deco return out, req.Send() } +const opGetAccessKeyInfo = "GetAccessKeyInfo" + +// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyInfo operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccessKeyInfoRequest method. +// req, resp := client.GetAccessKeyInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { + op := &request.Operation{ + Name: opGetAccessKeyInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyInfoInput{} + } + + output = &GetAccessKeyInfoOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccessKeyInfo API operation for AWS Security Token Service. +// +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) +// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). +// For more information about access keys, see Managing Access Keys for IAM +// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// AWS account to which the keys belong. Access key IDs beginning with AKIA +// are long-term credentials for an IAM user or the AWS account root user. Access +// key IDs beginning with ASIA are temporary credentials that are created using +// STS operations. If the account in the response belongs to you, you can sign +// in as the root user and review your root user access keys. Then, you can +// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. 
To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html). +// +// This operation does not indicate the state of the access key. The key might +// be active, inactive, or deleted. Active keys might not have permissions to +// perform an operation. Providing a deleted access key might return an error +// that the key doesn't exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetAccessKeyInfo for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + return out, req.Send() +} + +// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccessKeyInfo for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the @@ -753,8 +842,15 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // GetCallerIdentity API operation for AWS Security Token Service. // -// Returns details about the IAM identity whose credentials are used to call -// the API. +// Returns details about the IAM user or role whose credentials are used to +// call the operation. +// +// No permissions are required to perform this operation. If an administrator +// adds a policy to your IAM user or role that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when an IAM user +// or role is denied access. To view an example response, see I Am Not Authorized +// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -831,81 +927,65 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // Returns a set of temporary security credentials (consisting of an access // key ID, a secret access key, and a security token) for a federated user. // A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. 
Because -// you must call the GetFederationToken action using the long-term security -// credentials of an IAM user, this call is appropriate in contexts where those +// on behalf of distributed applications inside a corporate network. You must +// call the GetFederationToken operation using the long-term security credentials +// of an IAM user. As a result, this call is appropriate in contexts where those // credentials can be safely stored, usually in a server-based application. -// For a comparison of GetFederationToken with the other APIs that produce temporary -// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// For a comparison of GetFederationToken with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// If you are creating a mobile-based or browser-based app that can authenticate +// You can create a mobile-based or browser-based app that can authenticate // users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider, we recommend that you -// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// or an OpenID Connect-compatible identity provider. In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. // For more information, see Federation Through a Web-based Identity Provider -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// The GetFederationToken action must be called by using the long-term AWS security -// credentials of an IAM user. You can also call GetFederationToken using the -// security credentials of an AWS root account, but we do not recommended it. -// Instead, we recommend that you create an IAM user for the purpose of the -// proxy application and then attach a policy to the IAM user that limits federated -// users to only the actions and resources that they need access to. For more -// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) // in the IAM User Guide. // -// The temporary security credentials that are obtained by using the long-term -// credentials of an IAM user are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximium of 129600 seconds (36 hours). 
The default -// is 43200 seconds (12 hours). Temporary credentials that are obtained by using -// AWS root account credentials have a maximum duration of 3600 seconds (1 hour). +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// is 43,200 seconds (12 hours). Temporary credentials that are obtained by +// using AWS account root user credentials have a maximum duration of 3,600 +// seconds (1 hour). // // The temporary security credentials created by GetFederationToken can be used // to make API calls to any AWS service with the following exceptions: // -// * You cannot use these credentials to call any IAM APIs. +// * You cannot use these credentials to call any IAM API operations. // -// * You cannot call any STS APIs except GetCallerIdentity. +// * You cannot call any STS API operations except GetCallerIdentity. // // Permissions // -// The permissions for the temporary security credentials returned by GetFederationToken -// are determined by a combination of the following: -// -// * The policy or policies that are attached to the IAM user whose credentials -// are used to call GetFederationToken. -// -// * The policy that is passed as a parameter in the call. -// -// The passed policy is attached to the temporary security credentials that -// result from the GetFederationToken API call--that is, to the federated user. -// When the federated user makes an AWS request, AWS evaluates the policy attached -// to the federated user in combination with the policy or policies attached -// to the IAM user whose credentials were used to call GetFederationToken. AWS -// allows the federated user's request only when both the federated user and -// the IAM user are explicitly allowed to perform the requested action. The -// passed policy cannot grant more permissions than those that are defined in -// the IAM user policy. -// -// A typical use case is that the permissions of the IAM user whose credentials -// are used to call GetFederationToken are designed to allow access to all the -// actions and resources that any federated user will need. Then, for individual -// users, you pass a policy to the operation that scopes down the permissions -// to a level that's appropriate to that individual user, using a policy that -// allows only a subset of permissions that are granted to the IAM user. -// -// If you do not pass a policy, the resulting temporary security credentials -// have no effective permissions. The only exception is when the temporary security -// credentials are used to access a resource that has a resource-based policy -// that specifically allows the federated user to access the resource. -// -// For more information about how permissions work, see Permissions for GetFederationToken -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). -// For information about using GetFederationToken to create temporary security -// credentials, see GetFederationToken—Federation Through a Custom Identity -// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. 
You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. The only exception +// is when the credentials are used to access a resource that has a resource-based +// policy that specifically references the federated user session in the Principal +// element of the policy. When you pass session policies, the session permissions +// are the intersection of the IAM user policies and the session policies that +// you pass. This gives you a way to further restrict the permissions for a +// federated user. You cannot use session policies to grant more permissions +// than those that are defined in the permissions policy of the IAM user. For +// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -928,7 +1008,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken @@ -1000,48 +1080,47 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // Returns a set of temporary credentials for an AWS account or IAM user. The // credentials consist of an access key ID, a secret access key, and a security // token. Typically, you use GetSessionToken if you want to use MFA to protect -// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled -// IAM users would need to call GetSessionToken and submit an MFA code that -// is associated with their MFA device. Using the temporary security credentials -// that are returned from the call, IAM users can then make programmatic calls -// to APIs that require MFA authentication. If you do not supply a correct MFA -// code, then the API returns an access denied error. For a comparison of GetSessionToken -// with the other APIs that produce temporary credentials, see Requesting Temporary -// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances. 
+// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA +// code that is associated with their MFA device. Using the temporary security +// credentials that are returned from the call, IAM users can then make programmatic +// calls to API operations that require MFA authentication. If you do not supply +// a correct MFA code, then the API returns an access denied error. For a comparison +// of GetSessionToken with the other API operations that produce temporary credentials, +// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// The GetSessionToken action must be called by using the long-term AWS security -// credentials of the AWS account or an IAM user. Credentials that are created -// by IAM users are valid for the duration that you specify, from 900 seconds -// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default -// of 43200 seconds (12 hours); credentials that are created by using account -// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600 -// seconds (1 hour), with a default of 1 hour. +// The GetSessionToken operation must be called by using the long-term AWS security +// credentials of the AWS account root user or an IAM user. Credentials that +// are created by IAM users are valid for the duration that you specify. This +// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials +// based on account credentials can range from 900 seconds (15 minutes) up to +// 3,600 seconds (1 hour), with a default of 1 hour. // // The temporary security credentials created by GetSessionToken can be used // to make API calls to any AWS service with the following exceptions: // -// * You cannot call any IAM APIs unless MFA authentication information is -// included in the request. +// * You cannot call any IAM API operations unless MFA authentication information +// is included in the request. // -// * You cannot call any STS API exceptAssumeRole or GetCallerIdentity. +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. // -// We recommend that you do not call GetSessionToken with root account credentials. -// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// We recommend that you do not call GetSessionToken with AWS account root user +// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) // by creating one or more IAM users, giving them the necessary permissions, // and using IAM users for everyday interaction with AWS. // -// The permissions associated with the temporary security credentials returned -// by GetSessionToken are based on the permissions associated with account or -// IAM user whose credentials are used to call the action. If GetSessionToken -// is called using root account credentials, the temporary credentials have -// root account permissions. Similarly, if GetSessionToken is called using the -// credentials of an IAM user, the temporary credentials have the same permissions -// as the IAM user. 
+// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. +// If GetSessionToken is called using AWS account root user credentials, the +// temporary credentials have root user permissions. Similarly, if GetSessionToken +// is called using the credentials of an IAM user, the temporary credentials +// have the same permissions as the IAM user. // // For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1056,7 +1135,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken @@ -1091,7 +1170,7 @@ type AssumeRoleInput struct { // a session duration of 12 hours, but your administrator set the maximum session // duration to 6 hours, your operation fails. To learn how to view the maximum // value for your role, see View the Maximum Session Duration Setting for a - // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. // // By default, the value is set to 3600 seconds. @@ -1101,51 +1180,77 @@ type AssumeRoleInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // A unique identifier that is used by third parties when assuming roles in - // their customers' accounts. For each role that the third party can assume, - // they should instruct their customers to ensure the role's trust policy checks - // for the external ID that the third party generated. Each time the third party - // assumes the role, they should pass the customer's external ID. The external - // ID is useful in order to help third parties bind a role to the customer who - // created it. 
For more information about the external ID, see How to Use an - // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your AWS Resources + // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) // in the IAM User Guide. // - // The regex used to validated this parameter is a string of characters consisting + // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can // also include underscores or any of the following characters: =,.@:/- ExternalId *string `min:"2" type:"string"` - // An IAM policy in JSON format. - // - // This parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both (the intersection of) the access policy of the role that - // is being assumed, and the policy that you pass. This gives you a way to further - // restrict the permissions for the resulting temporary security credentials. - // You cannot use the passed policy to grant permissions that are in excess - // of those allowed by the access policy of the role that is being assumed. - // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, - // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. 
The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The Amazon Resource Name (ARN) of the role to assume. // // RoleArn is a required field @@ -1158,8 +1263,8 @@ type AssumeRoleInput struct { // scenarios, the role session name is visible to, and can be logged by the // account that owns the role. The role session name is also used in the ARN // of the assumed role principal. This means that subsequent cross-account API - // requests using the temporary security credentials will expose the role session - // name to the external account in their CloudTrail logs. + // requests that use the temporary security credentials will expose the role + // session name to the external account in their AWS CloudTrail logs. // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. 
You can @@ -1229,6 +1334,16 @@ func (s *AssumeRoleInput) Validate() error { if s.TokenCode != nil && len(*s.TokenCode) < 6 { invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1254,6 +1369,12 @@ func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { s.RoleArn = &v @@ -1293,10 +1414,8 @@ type AssumeRoleOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` // A percentage value that indicates the size of the policy in packed form. @@ -1346,7 +1465,7 @@ type AssumeRoleWithSAMLInput struct { // specify a session duration of 12 hours, but your administrator set the maximum // session duration to 6 hours, your operation fails. To learn how to view the // maximum value for your role, see View the Maximum Session Duration Setting - // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. // // By default, the value is set to 3600 seconds. @@ -1356,36 +1475,60 @@ type AssumeRoleWithSAMLInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // An IAM policy in JSON format. - // - // The policy parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the access policy of the role that is being assumed. 
For more information, - // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. 
The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes // the IdP. // @@ -1399,8 +1542,8 @@ type AssumeRoleWithSAMLInput struct { // The base-64 encoded SAML authentication response provided by the IdP. // - // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the Using IAM guide. + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. // // SAMLAssertion is a required field SAMLAssertion *string `min:"4" type:"string" required:"true"` @@ -1443,6 +1586,16 @@ func (s *AssumeRoleWithSAMLInput) Validate() error { if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1462,6 +1615,12 @@ func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + // SetPrincipalArn sets the PrincipalArn field's value. func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { s.PrincipalArn = &v @@ -1496,10 +1655,8 @@ type AssumeRoleWithSAMLOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` // The value of the Issuer element of the SAML assertion. @@ -1603,7 +1760,7 @@ type AssumeRoleWithWebIdentityInput struct { // a session duration of 12 hours, but your administrator set the maximum session // duration to 6 hours, your operation fails. 
To learn how to view the maximum // value for your role, see View the Maximum Session Duration Setting for a - // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. // // By default, the value is set to 3600 seconds. @@ -1613,35 +1770,60 @@ type AssumeRoleWithWebIdentityInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // An IAM policy in JSON format. + // An IAM policy in JSON format that you want to use as an inline session policy. // - // The policy parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the access policy of the role that is being assumed. For more information, - // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. 
- // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The fully qualified host component of the domain name of the identity provider. // // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com @@ -1718,6 +1900,16 @@ func (s *AssumeRoleWithWebIdentityInput) Validate() error { if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1737,6 +1929,12 @@ func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebI return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + // SetProviderId sets the ProviderId field's value. 
func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { s.ProviderId = &v @@ -1781,10 +1979,8 @@ type AssumeRoleWithWebIdentityOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` // A percentage value that indicates the size of the policy in packed form. @@ -1793,7 +1989,7 @@ type AssumeRoleWithWebIdentityOutput struct { PackedPolicySize *int64 `type:"integer"` // The issuing authority of the web identity token presented. For OpenID Connect - // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access // tokens, this contains the value of the ProviderId parameter that was passed // in the AssumeRoleWithWebIdentity request. Provider *string `type:"string"` @@ -1860,7 +2056,7 @@ type AssumedRoleUser struct { // The ARN of the temporary security credentials that are returned from the // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) // in Using IAM. // // Arn is a required field @@ -2028,7 +2224,7 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the credentials. // For more information about ARNs and how to use them in policies, see IAM - // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) // in Using IAM. // // Arn is a required field @@ -2063,6 +2259,73 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercased letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the AWS account. + Account *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -2090,8 +2353,8 @@ type GetCallerIdentityOutput struct { Arn *string `min:"20" type:"string"` // The unique identifier of the calling entity. The exact value depends on the - // type of entity making the call. The values returned are those listed in the - // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) // found on the Policy Variables reference page in the IAM User Guide. UserId *string `type:"string"` } @@ -2128,12 +2391,11 @@ type GetFederationTokenInput struct { _ struct{} `type:"structure"` // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds - // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained - // using AWS account (root) credentials are restricted to a maximum of 3600 + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using AWS account root user credentials are restricted to a maximum of 3,600 // seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using AWS account (root) credentials defaults to one - // hour. + // session obtained by using root user credentials defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The name of the federated user. The name is used as an identifier for the @@ -2148,36 +2410,73 @@ type GetFederationTokenInput struct { // Name is a required field Name *string `min:"2" type:"string" required:"true"` - // An IAM policy in JSON format that is passed with the GetFederationToken call - // and evaluated along with the policy or policies that are attached to the - // IAM user whose credentials are used to call GetFederationToken. 
The passed - // policy is used to scope down the permissions that are available to the IAM - // user, by allowing only a subset of the permissions that are granted to the - // IAM user. The passed policy cannot grant more permissions than those granted - // to the IAM user. The final permissions for the federated user are the most - // restrictive set based on the intersection of the passed policy and the IAM - // user policy. - // - // If you do not pass a policy, the resulting temporary security credentials - // have no effective permissions. The only exception is when the temporary security - // credentials are used to access a resource that has a resource-based policy - // that specifically allows the federated user to access the resource. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. The only exception + // is when the credentials are used to access a resource that has a resource-based + // policy that specifically references the federated user session in the Principal + // element of the policy. // - // For more information about how permissions work, see Permissions for GetFederationToken - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. 
However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plain text that you use for both inline + // and managed session policies shouldn't exceed 2048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. The only exception + // is when the credentials are used to access a resource that has a resource-based + // policy that specifically references the federated user session in the Principal + // element of the policy. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` } // String returns the string representation @@ -2205,6 +2504,16 @@ func (s *GetFederationTokenInput) Validate() error { if s.Policy != nil && len(*s.Policy) < 1 { invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2230,6 +2539,12 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { return s } +// SetPolicyArns sets the PolicyArns field's value. 
+func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. type GetFederationTokenOutput struct { @@ -2238,10 +2553,8 @@ type GetFederationTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` // Identifiers for the federated user associated with the credentials (such @@ -2288,11 +2601,11 @@ type GetSessionTokenInput struct { _ struct{} `type:"structure"` // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 - // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions - // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). - // If the duration is longer than one hour, the session for AWS account owners - // defaults to one hour. + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3,600 seconds (one + // hour). If the duration is longer than one hour, the session for AWS account + // owners defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The identification number of the MFA device that is associated with the IAM @@ -2303,16 +2616,16 @@ type GetSessionTokenInput struct { // You can find the device for an IAM user by going to the AWS Management Console // and viewing the user's security credentials. // - // The regex used to validated this parameter is a string of characters consisting + // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can // also include underscores or any of the following characters: =,.@:/- SerialNumber *string `min:"9" type:"string"` // The value provided by the MFA device, if MFA is required. If any policy requires // the IAM user to submit an MFA code, specify this value. If MFA authentication - // is required, and the user does not provide a code when requesting a set of - // temporary security credentials, the user will receive an "access denied" - // response when requesting resources that require MFA authentication. + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. // // The format for this parameter, as described by its regex pattern, is a sequence // of six numeric digits. 
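As a usage aside (not part of the vendored diff): the hunks above add the PolicyArns parameter and its setter to GetFederationTokenInput. A minimal sketch of scoping a federated session with a managed session policy, assuming the aws-sdk-go v1 API vendored here; the federated user name and the managed policy ARN are placeholder values:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// The session's permissions are the intersection of the calling IAM
	// user's policies and the session policies passed here, per the
	// documentation above. The name and policy ARN are placeholders.
	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-federated-user"),
		DurationSeconds: aws.Int64(43200),
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}

Per the documentation above, omitting both Policy and PolicyArns yields a federated session with no permissions, except where a resource-based policy references the federated session directly.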
@@ -2374,10 +2687,8 @@ type GetSessionTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` } @@ -2396,3 +2707,44 @@ func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenO s.Credentials = v return s } + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go index 4010cc7f..d5307fca 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -3,10 +3,9 @@ package sts import "github.com/aws/aws-sdk-go/aws/request" func init() { - initRequest = func(r *request.Request) { - switch r.Operation.Name { - case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: - r.Handlers.Sign.Clear() // these operations are unsigned - } - } + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index ef681ab0..fcb720dc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -7,22 +7,14 @@ // request temporary, limited-privilege credentials for AWS Identity and Access // Management (IAM) users or for users that you authenticate (federated users). // This guide provides descriptions of the STS API. 
For more detailed information -// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). -// -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient -// way to create programmatic access to STS. For example, the SDKs take care -// of cryptographically signing requests, managing errors, and retrying requests -// automatically. For information about the AWS SDKs, including how to download -// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // // For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) // in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) // in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) // in the IAM User Guide. // // If you're new to AWS and need additional technical information about a specific @@ -31,14 +23,38 @@ // // Endpoints // -// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com -// that maps to the US East (N. Virginia) region. Additional regions are available -// and are activated by default. For more information, see Activating and Deactivating -// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// By default, AWS Security Token Service (STS) is available as a global service, +// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. +// Global requests map to the US East (N. Virginia) region. AWS recommends using +// Regional AWS STS endpoints instead of the global endpoint to reduce latency, +// build in redundancy, and increase session token validity. For more information, +// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Most AWS Regions are enabled for operations in all AWS services by default. +// Those Regions are automatically activated for use with AWS STS. Some Regions, +// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more +// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) +// in the AWS General Reference. When you enable these AWS Regions, they are +// automatically activated for use with AWS STS. 
You cannot activate the STS +// endpoint for a Region that is disabled. Tokens that are valid in all AWS +// Regions are longer than tokens that are valid in Regions that are enabled +// by default. Changing this setting might affect existing systems where you +// temporarily store tokens. For more information, see Managing Global Endpoint +// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) // in the IAM User Guide. // -// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) -// in the AWS General Reference. +// After you activate a Region for use with AWS STS, you can direct AWS STS +// API calls to that Region. AWS STS recommends that you provide both the Region +// and endpoint when you make calls to a Regional endpoint. You can provide +// the Region alone for manually enabled Regions, such as Asia Pacific (Hong +// Kong). In this case, the calls are directed to the STS Regional endpoint. +// However, if you provide the Region alone for Regions enabled by default, +// the calls are directed to the global endpoint of https://sts.amazonaws.com. +// +// To view the list of AWS STS endpoints and whether they are active by default, +// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) +// in the IAM User Guide. // // Recording API requests // @@ -46,8 +62,28 @@ // your AWS account and delivers log files to an Amazon S3 bucket. By using // information collected by CloudTrail, you can determine what requests were // successfully made to STS, who made the request, when it was made, and so -// on. To learn more about CloudTrail, including how to turn it on and find -// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// on. +// +// If you activate AWS STS endpoints in Regions other than the default global +// endpoint, then you must also turn on CloudTrail logging in those Regions. +// This is necessary to record any AWS STS API calls that are made in those +// Regions. For more information, see Turning On CloudTrail in Additional Regions +// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) +// in the AWS CloudTrail User Guide. +// +// AWS Security Token Service (STS) is a global service with a single endpoint +// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls +// to a global service. However, because this endpoint is physically located +// in the US East (N. Virginia) Region, your logs list us-east-1 as the event +// Region. CloudTrail does not write these logs to the US East (Ohio) Region +// unless you choose to include global service logs in that Region. CloudTrail +// writes calls to all Regional endpoints to their respective Regions. For example, +// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) +// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU +// (Frankfurt) Region. +// +// To learn more about CloudTrail, including how to turn it on and find your +// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). 
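To make the endpoint guidance above concrete, a minimal sketch (not part of the vendored diff) of directing STS calls to a Regional endpoint by supplying both the Region and the endpoint, as the text recommends; the Region chosen is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Provide both the Region and the endpoint so requests go to the
	// Regional endpoint rather than the global https://sts.amazonaws.com.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:   aws.String("eu-central-1"),
		Endpoint: aws.String("https://sts.eu-central-1.amazonaws.com"),
	}))
	svc := sts.New(sess)

	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	// Per the CloudTrail discussion above, this call is logged in the
	// eu-central-1 Region rather than as a global-service event.
	fmt.Println(aws.StringValue(out.Arn))
}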
//
// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
//
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
index e24884ef..41ea09c3 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -67,7 +67,7 @@ const (
 // STS is not activated in the requested region for the account that is being
 // asked to generate credentials. The account administrator must use the IAM
 // console to activate STS in that region. For more information, see Activating
- // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+ // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 // in the IAM User Guide.
 ErrCodeRegionDisabledException = "RegionDisabledException"
)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 00000000..e2e1d6ef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Security Token Service.
+//    func myFunc(svc stsiface.STSAPI) bool {
+//        // Make svc.AssumeRole request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sts.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSTSClient struct {
+//        stsiface.STSAPI
+//    }
+//    func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSTSClient{}
+//
+//        myfunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
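The package comment above sketches the mocking pattern in fragments; a compilable variant of the same idea (an editorial sketch, not part of the vendored file) follows. The helper accountID, the stubbed account number, and the test name are illustrative:

package example

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
	"github.com/aws/aws-sdk-go/service/sts/stsiface"
)

// mockSTSClient embeds STSAPI so only the methods under test need stubs.
type mockSTSClient struct {
	stsiface.STSAPI
}

func (m *mockSTSClient) GetCallerIdentity(in *sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) {
	// Canned response; a real client would call the STS API.
	return &sts.GetCallerIdentityOutput{Account: aws.String("123456789012")}, nil
}

// accountID is the code under test; it depends on the interface, not *sts.STS.
func accountID(svc stsiface.STSAPI) (string, error) {
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.Account), nil
}

func TestAccountID(t *testing.T) {
	got, err := accountID(&mockSTSClient{})
	if err != nil || got != "123456789012" {
		t.Fatalf("unexpected result: %q, %v", got, err)
	}
}

The STSAPI interface that makes this substitution possible is the one the generated file declares next: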
+type STSAPI interface { + AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) + AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) + + AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) + + AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) + + DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) + + GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) + + GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) + GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) + GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) +} + +var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/hashicorp/go-getter/.travis.yml b/vendor/github.com/hashicorp/go-getter/.travis.yml deleted file mode 100644 index 4fe9176a..00000000 --- a/vendor/github.com/hashicorp/go-getter/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -sudo: false - -addons: - apt: - sources: - - sourceline: 'ppa:git-core/ppa' - packages: - - git - -language: go - -os: - - linux - - osx - -go: - - "1.11.x" - -before_script: - - go build ./cmd/go-getter - -branches: - only: - - master diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md index 3de23c70..bbcd15de 100644 --- a/vendor/github.com/hashicorp/go-getter/README.md +++ b/vendor/github.com/hashicorp/go-getter/README.md @@ -1,10 +1,10 @@ # go-getter -[![Build 
Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis] +[![CircleCI](https://circleci.com/gh/hashicorp/go-getter/tree/master.svg?style=svg)][circleci] [![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor] [![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] -[travis]: http://travis-ci.org/hashicorp/go-getter +[circleci]: https://circleci.com/gh/hashicorp/go-getter/tree/master [godocs]: http://godoc.org/github.com/hashicorp/go-getter [appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master @@ -356,3 +356,7 @@ In order to access to GCS, authentication credentials should be provided. More i - gcs::https://www.googleapis.com/storage/v1/bucket - gcs::https://www.googleapis.com/storage/v1/bucket/foo.zip - www.googleapis.com/storage/v1/bucket/foo + +#### GCS Testing + +The tests for `get_gcs.go` require you to have GCP credentials set in your environment. These credentials can have any level of permissions to any project, they just need to exist. This means setting `GOOGLE_APPLICATION_CREDENTIALS="~/path/to/credentials.json"` or `GOOGLE_CREDENTIALS="{stringified-credentials-json}"`. Due to this configuration, `get_gcs_test.go` will fail for external contributors in CircleCI. diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go index 007a78ba..38fb43b8 100644 --- a/vendor/github.com/hashicorp/go-getter/client.go +++ b/vendor/github.com/hashicorp/go-getter/client.go @@ -19,7 +19,7 @@ import ( // Using a client directly allows more fine-grained control over how downloading // is done, as well as customizing the protocols supported. type Client struct { - // Ctx for cancellation + // Ctx for cancellation Ctx context.Context // Src is the source URL to get. diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go index bb1ec316..1b9f4be8 100644 --- a/vendor/github.com/hashicorp/go-getter/get_git.go +++ b/vendor/github.com/hashicorp/go-getter/get_git.go @@ -1,6 +1,7 @@ package getter import ( + "bytes" "context" "encoding/base64" "fmt" @@ -9,6 +10,7 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "runtime" "strconv" "strings" @@ -24,6 +26,8 @@ type GitGetter struct { getter } +var defaultBranchRegexp = regexp.MustCompile(`\s->\sorigin/(.*)`) + func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { return ClientModeDir, nil } @@ -182,10 +186,10 @@ func (g *GitGetter) update(ctx context.Context, dst, sshKeyFile, ref string, dep cmd.Dir = dst if getRunCommand(cmd) != nil { - // Not a branch, switch to master. This will also catch non-existent - // branches, in which case we want to switch to master and then - // checkout the proper branch later. - ref = "master" + // Not a branch, switch to default branch. This will also catch + // non-existent branches, in which case we want to switch to default + // and then checkout the proper branch later. + ref = findDefaultBranch(dst) } // We have to be on a branch to pull @@ -216,6 +220,22 @@ func (g *GitGetter) fetchSubmodules(ctx context.Context, dst, sshKeyFile string, return getRunCommand(cmd) } +// findDefaultBranch checks the repo's origin remote for its default branch +// (generally "master"). "master" is returned if an origin default branch +// can't be determined. 
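Editor's aside: to make the regexp used by findDefaultBranch (below) concrete: `git branch -r --points-at refs/remotes/origin/HEAD` prints a line such as `  origin/HEAD -> origin/main`, and the capture group picks out everything after `origin/`. A standalone sketch, with the sample git output hard-coded as an assumption:

```go
package main

import (
	"fmt"
	"regexp"
)

var defaultBranchRegexp = regexp.MustCompile(`\s->\sorigin/(.*)`)

func main() {
	// Hypothetical output of:
	//   git branch -r --points-at refs/remotes/origin/HEAD
	out := "  origin/HEAD -> origin/main\n"
	if m := defaultBranchRegexp.FindStringSubmatch(out); m != nil {
		fmt.Println(m[len(m)-1]) // prints "main"
	}
}
```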
+func findDefaultBranch(dst string) string { + var stdoutbuf bytes.Buffer + cmd := exec.Command("git", "branch", "-r", "--points-at", "refs/remotes/origin/HEAD") + cmd.Dir = dst + cmd.Stdout = &stdoutbuf + err := cmd.Run() + matches := defaultBranchRegexp.FindStringSubmatch(stdoutbuf.String()) + if err != nil || matches == nil { + return "master" + } + return matches[len(matches)-1] +} + // setupGitEnv sets up the environment for the given command. This is used to // pass configuration data to git and ssh and enables advanced cloning methods. func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go index 7c4541c6..9ffdba78 100644 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ b/vendor/github.com/hashicorp/go-getter/get_http.go @@ -9,7 +9,6 @@ import ( "net/url" "os" "path/filepath" - "strconv" "strings" safetemp "github.com/hashicorp/go-safetemp" @@ -88,7 +87,10 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { return err } - req.Header = g.Header + if g.Header != nil { + req.Header = g.Header + } + resp, err := g.Client.Do(req) if err != nil { return err @@ -128,6 +130,12 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { return g.getSubdir(ctx, dst, source, subDir) } +// GetFile fetches the file from src and stores it at dst. +// If the server supports Accept-Range, HttpGetter will attempt a range +// request. This means it is the caller's responsibility to ensure that an +// older version of the destination file does not exist, else it will be either +// falsely identified as being replaced, or corrupted with extra bytes +// appended. func (g *HttpGetter) GetFile(dst string, src *url.URL) error { ctx := g.Context() if g.Netrc { @@ -136,7 +144,6 @@ func (g *HttpGetter) GetFile(dst string, src *url.URL) error { return err } } - // Create all the parent directories if needed if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { return err @@ -165,18 +172,17 @@ func (g *HttpGetter) GetFile(dst string, src *url.URL) error { req.Header = g.Header } headResp, err := g.Client.Do(req) - if err == nil && headResp != nil { + if err == nil { headResp.Body.Close() if headResp.StatusCode == 200 { // If the HEAD request succeeded, then attempt to set the range // query if we can. - if headResp.Header.Get("Accept-Ranges") == "bytes" { + if headResp.Header.Get("Accept-Ranges") == "bytes" && headResp.ContentLength >= 0 { if fi, err := f.Stat(); err == nil { - if _, err = f.Seek(0, os.SEEK_END); err == nil { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + if _, err = f.Seek(0, io.SeekEnd); err == nil { currentFileSize = fi.Size() - totalFileSize, _ := strconv.ParseInt(headResp.Header.Get("Content-Length"), 10, 64) - if currentFileSize >= totalFileSize { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", currentFileSize)) + if currentFileSize >= headResp.ContentLength { // file already present return nil } diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index fe305ad5..46ee09fc 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -141,11 +141,6 @@ This plugin system will give host processes a system for constraining versions. This is in addition to the protocol versioning already present which is more for larger underlying changes. 
-**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter) -to support automatic download + install of plugins. Paired with cryptographically -secure plugins (above), we can make this a safe operation for an amazing -user experience. - ## What About Shared Libraries? When we started using plugins (late 2012, early 2013), plugins over RPC diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index f9e9effe..780a3121 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -212,6 +212,12 @@ type ReattachConfig struct { Protocol Protocol Addr net.Addr Pid int + + // Test is set to true if this is reattaching to a plugin in "test mode" + // (see ServeConfig.Test). In this mode, client.Kill will NOT kill the + // process and instead will rely on the plugin to terminate itself. This + // should not be used in non-test environments. + Test bool } // SecureConfig is used to configure a client to verify the integrity of an @@ -825,15 +831,21 @@ func (c *Client) reattach() (net.Addr, error) { c.exited = true }(p.Pid) - // Set the address and process + // Set the address and protocol c.address = c.config.Reattach.Addr - c.process = p c.protocol = c.config.Reattach.Protocol if c.protocol == "" { // Default the protocol to net/rpc for backwards compatibility c.protocol = ProtocolNetRPC } + // If we're in test mode, we do NOT set the process. This avoids the + // process being killed (the only purpose we have for c.process), since + // in test mode the process is responsible for exiting on its own. + if !c.config.Reattach.Test { + c.process = p + } + return c.address, nil } diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go index 40977c0a..6231a9fd 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go @@ -7,12 +7,11 @@ import ( "io" empty "github.com/golang/protobuf/ptypes/empty" - "github.com/hashicorp/go-hclog" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - - "github.com/hashicorp/go-plugin/internal/plugin" ) // grpcStdioBuffer is the buffer size we try to fill when sending a chunk of @@ -101,10 +100,10 @@ func newGRPCStdioClient( // Connect immediately to the endpoint stdioClient, err := client.StreamStdio(ctx, &empty.Empty{}) - // If we get an Unavailable error, this means that the plugin isn't + // If we get an Unavailable or Unimplemented error, this means that the plugin isn't // updated and linking to the latest version of go-plugin that supports // this. We fall back to the previous behavior of just not syncing anything.
- if status.Code(err) == codes.Unavailable { + if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented { log.Warn("stdio service not available, stdout/stderr syncing unavailable") stdioClient = nil err = nil @@ -135,6 +134,7 @@ func (c *grpcStdioClient) Run(stdout, stderr io.Writer) { if err == io.EOF || status.Code(err) == codes.Unavailable || status.Code(err) == codes.Canceled || + status.Code(err) == codes.Unimplemented || err == context.Canceled { c.log.Warn("received EOF, stopping recv loop", "err", err) return diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index e8b8c977..002d6080 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -1,11 +1,13 @@ package plugin import ( + "context" "crypto/tls" "crypto/x509" "encoding/base64" "errors" "fmt" + "io" "io/ioutil" "log" "net" @@ -83,6 +85,51 @@ type ServeConfig struct { // Logger is used to pass a logger into the server. If none is provided the // server will create a default logger. Logger hclog.Logger + + // Test, if non-nil, will put plugin serving into "test mode". This is + // meant to be used as part of `go test` within a plugin's codebase to + // launch the plugin in-process and output a ReattachConfig. + // + // This changes the behavior of the server in a number of ways to + // accommodate the expectation of running in-process: + // + // * The handshake cookie is not validated. + // * Stdout/stderr will receive plugin reads and writes + // * Connection information will not be sent to stdout + // + Test *ServeTestConfig } + +// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test. +type ServeTestConfig struct { + // Context, if set, will force the plugin serving to end when cancelled. + // This is only a test configuration because the non-test configuration + // expects to take over the process and therefore end on an interrupt or + // kill signal. For tests, we need to kill the plugin serving routinely + // and this provides a way to do so. + // + // If you want to wait for the plugin process to close before moving on, + // you can wait on CloseCh. + Context context.Context + + // If this channel is non-nil, we will send the ReattachConfig via + // this channel. This can be encoded (via JSON recommended) to the + // plugin client to attach to this plugin. + ReattachConfigCh chan<- *ReattachConfig + + // CloseCh, if non-nil, will be closed when serving exits. This can be + // used along with Context to determine when the server is fully shut down. + // If this is not set, you can still use Context on its own, but note there + // may be a period of time between canceling the context and the plugin + // server being shut down. + CloseCh chan<- struct{} + + // SyncStdio, if true, will enable the client side "SyncStdout/Stderr" + // functionality to work. This defaults to false because the implementation + // of making this work within test environments is particularly messy + // and SyncStdio functionality is fairly rare, so we default to the simple + // scenario. + SyncStdio bool } // protocolVersion determines the protocol version and plugin set to be used by @@ -167,26 +214,46 @@ func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { // Serve serves the plugins given by ServeConfig. // // Serve doesn't return until the plugin is done being executed. Any
+// fixable errors will be output to os.Stderr and the process will +// exit with a status code of 1. Serve will panic for unexpected +// conditions where a user's fix is unknown. // // This is the method that plugins should call in their main() functions. func Serve(opts *ServeConfig) { - // Validate the handshake config - if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { - fmt.Fprintf(os.Stderr, - "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ - "key or value was set. Please notify the plugin author and report\n"+ - "this as a bug.\n") - os.Exit(1) - } + exitCode := -1 + // We use this to trigger an `os.Exit` so that we can execute our other + // deferred functions. In test mode, we just output the err to stderr + // and return. + defer func() { + if opts.Test == nil && exitCode >= 0 { + os.Exit(exitCode) + } - // First check the cookie - if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { - fmt.Fprintf(os.Stderr, - "This binary is a plugin. These are not meant to be executed directly.\n"+ - "Please execute the program that consumes these plugins, which will\n"+ - "load any plugins automatically\n") - os.Exit(1) + if opts.Test != nil && opts.Test.CloseCh != nil { + close(opts.Test.CloseCh) + } + }() + + if opts.Test == nil { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. Please notify the plugin author and report\n"+ + "this as a bug.\n") + exitCode = 1 + return + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + exitCode = 1 + return + } } // negotiate the version and plugins @@ -206,19 +273,6 @@ func Serve(opts *ServeConfig) { }) } - // Create our new stdout, stderr files. These will override our built-in - // stdout/stderr so that it works across the stream boundary. - stdout_r, stdout_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - stderr_r, stderr_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - // Register a listener so we can accept a connection listener, err := serverListener() if err != nil { @@ -279,6 +333,33 @@ func Serve(opts *ServeConfig) { // Create the channel to tell us when we're done doneCh := make(chan struct{}) + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + var stdout_r, stderr_r io.Reader + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // If we're in test mode, we tee off the reader and write the data + // as-is to our normal Stdout and Stderr so that they continue working + // while stdio works. This is because in test mode, we assume we're running + // in `go test` or some equivalent and we want output to go to standard + // locations. 
+ if opts.Test != nil { + // TODO(mitchellh): This isn't super ideal because a TeeReader + // only works if the reader side is actively read. If we never + // connect via a plugin client, the output still gets swallowed. + stdout_r = io.TeeReader(stdout_r, os.Stdout) + stderr_r = io.TeeReader(stderr_r, os.Stderr) + } + // Build the server type var server ServerProtocol switch protoType { @@ -321,35 +402,96 @@ func Serve(opts *ServeConfig) { logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) - // Output the address and service name to stdout so that the client can bring it up. - fmt.Printf("%d|%d|%s|%s|%s|%s\n", - CoreProtocolVersion, - protoVersion, - listener.Addr().Network(), - listener.Addr().String(), - protoType, - serverCert) - os.Stdout.Sync() - - // Eat the interrupts - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) - go func() { - count := 0 - for { - <-ch - count++ - logger.Trace("plugin received interrupt signal, ignoring", "count", count) + // Output the address and service name to stdout so that the client can + // bring it up. In test mode, we don't do this because clients will + // attach via a reattach config. + if opts.Test == nil { + fmt.Printf("%d|%d|%s|%s|%s|%s\n", + CoreProtocolVersion, + protoVersion, + listener.Addr().Network(), + listener.Addr().String(), + protoType, + serverCert) + os.Stdout.Sync() + } else if ch := opts.Test.ReattachConfigCh; ch != nil { + // Send back the reattach config that can be used. This isn't + // quite ready if they connect immediately but the client should + // retry a few times. + ch <- &ReattachConfig{ + Protocol: protoType, + Addr: listener.Addr(), + Pid: os.Getpid(), + Test: true, } - }() + } + + // Eat the interrupts. In test mode we disable this so that go test + // can be cancelled properly. + if opts.Test == nil { + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + count := 0 + for { + <-ch + count++ + logger.Trace("plugin received interrupt signal, ignoring", "count", count) + } + }() + } - // Set our new out, err - os.Stdout = stdout_w - os.Stderr = stderr_w + // Set our stdout, stderr to the stdio stream that clients can retrieve + // using ClientConfig.SyncStdout/err. We only do this for non-test mode + // or if the test mode explicitly requests it. + // + // In test mode, we use a multiwriter so that the data continues going + // to the normal stdout/stderr so output can show up in test logs. We + // also send to the stdio stream so that clients can continue working + // if they depend on that. + if opts.Test == nil || opts.Test.SyncStdio { + if opts.Test != nil { + // In test mode we need to maintain the original values so we can + // reset them. + defer func(out, err *os.File) { + os.Stdout = out + os.Stderr = err + }(os.Stdout, os.Stderr) + } + os.Stdout = stdout_w + os.Stderr = stderr_w + } // Accept connections and wait for completion go server.Serve(listener) - <-doneCh + + ctx := context.Background() + if opts.Test != nil && opts.Test.Context != nil { + ctx = opts.Test.Context + } + select { + case <-ctx.Done(): + // Cancellation. We can stop the server by closing the listener. + // This isn't graceful at all but this is currently only used by + // tests and it's our only way to stop. + listener.Close() + + // If this is a grpc server, then we also ask the server itself to + // end which will kill all connections. There isn't an easy way to do + // this for net/rpc currently but net/rpc is more and more unused.
+ if s, ok := server.(*GRPCServer); ok { + s.Stop() + } + + // Wait for the server itself to shut down + <-doneCh + + case <-doneCh: + // Note that given the documentation of Serve we should probably be + // setting exitCode = 0 and using os.Exit here. That's how it used to + // work before extracting this library. However, for years we've done + // this so we'll keep this functionality. + } } func serverListener() (net.Listener, error) { @@ -388,7 +530,7 @@ func serverListener_tcp() (net.Listener, error) { } if minPort > maxPort { - return nil, fmt.Errorf("ENV_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) + return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) } for port := minPort; port <= maxPort; port++ { diff --git a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md index ccb46bbd..4c644fcf 100644 --- a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md +++ b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md @@ -1,5 +1,38 @@ # HCL Changelog +## v2.3.0 (Jan 3, 2020) + +### Enhancements + +* ext/tryfunc: Optional functions `try` and `can` to include in your `hcl.EvalContext` when evaluating expressions, which allow users to make decisions based on the success of expressions. ([#330](https://github.com/hashicorp/hcl/pull/330)) +* ext/typeexpr: Now has an optional function `convert` which you can include in your `hcl.EvalContext` when evaluating expressions, allowing users to convert values to specific type constraints using the type constraint expression syntax. ([#330](https://github.com/hashicorp/hcl/pull/330)) +* ext/typeexpr: A new `cty` capsule type `typeexpr.TypeConstraintType` which, when used as either a type constraint for a function parameter or as a type constraint for a `hcldec` attribute specification will cause the given expression to be interpreted as a type constraint expression rather than a value expression. ([#330](https://github.com/hashicorp/hcl/pull/330)) +* ext/customdecode: An optional extension that allows overriding the static decoding behavior for expressions either in function arguments or `hcldec` attribute specifications. ([#330](https://github.com/hashicorp/hcl/pull/330)) +* ext/customdecode: New `cty` capsule types `customdecode.ExpressionType` and `customdecode.ExpressionClosureType` which, when used as either a type constraint for a function parameter or as a type constraint for a `hcldec` attribute specification will cause the given expression (and, for the closure type, also the `hcl.EvalContext` it was evaluated in) to be captured for later analysis, rather than immediately evaluated. ([#330](https://github.com/hashicorp/hcl/pull/330)) + +## v2.2.0 (Dec 11, 2019) + +### Enhancements + +* hcldec: Attribute evaluation (as part of `AttrSpec` or `BlockAttrsSpec`) now captures expression evaluation metadata in any errors it produces during type conversions, allowing for better feedback in calling applications that are able to make use of this metadata when printing diagnostic messages. ([#329](https://github.com/hashicorp/hcl/pull/329)) + +### Bugs Fixed + +* hclsyntax: `IndexExpr`, `SplatExpr`, and `RelativeTraversalExpr` will now report a source range that covers all of their child expression nodes. Previously they would report only the operator part, such as `["foo"]`, `[*]`, or `.foo`, which was problematic for callers using source ranges for code analysis.
([#328](https://github.com/hashicorp/hcl/pull/328)) +* hclwrite: Parser will no longer panic when the input includes index, splat, or relative traversal syntax. ([#328](https://github.com/hashicorp/hcl/pull/328)) + +## v2.1.0 (Nov 19, 2019) + +### Enhancements + +* gohcl: When decoding into a struct value with some fields already populated, those values will be retained if not explicitly overwritten in the given HCL body, with similar overriding/merging behavior as `json.Unmarshal` in the Go standard library. +* hclwrite: New interface to set the expression for an attribute to be a raw token sequence, with no special processing. This has some caveats, so if you intend to use it please refer to the godoc comments. ([#320](https://github.com/hashicorp/hcl/pull/320)) + +### Bugs Fixed + +* hclwrite: The `Body.Blocks` method was returning the blocks in an undefined order, rather than preserving the order of declaration in the source input. ([#313](https://github.com/hashicorp/hcl/pull/313)) +* hclwrite: The `TokensForTraversal` function (and thus in turn the `Body.SetAttributeTraversal` method) was not correctly handling index steps in traversals, and thus producing invalid results. ([#319](https://github.com/hashicorp/hcl/pull/319)) + ## v2.0.0 (Oct 2, 2019) Initial release of HCL 2, which is a new implementation combining the HCL 1 diff --git a/vendor/github.com/hashicorp/hcl/v2/README.md b/vendor/github.com/hashicorp/hcl/v2/README.md index d807a424..3d0d509d 100644 --- a/vendor/github.com/hashicorp/hcl/v2/README.md +++ b/vendor/github.com/hashicorp/hcl/v2/README.md @@ -8,7 +8,7 @@ towards devops tools, servers, etc. > **NOTE:** This is major version 2 of HCL, whose Go API is incompatible with > major version 1. Both versions are available for selection in Go Modules > projects. HCL 2 _cannot_ be imported from Go projects that are not using Go Modules. For more information, see -> [our version selection guide](https://github.com/golang/go/wiki/Version-Selection). +> [our version selection guide](https://github.com/hashicorp/hcl/wiki/Version-Selection). HCL has both a _native syntax_, intended to be pleasant to read and write for humans, and a JSON-based variant that is easier for machines to generate @@ -51,7 +51,8 @@ func main() { ``` A lower-level API is available for applications that need more control over -the parsing, decoding, and evaluation of configuration. +the parsing, decoding, and evaluation of configuration. For more information, +see [the package documentation](https://pkg.go.dev/github.com/hashicorp/hcl/v2). ## Why? @@ -156,9 +157,9 @@ syntax allows use of arbitrary expressions within JSON strings: For more information, see the detailed specifications: -* [Syntax-agnostic Information Model](hcl/spec.md) -* [HCL Native Syntax](hcl/hclsyntax/spec.md) -* [JSON Representation](hcl/json/spec.md) +* [Syntax-agnostic Information Model](spec.md) +* [HCL Native Syntax](hclsyntax/spec.md) +* [JSON Representation](json/spec.md) ## Changes in 2.0 diff --git a/vendor/github.com/hashicorp/hcl/v2/appveyor.yml b/vendor/github.com/hashicorp/hcl/v2/appveyor.yml new file mode 100644 index 00000000..e382f8f5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/appveyor.yml @@ -0,0 +1,13 @@ +build: off + +clone_folder: c:\gopath\src\github.com\hashicorp\hcl + +environment: + GOPATH: c:\gopath + GO111MODULE: on + GOPROXY: https://goproxy.io + +stack: go 1.12 + +test_script: + - go test ./...
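Editor's aside: as a concrete illustration of the `try`/`can` entry in the v2.3.0 changelog above, here is a minimal sketch wiring `tryfunc.TryFunc` and `tryfunc.CanFunc` into an `hcl.EvalContext`. The expression string and file name are arbitrary, and `unset` is deliberately left undefined so that `try` falls back to the string literal.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/tryfunc"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty/function"
)

func main() {
	// `unset` is not defined in the context, so evaluating it fails and
	// try returns the second argument instead.
	expr, diags := hclsyntax.ParseExpression(
		[]byte(`try(unset, "fallback")`), "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	ctx := &hcl.EvalContext{
		Functions: map[string]function.Function{
			"try": tryfunc.TryFunc,
			"can": tryfunc.CanFunc,
		},
	}

	v, diags := expr.Value(ctx)
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(v.AsString()) // fallback
}
```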
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/README.md new file mode 100644 index 00000000..1636f577 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/README.md @@ -0,0 +1,209 @@ +# HCL Custom Static Decoding Extension + +This HCL extension provides a mechanism for defining arguments in an HCL-based +language whose values are derived using custom decoding rules against the +HCL expression syntax, overriding the usual behavior of normal expression +evaluation. + +"Arguments", for the purpose of this extension, currently includes the +following two contexts: + +* For applications using `hcldec` for dynamic decoding, a `hcldec.AttrSpec` + or `hcldec.BlockAttrsSpec` can be given a special type constraint that + opts in to custom decoding behavior for the attribute(s) that are selected + by that specification. + +* When working with the HCL native expression syntax, a function given in + the `hcl.EvalContext` during evaluation can have parameters with special + type constraints that opt in to custom decoding behavior for the argument + expression associated with that parameter in any call. + +The above use-cases are rather abstract, so we'll consider a motivating +real-world example: sometimes we (language designers) need to allow users +to specify type constraints directly in the language itself, such as in +[Terraform's Input Variables](https://www.terraform.io/docs/configuration/variables.html). +Terraform's `variable` blocks include an argument called `type` which takes +a type constraint given using HCL expression building-blocks as defined by +[the HCL `typeexpr` extension](../typeexpr/README.md). + +A "type constraint expression" of that sort is not an expression intended to +be evaluated in the usual way. Instead, the physical expression is +deconstructed using [the static analysis operations](../../spec.md#static-analysis) +to produce a `cty.Type` as the result, rather than a `cty.Value`. + +The purpose of this Custom Static Decoding Extension, then, is to provide a +bridge to allow that sort of custom decoding to be used via mechanisms that +normally deal in `cty.Value`, such as `hcldec` and native syntax function +calls as listed above. + +(Note: [`gohcl`](https://pkg.go.dev/github.com/hashicorp/hcl/v2/gohcl) has +its own mechanism to support this use case, exploiting the fact that it is +working directly with "normal" Go types. Decoding into a struct field of +type `hcl.Expression` obtains the expression directly without evaluating it +first. The Custom Static Decoding Extension is not necessary for that `gohcl` +technique. You can also implement custom decoding by working directly with +the lowest-level HCL API, which separates extraction of and evaluation of +expressions into two steps.) + +## Custom Decoding Types + +This extension relies on a convention implemented in terms of +[_Capsule Types_ in the underlying `cty` type system](https://github.com/zclconf/go-cty/blob/master/docs/types.md#capsule-types). `cty` allows a capsule type to carry arbitrary +extension metadata values as an aid to creating higher-level abstractions like +this extension. + +A custom argument decoding mode, then, is implemented by creating a new `cty` +capsule type that implements the `ExtensionData` custom operation to return +a decoding function when requested. 
For example: + +```go +var keywordType cty.Type +keywordType = cty.CapsuleWithOps("keyword", reflect.TypeOf(""), &cty.CapsuleOps{ + ExtensionData: func(key interface{}) interface{} { + switch key { + case customdecode.CustomExpressionDecoder: + return customdecode.CustomExpressionDecoderFunc( + func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + kw := hcl.ExprAsKeyword(expr) + if kw == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid keyword", + Detail: "A keyword is required", + Subject: expr.Range().Ptr(), + }) + return cty.UnknownVal(keywordType), diags + } + return cty.CapsuleVal(keywordType, &kw), nil + }, + ) + default: + return nil + } + }, +}) +``` + +The boilerplate here is a bit fussy, but the important part for our purposes is the `case customdecode.CustomExpressionDecoder:` clause, which uses a custom extension key type defined in this package to recognize when a component implementing this extension is checking to see if a target type has a custom decode implementation. + +In the above case we've defined a type that decodes expressions as static keywords, so a keyword like `foo` would decode as an encapsulated `"foo"` string, while any other sort of expression like `"baz"` or `1 + 1` would return an error. + +We could then use `keywordType` as a type constraint either for a function parameter or a `hcldec` attribute specification, which would require the argument for that function parameter or the expression for the matching attributes to be a static keyword, rather than an arbitrary expression. For example, in a `hcldec.AttrSpec`: + +```go +keywordSpec := &hcldec.AttrSpec{ + Name: "keyword", + Type: keywordType, +} +``` + +The above would accept input like the following and would set its result to a `cty.Value` of `keywordType`, after decoding: + +```hcl +keyword = foo +``` + +## The Expression and Expression Closure `cty` types + +Building on the above, this package also includes two capsule types that use the above mechanism to allow calling applications to capture expressions directly and thus defer analysis to a later step, after initial decoding. + +The `customdecode.ExpressionType` type encapsulates an `hcl.Expression` alone, for situations like our type constraint expression example above where it's the static structure of the expression we want to inspect, and thus any variables and functions defined in the evaluation context are irrelevant. + +The `customdecode.ExpressionClosureType` type encapsulates a `*customdecode.ExpressionClosure` value, which binds the given expression to the `hcl.EvalContext` it was asked to evaluate against and thus allows the receiver of that result to later perform normal evaluation of the expression with all the same variables and functions that would've been available to it naturally. + +Both of these types can be used as type constraints either for `hcldec` attribute specifications or for function arguments.
Here's an example of +`ExpressionClosureType` to implement a function that can evaluate +an expression with some additional variables defined locally, which we'll +call the `with(...)` function: + +```go +var WithFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "variables", + Type: cty.DynamicPseudoType, + }, + { + Name: "expression", + Type: customdecode.ExpressionClosureType, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + varsVal := args[0] + exprVal := args[1] + if !varsVal.Type().IsObjectType() { + return cty.NilVal, function.NewArgErrorf(0, "must be an object defining local variables") + } + if !varsVal.IsKnown() { + // We can't predict our result type until the variables object + // is known. + return cty.DynamicPseudoType, nil + } + vars := varsVal.AsValueMap() + closure := customdecode.ExpressionClosureFromVal(exprVal) + result, err := evalWithLocals(vars, closure) + if err != nil { + return cty.NilVal, err + } + return result.Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + varsVal := args[0] + exprVal := args[1] + vars := varsVal.AsValueMap() + closure := customdecode.ExpressionClosureFromVal(exprVal) + return evalWithLocals(vars, closure) + }, +}) + +func evalWithLocals(locals map[string]cty.Value, closure *customdecode.ExpressionClosure) (cty.Value, error) { + childCtx := closure.EvalContext.NewChild() + childCtx.Variables = locals + val, diags := closure.Expression.Value(childCtx) + if diags.HasErrors() { + return cty.NilVal, function.NewArgErrorf(1, "couldn't evaluate expression: %s", diags.Error()) + } + return val, nil +} +``` + +If the above function were placed into an `hcl.EvalContext` as `with`, it +could be used in a native syntax call to that function as follows: + +```hcl + foo = with({name = "Cory"}, "${greeting}, ${name}!") +``` + +The above assumes a variable in the main context called `greeting`, to which +the `with` function adds `name` before evaluating the expression given in +its second argument. This makes that second argument context-sensitive -- it +would behave differently if the user wrote the same thing somewhere else -- so +this capability should be used with care to make sure it doesn't cause confusion +for the end-users of your language. + +There are some other examples of this capability to evaluate expressions in +unusual ways in the `tryfunc` directory that is a sibling of this one. diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go new file mode 100644 index 00000000..c9d7a1ef --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go @@ -0,0 +1,56 @@ +// Package customdecode contains a HCL extension that allows, in certain +// contexts, expression evaluation to be overridden by custom static analysis. +// +// This mechanism is only supported in certain specific contexts where +// expressions are decoded with a specific target type in mind. For more +// information, see the documentation on CustomExpressionDecoder. +package customdecode + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +type customDecoderImpl int + +// CustomExpressionDecoder is a value intended to be used as a cty capsule +// type ExtensionData key for capsule types whose values are to be obtained +// by static analysis of an expression rather than normal evaluation of that +// expression. 
+// +// When a cooperating capsule type is asked for ExtensionData with this key, +// it must return a non-nil CustomExpressionDecoderFunc value. +// +// This mechanism is not universally supported; instead, it's handled in a few +// specific places where expressions are evaluated with the intent of producing +// a cty.Value of a type given by the calling application. +// +// Specifically, this currently works for type constraints given in +// hcldec.AttrSpec and hcldec.BlockAttrsSpec, and it works for arguments to +// function calls in the HCL native syntax. HCL extensions implemented outside +// of the main HCL module may also implement this; consult their own +// documentation for details. +const CustomExpressionDecoder = customDecoderImpl(1) + +// CustomExpressionDecoderFunc is the type of value that must be returned by +// a capsule type handling the key CustomExpressionDecoder in its ExtensionData +// implementation. +// +// If no error diagnostics are returned, the result value MUST be of the +// capsule type that the decoder function was derived from. If the returned +// error diagnostics prevent producing a value at all, return cty.NilVal. +type CustomExpressionDecoderFunc func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) + +// CustomExpressionDecoderForType takes any cty type and returns its +// custom expression decoder implementation if it has one. If it is not a +// capsule type or it does not implement a custom expression decoder, this +// function returns nil. +func CustomExpressionDecoderForType(ty cty.Type) CustomExpressionDecoderFunc { + if !ty.IsCapsuleType() { + return nil + } + if fn, ok := ty.CapsuleExtensionData(CustomExpressionDecoder).(CustomExpressionDecoderFunc); ok { + return fn + } + return nil +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go new file mode 100644 index 00000000..af7c66c2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go @@ -0,0 +1,146 @@ +package customdecode + +import ( + "fmt" + "reflect" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// ExpressionType is a cty capsule type that carries hcl.Expression values. +// +// This type implements custom decoding in the most general way possible: it +// just captures whatever expression is given to it, with no further processing +// whatsoever. It could therefore be useful in situations where an application +// must defer processing of the expression content until a later step. +// +// ExpressionType only captures the expression, not the evaluation context it +// was destined to be evaluated in. That means this type can be fine for +// situations where the recipient of the value only intends to do static +// analysis, but ExpressionClosureType is more appropriate in situations where +// the recipient will eventually evaluate the given expression. +var ExpressionType cty.Type + +// ExpressionVal returns a new cty value of type ExpressionType, wrapping the +// given expression. +func ExpressionVal(expr hcl.Expression) cty.Value { + return cty.CapsuleVal(ExpressionType, &expr) +} + +// ExpressionFromVal returns the expression encapsulated in the given value, or +// panics if the value is not a known value of ExpressionType. 
+func ExpressionFromVal(v cty.Value) hcl.Expression { + if !v.Type().Equals(ExpressionType) { + panic("value is not of ExpressionType") + } + ptr := v.EncapsulatedValue().(*hcl.Expression) + return *ptr +} + +// ExpressionClosureType is a cty capsule type that carries hcl.Expression +// values along with their original evaluation contexts. +// +// This is similar to ExpressionType except that during custom decoding it +// also captures the hcl.EvalContext that was provided, allowing callers to +// evaluate the expression later in the same context where it would originally +// have been evaluated, or a context derived from that one. +var ExpressionClosureType cty.Type + +// ExpressionClosure is the type encapsulated in ExpressionClosureType +type ExpressionClosure struct { + Expression hcl.Expression + EvalContext *hcl.EvalContext +} + +// ExpressionClosureVal returns a new cty value of type ExpressionClosureType, +// wrapping the given expression closure. +func ExpressionClosureVal(closure *ExpressionClosure) cty.Value { + return cty.CapsuleVal(ExpressionClosureType, closure) +} + +// Value evaluates the closure's expression using the closure's EvalContext, +// returning the result. +func (c *ExpressionClosure) Value() (cty.Value, hcl.Diagnostics) { + return c.Expression.Value(c.EvalContext) +} + +// ExpressionClosureFromVal returns the expression closure encapsulated in the +// given value, or panics if the value is not a known value of +// ExpressionClosureType. +// +// The caller MUST NOT modify the returned closure or the EvalContext inside +// it. To derive a new EvalContext, either create a child context or make +// a copy. +func ExpressionClosureFromVal(v cty.Value) *ExpressionClosure { + if !v.Type().Equals(ExpressionClosureType) { + panic("value is not of ExpressionClosureType") + } + return v.EncapsulatedValue().(*ExpressionClosure) +} + +func init() { + // Getting hold of a reflect.Type for hcl.Expression is a bit tricky because + // it's an interface type, but we can do it with some indirection. 
+ goExpressionType := reflect.TypeOf((*hcl.Expression)(nil)).Elem() + + ExpressionType = cty.CapsuleWithOps("expression", goExpressionType, &cty.CapsuleOps{ + ExtensionData: func(key interface{}) interface{} { + switch key { + case CustomExpressionDecoder: + return CustomExpressionDecoderFunc( + func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return ExpressionVal(expr), nil + }, + ) + default: + return nil + } + }, + TypeGoString: func(_ reflect.Type) string { + return "customdecode.ExpressionType" + }, + GoString: func(raw interface{}) string { + exprPtr := raw.(*hcl.Expression) + return fmt.Sprintf("customdecode.ExpressionVal(%#v)", *exprPtr) + }, + RawEquals: func(a, b interface{}) bool { + aPtr := a.(*hcl.Expression) + bPtr := b.(*hcl.Expression) + return reflect.DeepEqual(*aPtr, *bPtr) + }, + }) + ExpressionClosureType = cty.CapsuleWithOps("expression closure", reflect.TypeOf(ExpressionClosure{}), &cty.CapsuleOps{ + ExtensionData: func(key interface{}) interface{} { + switch key { + case CustomExpressionDecoder: + return CustomExpressionDecoderFunc( + func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return ExpressionClosureVal(&ExpressionClosure{ + Expression: expr, + EvalContext: ctx, + }), nil + }, + ) + default: + return nil + } + }, + TypeGoString: func(_ reflect.Type) string { + return "customdecode.ExpressionClosureType" + }, + GoString: func(raw interface{}) string { + closure := raw.(*ExpressionClosure) + return fmt.Sprintf("customdecode.ExpressionClosureVal(%#v)", closure) + }, + RawEquals: func(a, b interface{}) bool { + closureA := a.(*ExpressionClosure) + closureB := b.(*ExpressionClosure) + // The expression itself compares by deep equality, but EvalContexts + // conventionally compare by pointer identity, so we'll comply + // with both conventions here by testing them separately. 
+ return closureA.EvalContext == closureB.EvalContext && + reflect.DeepEqual(closureA.Expression, closureB.Expression) + }, + }) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/go.mod b/vendor/github.com/hashicorp/hcl/v2/go.mod index c152e601..d80c99d9 100644 --- a/vendor/github.com/hashicorp/hcl/v2/go.mod +++ b/vendor/github.com/hashicorp/hcl/v2/go.mod @@ -6,7 +6,7 @@ require ( github.com/apparentlymart/go-textseg v1.0.0 github.com/davecgh/go-spew v1.1.1 github.com/go-test/deep v1.0.3 - github.com/google/go-cmp v0.2.0 + github.com/google/go-cmp v0.3.1 github.com/kr/pretty v0.1.0 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 @@ -14,7 +14,7 @@ require ( github.com/sergi/go-diff v1.0.0 github.com/spf13/pflag v1.0.2 github.com/stretchr/testify v1.2.2 // indirect - github.com/zclconf/go-cty v1.1.0 + github.com/zclconf/go-cty v1.2.0 golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 // indirect golang.org/x/text v0.3.2 // indirect diff --git a/vendor/github.com/hashicorp/hcl/v2/go.sum b/vendor/github.com/hashicorp/hcl/v2/go.sum index b3b95415..76b135fb 100644 --- a/vendor/github.com/hashicorp/hcl/v2/go.sum +++ b/vendor/github.com/hashicorp/hcl/v2/go.sum @@ -9,8 +9,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -29,8 +29,8 @@ github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/zclconf/go-cty v1.1.0 h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v1.2.0 h1:sPHsy7ADcIZQP3vILvTjrh74ZA175TFP5vqiNK1UmlI= +github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= diff --git 
a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go index 963ed775..3fe84ddc 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go @@ -5,6 +5,7 @@ import ( "sync" "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/customdecode" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" "github.com/zclconf/go-cty/cty/function" @@ -350,26 +351,38 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti param = varParam } - val, argDiags := argExpr.Value(ctx) - if len(argDiags) > 0 { + var val cty.Value + if decodeFn := customdecode.CustomExpressionDecoderForType(param.Type); decodeFn != nil { + var argDiags hcl.Diagnostics + val, argDiags = decodeFn(argExpr, ctx) diags = append(diags, argDiags...) - } + if val == cty.NilVal { + val = cty.UnknownVal(param.Type) + } + } else { + var argDiags hcl.Diagnostics + val, argDiags = argExpr.Value(ctx) + if len(argDiags) > 0 { + diags = append(diags, argDiags...) + } - // Try to convert our value to the parameter type - val, err := convert.Convert(val, param.Type) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid function argument", - Detail: fmt.Sprintf( - "Invalid value for %q parameter: %s.", - param.Name, err, - ), - Subject: argExpr.StartRange().Ptr(), - Context: e.Range().Ptr(), - Expression: argExpr, - EvalContext: ctx, - }) + // Try to convert our value to the parameter type + var err error + val, err = convert.Convert(val, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, + }) + } } argVals[i] = val @@ -615,8 +628,9 @@ type IndexExpr struct { Collection Expression Key Expression - SrcRange hcl.Range - OpenRange hcl.Range + SrcRange hcl.Range + OpenRange hcl.Range + BracketRange hcl.Range } func (e *IndexExpr) walkChildNodes(w internalWalkFunc) { @@ -631,7 +645,7 @@ func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { diags = append(diags, collDiags...) diags = append(diags, keyDiags...) - val, indexDiags := hcl.Index(coll, key, &e.SrcRange) + val, indexDiags := hcl.Index(coll, key, &e.BracketRange) setDiagEvalContext(indexDiags, e, ctx) diags = append(diags, indexDiags...) 
return val, diags diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go index 6fb284a8..f67d989e 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go @@ -760,7 +760,7 @@ Traversal: Each: travExpr, Item: itemExpr, - SrcRange: hcl.RangeBetween(dot.Range, lastRange), + SrcRange: hcl.RangeBetween(from.Range(), lastRange), MarkerRange: hcl.RangeBetween(dot.Range, marker.Range), } @@ -819,7 +819,7 @@ Traversal: Each: travExpr, Item: itemExpr, - SrcRange: hcl.RangeBetween(open.Range, travExpr.Range()), + SrcRange: hcl.RangeBetween(from.Range(), travExpr.Range()), MarkerRange: hcl.RangeBetween(open.Range, close.Range), } @@ -867,8 +867,9 @@ Traversal: Collection: ret, Key: keyExpr, - SrcRange: rng, - OpenRange: open.Range, + SrcRange: hcl.RangeBetween(from.Range(), rng), + OpenRange: open.Range, + BracketRange: rng, } } } @@ -899,7 +900,7 @@ func makeRelativeTraversal(expr Expression, next hcl.Traverser, rng hcl.Range) E return &RelativeTraversalExpr{ Source: expr, Traversal: hcl.Traversal{next}, - SrcRange: rng, + SrcRange: hcl.RangeBetween(expr.Range(), rng), } } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/doc.go deleted file mode 100644 index b4baab1b..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Package acctest provides the ability to use the binary test driver. The binary -// test driver allows you to run your acceptance tests with a binary of Terraform. -// This is currently the only mechanism for driving tests. It provides a realistic testing -// experience and matrix testing against multiple versions of Terraform CLI, -// as long as they are >= 0.12.0 -// -// The driver must be enabled by initialising the test helper in your TestMain -// function in all provider packages that run acceptance tests. Most providers have only -// one package. -// -// After importing this package, you must define a TestMain and have the following: -// -// func TestMain(m *testing.M) { -// acctest.UseBinaryDriver("provider_name", Provider) -// resource.TestMain(m) -// } -// -// Where `Provider` is the function that returns the instance of a configured `*schema.Provider` -// Some providers already have a TestMain defined, usually for the purpose of enabling test -// sweepers. These additional occurrences should be removed. -// -// It is no longer necessary to import other Terraform providers as Go modules: these -// imports should be removed. 
-package acctest diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/helper.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/helper.go deleted file mode 100644 index 9853512d..00000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/acctest/helper.go +++ /dev/null @@ -1,26 +0,0 @@ -package acctest - -import ( - "os" - - "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" - tftest "github.com/hashicorp/terraform-plugin-test" -) - -var TestHelper *tftest.Helper - -func UseBinaryDriver(name string, providerFunc plugin.ProviderFunc) { - sourceDir, err := os.Getwd() - if err != nil { - panic(err) - } - - if tftest.RunningAsPlugin() { - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: providerFunc, - }) - os.Exit(0) - } else { - TestHelper = tftest.AutoInitProviderHelper(name, sourceDir) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go index d6abbd1a..3b1ced34 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go @@ -93,14 +93,6 @@ func (d Diagnostic) Validate() error { return nil } -// FromErr will convert an error into a Diagnostic -func FromErr(err error) Diagnostic { - return Diagnostic{ - Severity: Error, - Summary: err.Error(), - } -} - // Severity is an enum type marking the severity level of a Diagnostic type Severity int diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go new file mode 100644 index 00000000..8037b130 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go @@ -0,0 +1,36 @@ +package diag + +import "fmt" + +// FromErr will convert an error into a Diagnostics. This returns Diagnostics +// as the most common use case in Go will be handling a single error +// returned from a function. +// +// if err != nil { +// return diag.FromErr(err) +// } +func FromErr(err error) Diagnostics { + return Diagnostics{ + Diagnostic{ + Severity: Error, + Summary: err.Error(), + }, + } +} + +// Errorf creates a Diagnostics with a single Error level Diagnostic entry. +// The summary is populated by performing a fmt.Sprintf with the supplied +// values. This returns a single error in a Diagnostics as errors typically +// do not occur in multiples as warnings may. 
+// +// if unexpectedCondition { +// return diag.Errorf("unexpected: %s", someValue) +// } +func Errorf(format string, a ...interface{}) Diagnostics { + return Diagnostics{ + Diagnostic{ + Severity: Error, + Summary: fmt.Sprintf(format, a...), + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go new file mode 100644 index 00000000..3cc40939 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go @@ -0,0 +1,135 @@ +package resource + +import ( + "context" + "encoding/json" + "io/ioutil" + "log" + "os" + "strings" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + grpcplugin "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin" + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" + tftest "github.com/hashicorp/terraform-plugin-test" +) + +func runProviderCommand(f func() error, wd *tftest.WorkingDir, opts *plugin.ServeOpts) error { + // Run the provider in the same process as the test runner using the + // reattach behavior in Terraform. This ensures we get test coverage + // and enables the use of delve as a debugger. + + // the provider name is technically supposed to be specified in the + // format returned by addrs.Provider.GetDisplay(), but 1. I'm not + // importing the entire addrs package for this and 2. we only get the + // provider name here. Fortunately, when only a provider name is + // specified in a provider block--which is how the config file we + // generate does things--Terraform just automatically assumes it's in + // the legacy namespace and the default registry.terraform.io host, + // so we can just construct the output of GetDisplay() ourselves, based + // on the provider name. + providerName := wd.GetHelper().GetPluginName() + + // providerName gets returned as terraform-provider-foo, and we need + // just foo. So let's fix that. + providerName = strings.TrimPrefix(providerName, "terraform-provider-") + + // if we didn't override the logger, let's set a default one. + if opts.Logger == nil { + opts.Logger = hclog.New(&hclog.LoggerOptions{ + Name: "plugintest", + Level: hclog.Trace, + Output: ioutil.Discard, + }) + } + + // this is needed so Terraform doesn't default to expecting protocol 4; + // we're skipping the handshake because Terraform didn't launch the + // plugin. 
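For reference, the reattach payload this function assembles is plain JSON keyed by provider address. A small, self-contained sketch that mirrors the ReattachConfig shape from this patch and prints an illustrative value (the protocol, PID, and socket path are made up):

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
    )

    // reattachConfig mirrors plugin.ReattachConfig from this patch.
    type reattachConfig struct {
        Protocol string
        Pid      int
        Test     bool
        Addr     struct{ Network, String string }
    }

    func main() {
        cfg := reattachConfig{Protocol: "grpc", Pid: 12345, Test: true}
        cfg.Addr.Network = "unix"
        cfg.Addr.String = "/tmp/plugin12345"

        // Both the legacy ("-") and default ("hashicorp") namespaces are
        // populated, matching the loop in runProviderCommand.
        payload, err := json.Marshal(map[string]reattachConfig{
            "registry.terraform.io/-/pingaccess":         cfg,
            "registry.terraform.io/hashicorp/pingaccess": cfg,
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("TF_REATTACH_PROVIDERS=" + string(payload))
    }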
+	os.Setenv("PLUGIN_PROTOCOL_VERSIONS", "5")
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	config, closeCh, err := plugin.DebugServe(ctx, opts)
+	if err != nil {
+		return err
+	}
+
+	// plugin.DebugServe hijacks our log output location, so let's reset it
+	logging.SetOutput()
+
+	reattachInfo := map[string]plugin.ReattachConfig{}
+	var namespaces []string
+	host := "registry.terraform.io"
+	if v := os.Getenv("TF_ACC_PROVIDER_NAMESPACE"); v != "" {
+		namespaces = append(namespaces, v)
+	} else {
+		// unfortunately, we need to populate both of them
+		// Terraform 0.12.26 and higher uses the legacy mode ("-")
+		// Terraform 0.13.0 and higher uses the default mode ("hashicorp")
+		// because of the change in how providers are addressed in 0.13
+		namespaces = append(namespaces, "-", "hashicorp")
+	}
+	if v := os.Getenv("TF_ACC_PROVIDER_HOST"); v != "" {
+		host = v
+	}
+
+	for _, ns := range namespaces {
+		reattachInfo[strings.TrimSuffix(host, "/")+"/"+
+			strings.TrimSuffix(ns, "/")+"/"+
+			providerName] = config
+	}
+
+	reattachStr, err := json.Marshal(reattachInfo)
+	if err != nil {
+		return err
+	}
+	wd.Setenv("TF_REATTACH_PROVIDERS", string(reattachStr))
+
+	// ok, let's call whatever Terraform command the test was trying to
+	// call, now that we know it'll attach back to that server we just
+	// started.
+	err = f()
+	if err != nil {
+		log.Printf("[WARN] Got error running Terraform: %s", err)
+	}
+
+	// cancel the server so it'll return. Otherwise, this closeCh won't get
+	// closed, and we'll hang here.
+	cancel()
+
+	// wait for the server to actually shut down; it may take a moment for
+	// it to clean up, or whatever.
+	<-closeCh
+
+	// once we've run the Terraform command, let's remove the reattach
+	// information from the WorkingDir's environment. The WorkingDir will
+	// persist until the next call, but the server in the reattach info
+	// doesn't exist anymore at this point, so the reattach info is no
+	// longer valid. In theory it should be overwritten in the next call,
+	// but just to avoid any confusing bug reports, let's just unset the
+	// environment variable altogether.
+	wd.Unsetenv("TF_REATTACH_PROVIDERS")
+
+	// return any error returned from the orchestration code running
+	// Terraform commands
+	return err
+}
+
+// defaultPluginServeOpts builds the *plugin.ServeOpts that you usually want to
+// use when running runProviderCommand. It just sets the GRPCProviderFunc to return
+// the provider under test.
+func defaultPluginServeOpts(wd *tftest.WorkingDir, providers map[string]*schema.Provider) *plugin.ServeOpts { + var provider *schema.Provider + for _, p := range providers { + provider = p + } + return &plugin.ServeOpts{ + GRPCProviderFunc: func() proto.ProviderServer { + return grpcplugin.NewGRPCProviderServer(provider) + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go index 6f74bcdc..3a8341dd 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go @@ -5,20 +5,16 @@ import ( "errors" "flag" "fmt" - "io" - "io/ioutil" "log" "os" "regexp" "strconv" "strings" - "syscall" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/logutils" + tftest "github.com/hashicorp/terraform-plugin-test" testing "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/terraform-plugin-sdk/v2/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" @@ -102,16 +98,7 @@ func TestMain(m interface { os.Exit(1) } } else { - if acctest.TestHelper == nil { - log.Fatal("Please configure the acctest binary driver") - } - exitCode := m.Run() - err := acctest.TestHelper.Close() - if err != nil { - log.Printf("Error cleaning up temporary test files: %s", err) - } - os.Exit(exitCode) } } @@ -460,49 +447,6 @@ type TestStep struct { providers map[string]*schema.Provider } -// Set to a file mask in sprintf format where %s is test name -const envLogPathMask = "TF_LOG_PATH_MASK" - -func logOutput(t testing.T) (logOutput io.Writer, err error) { - logOutput = ioutil.Discard - - logLevel := logging.LogLevel() - if logLevel == "" { - return - } - - logOutput = os.Stderr - - if logPath := os.Getenv(logging.EnvLogFile); logPath != "" { - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - if logPathMask := os.Getenv(envLogPathMask); logPathMask != "" { - // Escape special characters which may appear if we have subtests - testName := strings.Replace(t.Name(), "/", "__", -1) - - logPath := fmt.Sprintf(logPathMask, testName) - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - // This was the default since the beginning - logOutput = &logutils.LevelFilter{ - Levels: logging.ValidLevels, - MinLevel: logutils.LogLevel(logLevel), - Writer: logOutput, - } - - return -} - // ParallelTest performs an acceptance test on a resource, allowing concurrency // with other ParallelTest. // @@ -535,24 +479,27 @@ func Test(t testing.T, c TestCase) { return } - logWriter, err := logOutput(t) - if err != nil { - t.Error(fmt.Errorf("error setting up logging: %s", err)) - } - log.SetOutput(logWriter) + logging.SetOutput() // get instances of all providers, so we can use the individual // resources to shim the state during the tests. 
providers := make(map[string]*schema.Provider) + var provider string for name, pf := range c.ProviderFactories { p, err := pf() if err != nil { t.Fatal(err) } providers[name] = p + provider = name } for name, p := range c.Providers { providers[name] = p + provider = name + } + + if len(providers) != 1 { + t.Fatalf("Only the provider under test should be set in TestCase, got %d providers. Other providers can be used by adding their provider blocks to their config; they will automatically be downloaded as part of terraform init.", len(providers)) } // Auto-configure all providers. @@ -570,11 +517,19 @@ func Test(t testing.T, c TestCase) { c.PreCheck() } - if acctest.TestHelper == nil { - t.Fatal("Please configure the acctest binary driver") + sourceDir, err := os.Getwd() + if err != nil { + t.Fatalf("Error getting working dir: %s", err) } + helper := tftest.AutoInitProviderHelper(provider, sourceDir) + defer func(helper *tftest.Helper) { + err := helper.Close() + if err != nil { + log.Printf("Error cleaning up temporary test files: %s", err) + } + }(helper) - runNewTest(t, c, providers) + runNewTest(t, c, providers, helper) } // testProviderConfig takes the list of Providers in a TestCase and returns a diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go index 42a6dcf3..fe0bb388 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go @@ -11,16 +11,22 @@ import ( tftest "github.com/hashicorp/terraform-plugin-test" testing "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/terraform-plugin-sdk/v2/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func runPostTestDestroy(t testing.T, c TestCase, wd *tftest.WorkingDir) error { - wd.RequireDestroy(t) +func runPostTestDestroy(t testing.T, c TestCase, wd *tftest.WorkingDir, providers map[string]*schema.Provider) error { + runProviderCommand(func() error { + wd.RequireDestroy(t) + return nil + }, wd, defaultPluginServeOpts(wd, providers)) if c.CheckDestroy != nil { - statePostDestroy := getState(t, wd) + var statePostDestroy *terraform.State + runProviderCommand(func() error { + statePostDestroy = getState(t, wd) + return nil + }, wd, defaultPluginServeOpts(wd, providers)) if err := c.CheckDestroy(statePostDestroy); err != nil { return err @@ -30,16 +36,20 @@ func runPostTestDestroy(t testing.T, c TestCase, wd *tftest.WorkingDir) error { return nil } -func runNewTest(t testing.T, c TestCase, providers map[string]*schema.Provider) { +func runNewTest(t testing.T, c TestCase, providers map[string]*schema.Provider, helper *tftest.Helper) { spewConf := spew.NewDefaultConfig() spewConf.SortKeys = true - wd := acctest.TestHelper.RequireNewWorkingDir(t) + wd := helper.RequireNewWorkingDir(t) defer func() { - statePreDestroy := getState(t, wd) + var statePreDestroy *terraform.State + runProviderCommand(func() error { + statePreDestroy = getState(t, wd) + return nil + }, wd, defaultPluginServeOpts(wd, providers)) if !stateIsEmpty(statePreDestroy) { - runPostTestDestroy(t, c, wd) + runPostTestDestroy(t, c, wd, providers) } wd.Close() @@ -48,14 +58,16 @@ func runNewTest(t testing.T, c TestCase, providers map[string]*schema.Provider) providerCfg := testProviderConfig(c) 
wd.RequireSetConfig(t, providerCfg) - wd.RequireInit(t) + runProviderCommand(func() error { + wd.RequireInit(t) + return nil + }, wd, defaultPluginServeOpts(wd, providers)) // use this to track last step succesfully applied // acts as default for import tests var appliedCfg string for i, step := range c.Steps { - if step.PreConfig != nil { step.PreConfig() } @@ -70,10 +82,10 @@ func runNewTest(t testing.T, c TestCase, providers map[string]*schema.Provider) continue } } + step.providers = providers if step.ImportState { - step.providers = providers - err := testStepNewImportState(t, c, wd, step, appliedCfg) + err := testStepNewImportState(t, c, helper, wd, step, appliedCfg) if err != nil { t.Fatal(err) } @@ -149,7 +161,10 @@ func testIDRefresh(c TestCase, t testing.T, wd *tftest.WorkingDir, step TestStep defer wd.RequireSetConfig(t, step.Config) // Refresh! - wd.RequireRefresh(t) + runProviderCommand(func() error { + wd.RequireRefresh(t) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) state = getState(t, wd) // Verify attribute equivalence. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go index 285d50e6..43442823 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go @@ -2,6 +2,7 @@ package resource import ( "github.com/davecgh/go-spew/spew" + tfjson "github.com/hashicorp/terraform-json" tftest "github.com/hashicorp/terraform-plugin-test" testing "github.com/mitchellh/go-testing-interface" @@ -16,7 +17,14 @@ func testStepNewConfig(t testing.T, c TestCase, wd *tftest.WorkingDir, step Test idRefresh := c.IDRefreshName != "" if !step.Destroy { - state := getState(t, wd) + var state *terraform.State + err := runProviderCommand(func() error { + state = getState(t, wd) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) + if err != nil { + return err + } if err := testStepTaint(state, step); err != nil { t.Fatalf("Error when tainting resources: %s", err) } @@ -25,12 +33,21 @@ func testStepNewConfig(t testing.T, c TestCase, wd *tftest.WorkingDir, step Test wd.RequireSetConfig(t, step.Config) if !step.PlanOnly { - err := wd.Apply() + err := runProviderCommand(func() error { + return wd.Apply() + }, wd, defaultPluginServeOpts(wd, step.providers)) if err != nil { return err } - state := getState(t, wd) + var state *terraform.State + err = runProviderCommand(func() error { + state = getState(t, wd) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) + if err != nil { + return err + } if step.Check != nil { state.IsBinaryDrivenTest = true if err := step.Check(state); err != nil { @@ -42,8 +59,22 @@ func testStepNewConfig(t testing.T, c TestCase, wd *tftest.WorkingDir, step Test // Test for perpetual diffs by performing a plan, a refresh, and another plan // do a plan - wd.RequireCreatePlan(t) - plan := wd.RequireSavedPlan(t) + err := runProviderCommand(func() error { + wd.RequireCreatePlan(t) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) + if err != nil { + return err + } + + var plan *tfjson.Plan + err = runProviderCommand(func() error { + plan = wd.RequireSavedPlan(t) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) + if err != nil { + return err + } if !planIsEmpty(plan) { if 
step.ExpectNonEmptyPlan { @@ -56,12 +87,31 @@ func testStepNewConfig(t testing.T, c TestCase, wd *tftest.WorkingDir, step Test // do a refresh if !c.PreventPostDestroyRefresh { - wd.RequireRefresh(t) + err := runProviderCommand(func() error { + wd.RequireRefresh(t) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) + if err != nil { + return err + } } // do another plan - wd.RequireCreatePlan(t) - plan = wd.RequireSavedPlan(t) + err = runProviderCommand(func() error { + wd.RequireCreatePlan(t) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) + if err != nil { + return err + } + + err = runProviderCommand(func() error { + plan = wd.RequireSavedPlan(t) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) + if err != nil { + return err + } // check if plan is empty if !planIsEmpty(plan) { @@ -76,7 +126,14 @@ func testStepNewConfig(t testing.T, c TestCase, wd *tftest.WorkingDir, step Test // ID-ONLY REFRESH // If we've never checked an id-only refresh and our state isn't // empty, find the first resource and test it. - state := getState(t, wd) + var state *terraform.State + err = runProviderCommand(func() error { + state = getState(t, wd) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) + if err != nil { + return err + } if idRefresh && idRefreshCheck == nil && !state.Empty() { // Find the first non-nil resource in the state for _, m := range state.Modules { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go index ec1bc35b..9ce1bb8b 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go @@ -8,11 +8,10 @@ import ( tftest "github.com/hashicorp/terraform-plugin-test" testing "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/terraform-plugin-sdk/v2/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testStepNewImportState(t testing.T, c TestCase, wd *tftest.WorkingDir, step TestStep, cfg string) error { +func testStepNewImportState(t testing.T, c TestCase, helper *tftest.Helper, wd *tftest.WorkingDir, step TestStep, cfg string) error { spewConf := spew.NewDefaultConfig() spewConf.SortKeys = true @@ -21,7 +20,14 @@ func testStepNewImportState(t testing.T, c TestCase, wd *tftest.WorkingDir, step } // get state from check sequence - state := getState(t, wd) + var state *terraform.State + err := runProviderCommand(func() error { + state = getState(t, wd) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) + if err != nil { + t.Fatalf("Error getting state: %s", err) + } // Determine the ID to import var importId string @@ -50,12 +56,28 @@ func testStepNewImportState(t testing.T, c TestCase, wd *tftest.WorkingDir, step t.Fatal("Cannot import state with no specified config") } } - importWd := acctest.TestHelper.RequireNewWorkingDir(t) + importWd := helper.RequireNewWorkingDir(t) defer importWd.Close() importWd.RequireSetConfig(t, step.Config) - importWd.RequireInit(t) - importWd.RequireImport(t, step.ResourceName, importId) - importState := getState(t, wd) + runProviderCommand(func() error { + importWd.RequireInit(t) + return nil + }, importWd, defaultPluginServeOpts(importWd, step.providers)) + err = runProviderCommand(func() error { + 
importWd.RequireImport(t, step.ResourceName, importId) + return nil + }, importWd, defaultPluginServeOpts(importWd, step.providers)) + if err != nil { + t.Fatalf("Error running import: %s", err) + } + var importState *terraform.State + err = runProviderCommand(func() error { + importState = getState(t, wd) + return nil + }, wd, defaultPluginServeOpts(wd, step.providers)) + if err != nil { + t.Fatalf("Error getting state: %s", err) + } // Go through the imported state and verify if step.ImportStateCheck != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go index 83d40aa2..1f65d706 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go @@ -5,15 +5,20 @@ import ( "errors" "fmt" "log" + "os" "sort" + "strings" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/meta" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) +const uaEnvVar = "TF_APPEND_USER_AGENT" + var ReservedProviderFields = []string{ "alias", "version", @@ -243,12 +248,9 @@ func (p *Provider) ValidateResource( // // This won't be called at all if no provider configuration is given. func (p *Provider) Configure(ctx context.Context, c *terraform.ResourceConfig) diag.Diagnostics { - - var diags diag.Diagnostics - // No configuration if p.ConfigureFunc == nil && p.ConfigureContextFunc == nil { - return diags + return nil } sm := schemaMap(p.Schema) @@ -257,31 +259,30 @@ func (p *Provider) Configure(ctx context.Context, c *terraform.ResourceConfig) d // generate an intermediary "diff" although that is never exposed. diff, err := sm.Diff(ctx, nil, c, nil, p.meta, true) if err != nil { - return append(diags, diag.FromErr(err)) + return diag.FromErr(err) } data, err := sm.Data(nil, diff) if err != nil { - return append(diags, diag.FromErr(err)) + return diag.FromErr(err) } if p.ConfigureFunc != nil { meta, err := p.ConfigureFunc(data) if err != nil { - return append(diags, diag.FromErr(err)) + return diag.FromErr(err) } p.meta = meta } if p.ConfigureContextFunc != nil { - meta, ds := p.ConfigureContextFunc(ctx, data) - diags = append(diags, ds...) + meta, diags := p.ConfigureContextFunc(ctx, data) if diags.HasError() { return diags } p.meta = meta } - return diags + return nil } // Resources returns all the available resource types that this provider @@ -430,3 +431,31 @@ func (p *Provider) DataSources() []terraform.DataSource { return result } + +// UserAgent returns a string suitable for use in the User-Agent header of +// requests generated by the provider. The generated string contains the +// version of Terraform, the Plugin SDK, and the provider used to generate the +// request. `name` should be the hyphen-separated reporting name of the +// provider, and `version` should be the version of the provider. +// +// If TF_APPEND_USER_AGENT is set, its value will be appended to the returned +// string. 
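A sketch of typical use from a provider's ConfigureContextFunc; the provider name, the version argument, and the idea of returning the string as provider meta are illustrative only:

    package provider

    import (
        "context"

        "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
        "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    )

    // configureFor returns a ConfigureContextFunc that derives the client
    // User-Agent from the provider instance.
    func configureFor(version string, p *schema.Provider) schema.ConfigureContextFunc {
        return func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) {
            ua := p.UserAgent("terraform-provider-pingaccess", version)
            // Typically ua is set as the User-Agent header on the API
            // client; it is returned as the provider meta here only to
            // keep the sketch self-contained.
            return ua, nil
        }
    }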
+func (p *Provider) UserAgent(name, version string) string { + ua := fmt.Sprintf("Terraform/%s (+https://www.terraform.io) Terraform-Plugin-SDK/%s", p.TerraformVersion, meta.SDKVersionString()) + if name != "" { + ua += " " + name + if version != "" { + ua += "/" + version + } + } + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go index dfeaba3a..3e03f009 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go @@ -266,11 +266,10 @@ type CustomizeDiffFunc func(context.Context, *ResourceDiff, interface{}) error func (r *Resource) create(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { if r.Create != nil { - var diags diag.Diagnostics if err := r.Create(d, meta); err != nil { - diags = append(diags, diag.FromErr(err)) + return diag.FromErr(err) } - return diags + return nil } ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutCreate)) defer cancel() @@ -279,11 +278,10 @@ func (r *Resource) create(ctx context.Context, d *ResourceData, meta interface{} func (r *Resource) read(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { if r.Read != nil { - var diags diag.Diagnostics if err := r.Read(d, meta); err != nil { - diags = append(diags, diag.FromErr(err)) + return diag.FromErr(err) } - return diags + return nil } ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutRead)) defer cancel() @@ -292,11 +290,10 @@ func (r *Resource) read(ctx context.Context, d *ResourceData, meta interface{}) func (r *Resource) update(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { if r.Update != nil { - var diags diag.Diagnostics if err := r.Update(d, meta); err != nil { - diags = append(diags, diag.FromErr(err)) + return diag.FromErr(err) } - return diags + return nil } ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutUpdate)) defer cancel() @@ -305,11 +302,10 @@ func (r *Resource) update(ctx context.Context, d *ResourceData, meta interface{} func (r *Resource) delete(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { if r.Delete != nil { - var diags diag.Diagnostics if err := r.Delete(d, meta); err != nil { - diags = append(diags, diag.FromErr(err)) + return diag.FromErr(err) } - return diags + return nil } ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutDelete)) defer cancel() @@ -322,12 +318,9 @@ func (r *Resource) Apply( s *terraform.InstanceState, d *terraform.InstanceDiff, meta interface{}) (*terraform.InstanceState, diag.Diagnostics) { - - var diags diag.Diagnostics - data, err := schemaMap(r.Schema).Data(s, d) if err != nil { - return s, append(diags, diag.FromErr(err)) + return s, diag.FromErr(err) } if s != nil && data != nil { @@ -358,11 +351,12 @@ func (r *Resource) Apply( s = new(terraform.InstanceState) } + var diags diag.Diagnostics + if d.Destroy || d.RequiresNew() { if s.ID != "" { // Destroy the resource since it is created diags = append(diags, r.delete(ctx, data, meta)...) 
- if diags.HasError() { return r.recordCurrentSchemaVersion(data.State()), diags } @@ -380,7 +374,7 @@ func (r *Resource) Apply( // Reset the data to be stateless since we just destroyed data, err = schemaMap(r.Schema).Data(nil, d) if err != nil { - return nil, append(diags, diag.FromErr(err)) + return nil, append(diags, diag.FromErr(err)...) } // data was reset, need to re-apply the parsed timeouts @@ -485,18 +479,14 @@ func (r *Resource) ReadDataApply( d *terraform.InstanceDiff, meta interface{}, ) (*terraform.InstanceState, diag.Diagnostics) { - - var diags diag.Diagnostics - // Data sources are always built completely from scratch // on each read, so the source state is always nil. data, err := schemaMap(r.Schema).Data(nil, d) if err != nil { - return nil, append(diags, diag.FromErr(err)) + return nil, diag.FromErr(err) } - diags = append(diags, r.read(ctx, data, meta)...) - + diags := r.read(ctx, data, meta) state := data.State() if state != nil && state.ID == "" { // Data sources can set an ID if they want, but they aren't @@ -517,12 +507,9 @@ func (r *Resource) RefreshWithoutUpgrade( ctx context.Context, s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, diag.Diagnostics) { - - var diags diag.Diagnostics - // If the ID is already somehow blank, it doesn't exist if s.ID == "" { - return nil, diags + return nil, nil } rt := ResourceTimeout{} @@ -537,7 +524,7 @@ func (r *Resource) RefreshWithoutUpgrade( // affect our Read later. data, err := schemaMap(r.Schema).Data(s, nil) if err != nil { - return s, append(diags, diag.FromErr(err)) + return s, diag.FromErr(err) } data.timeouts = &rt @@ -547,17 +534,17 @@ func (r *Resource) RefreshWithoutUpgrade( exists, err := r.Exists(data, meta) if err != nil { - return s, append(diags, diag.FromErr(err)) + return s, diag.FromErr(err) } if !exists { - return nil, diags + return nil, nil } } data, err := schemaMap(r.Schema).Data(s, nil) if err != nil { - return s, append(diags, diag.FromErr(err)) + return s, diag.FromErr(err) } data.timeouts = &rt @@ -565,8 +552,7 @@ func (r *Resource) RefreshWithoutUpgrade( data.providerMeta = s.ProviderMeta } - diags = append(diags, r.read(ctx, data, meta)...) - + diags := r.read(ctx, data, meta) state := data.State() if state != nil && state.ID == "" { state = nil diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go index 2be5278b..1906343d 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go @@ -33,6 +33,7 @@ type ResourceData struct { multiReader *MultiLevelFieldReader setWriter *MapFieldWriter newState *terraform.InstanceState + partial bool once sync.Once isNew bool @@ -141,6 +142,21 @@ func (d *ResourceData) HasChange(key string) bool { return !reflect.DeepEqual(o, n) } +// Partial is a legacy function that was used for capturing state of specific +// attributes if an update only partially worked. Enabling this flag without +// setting any specific keys with the now removed SetPartial has a useful side +// effect of preserving all of the resource's previous state. Although confusing, +// it has been discovered that during an update when an error is returned, the +// proposed config is set into state, even without any calls to d.Set. 
+//
+// In practice this default behavior goes mostly unnoticed since Terraform
+// refreshes between operations by default. The state situation discussed is
+// subject to further investigation and potential change. Until then, this
+// function has been preserved for the specific use case.
+func (d *ResourceData) Partial(on bool) {
+	d.partial = on
+}
+
 // Set sets the value for the given key.
 //
 // If the key is invalid or the value is not a correct type, an error
@@ -167,8 +183,12 @@ func (d *ResourceData) Set(key string, value interface{}) error {
 	}
 
 	err := d.setWriter.WriteField(strings.Split(key, "."), value)
-	if err != nil && d.panicOnError {
-		panic(err)
+	if err != nil {
+		if d.panicOnError {
+			panic(err)
+		} else {
+			log.Printf("[ERROR] setting state: %s", err)
+		}
 	}
 	return err
 }
@@ -283,7 +303,9 @@ func (d *ResourceData) State() *terraform.InstanceState {
 		rawMap := make(map[string]interface{})
 		for k := range d.schema {
 			source := getSourceSet
-
+			if d.partial {
+				source = getSourceState
+			}
 			raw := d.get([]string{k}, source)
 			if raw.Exists && !raw.Computed {
 				rawMap[k] = raw.Value
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go
index f39af0a9..29231193 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go
@@ -31,9 +31,6 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
 )
 
-// Name of ENV variable which (if not empty) prefers panic over error
-const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR"
-
 // Schema is used to describe the structure of a value.
 //
 // Read the documentation of the struct elements for important details.
@@ -471,18 +468,7 @@ type InternalMap = schemaMap
 type schemaMap map[string]*Schema
 
 func (m schemaMap) panicOnError() bool {
-	if env := os.Getenv(PanicOnErr); env == "" {
-		// default to true
-		return true
-	} else if b, err := strconv.ParseBool(env); err == nil {
-		// allow opt out
-		return b
-	} else {
-		// default to true for anything set, this is backwards compatible
-		// with the previous implementation
-		log.Printf("[WARN] %s=%s not parsable: %s, defaulting to true", PanicOnErr, env, err)
-		return true
-	}
+	return os.Getenv("TF_ACC") != ""
 }
 
 // Data returns a ResourceData for the given schema, state, and diff.
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go
index 1518391e..a510e60f 100644
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go
@@ -8,7 +8,7 @@ import (
 	"strconv"
 	"sync"
 
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/hashcode"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode"
 )
 
 // HashString hashes strings.
If you want a Set of strings, this is the
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go
similarity index 100%
rename from vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/hashcode/hashcode.go
rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go
new file mode 100644
index 00000000..37a80a16
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go
@@ -0,0 +1,36 @@
+// The meta package provides a location to set the release version
+// and any other relevant metadata for the SDK.
+//
+// This package should not import any other SDK packages.
+package meta
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+)
+
+// The main version number that is being run at the moment.
+var SDKVersion = "2.0.0"
+
+// A pre-release marker for the version. If this is "" (empty string)
+// then it means that it is a final release. Otherwise, this is a pre-release
+// such as "dev" (in development), "beta", "rc1", etc.
+var SDKPrerelease = "rc.2"
+
+// SemVer is an instance of version.Version. This has the secondary
+// benefit of verifying during tests and init time that our version is a
+// proper semantic version, which should always be the case.
+var SemVer *version.Version
+
+func init() {
+	SemVer = version.Must(version.NewVersion(SDKVersion))
+}
+
+// SDKVersionString returns the complete version string, including prerelease
+func SDKVersionString() string {
+	if SDKPrerelease != "" {
+		return fmt.Sprintf("%s-%s", SDKVersion, SDKPrerelease)
+	}
+	return SDKVersion
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go
new file mode 100644
index 00000000..6f30f1d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go
@@ -0,0 +1,102 @@
+package plugin
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"os/signal"
+	"time"
+
+	"github.com/hashicorp/go-plugin"
+)
+
+// ReattachConfig holds the information Terraform needs to be able to attach
+// itself to a provider process, so it can drive the process.
+type ReattachConfig struct {
+	Protocol string
+	Pid      int
+	Test     bool
+	Addr     ReattachConfigAddr
+}
+
+// ReattachConfigAddr is a JSON-encoding friendly version of net.Addr.
+type ReattachConfigAddr struct {
+	Network string
+	String  string
+}
+
+// DebugServe starts a plugin server in debug mode; this should only be used
+// when the provider will manage its own lifecycle. It is not recommended for
+// normal usage; Serve is the correct function for that.
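Together with the Debug helper added just below, this is what lets a provider run under a debugger such as delve. A minimal sketch of a provider main wired for it; the module path, the provider address, and the assumption that pingaccess.Provider satisfies plugin.ProviderFunc are all illustrative:

    package main

    import (
        "context"
        "flag"
        "log"

        "github.com/hashicorp/terraform-plugin-sdk/v2/plugin"

        "github.com/iwarapter/terraform-provider-pingaccess/pingaccess"
    )

    func main() {
        debug := flag.Bool("debug", false, "run the provider with support for debuggers like delve")
        flag.Parse()

        opts := &plugin.ServeOpts{ProviderFunc: pingaccess.Provider}

        if *debug {
            // Prints a TF_REATTACH_PROVIDERS value for Terraform to use
            // and blocks until interrupted.
            if err := plugin.Debug(context.Background(), "registry.terraform.io/-/pingaccess", opts); err != nil {
                log.Fatal(err)
            }
            return
        }

        plugin.Serve(opts)
    }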
+func DebugServe(ctx context.Context, opts *ServeOpts) (ReattachConfig, <-chan struct{}, error) { + reattachCh := make(chan *plugin.ReattachConfig) + closeCh := make(chan struct{}) + + opts.TestConfig = &plugin.ServeTestConfig{ + Context: ctx, + ReattachConfigCh: reattachCh, + CloseCh: closeCh, + } + + go Serve(opts) + + var config *plugin.ReattachConfig + select { + case config = <-reattachCh: + case <-time.After(2 * time.Second): + return ReattachConfig{}, closeCh, errors.New("timeout waiting on reattach config") + } + + if config == nil { + return ReattachConfig{}, closeCh, errors.New("nil reattach config received") + } + + return ReattachConfig{ + Protocol: string(config.Protocol), + Pid: config.Pid, + Test: config.Test, + Addr: ReattachConfigAddr{ + Network: config.Addr.Network(), + String: config.Addr.String(), + }, + }, closeCh, nil +} + +// Debug starts a debug server and controls its lifecycle, printing the +// information needed for Terraform to connect to the provider to stdout. +// os.Interrupt will be captured and used to stop the server. +func Debug(ctx context.Context, providerAddr string, opts *ServeOpts) error { + ctx, cancel := context.WithCancel(ctx) + // Ctrl-C will stop the server + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer func() { + signal.Stop(sigCh) + cancel() + }() + config, closeCh, err := DebugServe(ctx, opts) + if err != nil { + return fmt.Errorf("Error launching debug server: %w", err) + } + go func() { + select { + case <-sigCh: + cancel() + case <-ctx.Done(): + } + }() + reattachStr, err := json.Marshal(map[string]ReattachConfig{ + providerAddr: config, + }) + if err != nil { + return fmt.Errorf("Error building reattach string: %w", err) + } + + fmt.Printf("Provider server started; to attach Terraform, set TF_REATTACH_PROVIDERS to the following:\n%s\n", string(reattachStr)) + + // wait for the server to be done + <-closeCh + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go index ca49542b..e5649d07 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go @@ -3,6 +3,7 @@ package plugin import ( "context" + hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "google.golang.org/grpc" @@ -34,6 +35,16 @@ type ServeOpts struct { // Wrapped versions of the above plugins will automatically shimmed and // added to the GRPC functions when possible. GRPCProviderFunc GRPCProviderFunc + + // Logger is the logger that go-plugin will use. + Logger hclog.Logger + + // TestConfig should only be set when the provider is being tested; it + // will opt out of go-plugin's lifecycle management and other features, + // and will use the supplied configuration options to control the + // plugin's lifecycle and communicate connection information. See the + // go-plugin GoDoc for more information. + TestConfig *plugin.ServeTestConfig } // Serve serves a plugin. This function never returns and should be the final @@ -48,7 +59,6 @@ func Serve(opts *ServeOpts) { } provider := opts.GRPCProviderFunc() - plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: Handshake, VersionedPlugins: map[int]plugin.PluginSet{ @@ -66,5 +76,7 @@ func Serve(opts *ServeOpts) { return handler(ctx, req) }))...) 
}, + Logger: opts.Logger, + Test: opts.TestConfig, }) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go index 56e974ac..87f7610a 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "log" + "os" "reflect" "sort" "strconv" @@ -611,9 +612,13 @@ func (s *State) ensureHasLineage() { panic(fmt.Errorf("Failed to generate lineage: %v", err)) } s.Lineage = lineage - log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) + if os.Getenv("TF_ACC") == "" || os.Getenv("TF_ACC_STATE_LINEAGE") == "1" { + log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) + } } else { - log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) + if os.Getenv("TF_ACC") == "" || os.Getenv("TF_ACC_STATE_LINEAGE") == "1" { + log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) + } } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/config.go b/vendor/github.com/hashicorp/terraform-plugin-test/config.go index a40460e9..306f1ba2 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-test/config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-test/config.go @@ -27,7 +27,7 @@ func DiscoverConfig(pluginName string, sourceDir string) (*Config, error) { if tfVersion == "" { tfExec = FindTerraform() if tfExec == "" { - return nil, fmt.Errorf("unable to find 'terraform' executable for testing; either place it in PATH or set TF_ACC_TERRAFORM_PATH explicitly to a direct executable path") + return nil, fmt.Errorf("unable to find 'terraform' executable for testing; either set a version with TF_ACC_TERRAFORM_VERSION, place it in PATH, or set TF_ACC_TERRAFORM_PATH explicitly to a direct executable path") } } else { tfExec, err = InstallTerraform(tfVersion) diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/terraform.go b/vendor/github.com/hashicorp/terraform-plugin-test/terraform.go index d2f118e1..d32496b0 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-test/terraform.go +++ b/vendor/github.com/hashicorp/terraform-plugin-test/terraform.go @@ -4,13 +4,14 @@ import ( "bytes" "encoding/json" "fmt" - getter "github.com/hashicorp/go-getter" "io/ioutil" "os" "os/exec" "path/filepath" "runtime" "strings" + + getter "github.com/hashicorp/go-getter" ) const releaseHost = "https://releases.hashicorp.com" @@ -82,7 +83,7 @@ func InstallTerraform(tfVersion string) (string, error) { } // getTerraformEnv returns the appropriate Env for the Terraform command. -func getTerraformEnv() []string { +func (wd *WorkingDir) getTerraformEnv() []string { var env []string for _, e := range os.Environ() { env = append(env, e) @@ -100,10 +101,17 @@ func getTerraformEnv() []string { env = append(env, "TF_LOG=") // so logging can't pollute our stderr output env = append(env, "TF_INPUT=0") + // don't propagate the magic cookie + env = append(env, "TF_PLUGIN_MAGIC_COOKIE=") + if p := os.Getenv("TF_ACC_LOG_PATH"); p != "" { env = append(env, "TF_LOG=TRACE") env = append(env, "TF_LOG_PATH="+p) } + + for k, v := range wd.env { + env = append(env, k+"="+v) + } return env } @@ -113,7 +121,7 @@ func (wd *WorkingDir) runTerraform(args ...string) error { allArgs := []string{"terraform"} allArgs = append(allArgs, args...) 
- env := getTerraformEnv() + env := wd.getTerraformEnv() var errBuf strings.Builder @@ -138,7 +146,7 @@ func (wd *WorkingDir) runTerraformJSON(target interface{}, args ...string) error allArgs := []string{"terraform"} allArgs = append(allArgs, args...) - env := getTerraformEnv() + env := wd.getTerraformEnv() var outBuf bytes.Buffer var errBuf strings.Builder diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/working_dir.go b/vendor/github.com/hashicorp/terraform-plugin-test/working_dir.go index 7f64706f..0b3d8001 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-test/working_dir.go +++ b/vendor/github.com/hashicorp/terraform-plugin-test/working_dir.go @@ -24,6 +24,8 @@ type WorkingDir struct { // configDir contains the singular config file generated for each test configDir string + + env map[string]string } // Close deletes the directories and files created to represent the receiving @@ -33,6 +35,24 @@ func (wd *WorkingDir) Close() error { return os.RemoveAll(wd.baseDir) } +// Setenv sets an environment variable on the WorkingDir. +func (wd *WorkingDir) Setenv(envVar, val string) { + if wd.env == nil { + wd.env = map[string]string{} + } + wd.env[envVar] = val +} + +// Unsetenv removes an environment variable from the WorkingDir. +func (wd *WorkingDir) Unsetenv(envVar string) { + delete(wd.env, envVar) +} + +// GetHelper returns the Helper set on the WorkingDir. +func (wd *WorkingDir) GetHelper() *Helper { + return wd.h +} + // SetConfig sets a new configuration for the working directory. // // This must be called at least once before any call to Init, Plan, Apply, or @@ -114,7 +134,7 @@ func (wd *WorkingDir) RequireClearPlan(t TestControl) { } func (wd *WorkingDir) init(pluginDir string) error { - args := []string{"init"} + args := []string{"init", wd.configDir} args = append(args, wd.baseArgs...) return wd.runTerraform(args...) } diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/LICENSE b/vendor/github.com/iwarapter/pingaccess-sdk-go/LICENSE new file mode 100644 index 00000000..e4dcb642 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Iain Adams + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
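The Setenv/Unsetenv plumbing above is what lets the SDK inject TF_REATTACH_PROVIDERS per working directory. A short sketch of the pattern from a test's point of view, assuming a helper and reattach JSON supplied by the surrounding harness:

    package provider

    import (
        "testing"

        tftest "github.com/hashicorp/terraform-plugin-test"
    )

    // runInitWithReattach shows the new per-WorkingDir environment in use;
    // helper and reattachJSON are assumed to come from the test harness.
    func runInitWithReattach(t *testing.T, helper *tftest.Helper, reattachJSON string) {
        wd := helper.RequireNewWorkingDir(t)
        defer wd.Close()

        // Setenv values are appended to the environment of every terraform
        // command run in this working directory (see getTerraformEnv above).
        wd.Setenv("TF_REATTACH_PROVIDERS", reattachJSON)
        defer wd.Unsetenv("TF_REATTACH_PROVIDERS")

        wd.RequireInit(t)
    }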
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/accessTokenValidators.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/accessTokenValidators.go index 809e331c..f11debf3 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/accessTokenValidators.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/accessTokenValidators.go @@ -9,6 +9,15 @@ import ( type AccessTokenValidatorsService service +type AccessTokenValidatorsAPI interface { + GetAccessTokenValidatorsCommand(input *GetAccessTokenValidatorsCommandInput) (result *AccessTokenValidatorsView, resp *http.Response, err error) + AddAccessTokenValidatorCommand(input *AddAccessTokenValidatorCommandInput) (result *AccessTokenValidatorView, resp *http.Response, err error) + GetAccessTokenValidatorDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) + DeleteAccessTokenValidatorCommand(input *DeleteAccessTokenValidatorCommandInput) (resp *http.Response, err error) + GetAccessTokenValidatorCommand(input *GetAccessTokenValidatorCommandInput) (result *AccessTokenValidatorView, resp *http.Response, err error) + UpdateAccessTokenValidatorCommand(input *UpdateAccessTokenValidatorCommandInput) (result *AccessTokenValidatorView, resp *http.Response, err error) +} + //GetAccessTokenValidatorsCommand - Get all Access Token Validators //RequestType: GET //Input: input *GetAccessTokenValidatorsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/acme.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/acme.go index 1dab38b5..3c6bd7ef 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/acme.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/acme.go @@ -9,6 +9,23 @@ import ( type AcmeService service +type AcmeAPI interface { + GetAcmeServersCommand(input *GetAcmeServersCommandInput) (result *AcmeServersView, resp *http.Response, err error) + AddAcmeServerCommand(input *AddAcmeServerCommandInput) (result *AcmeServerView, resp *http.Response, err error) + GetDefaultAcmeServerCommand() (result *LinkView, resp *http.Response, err error) + UpdateDefaultAcmeServerCommand(input *UpdateDefaultAcmeServerCommandInput) (result *LinkView, resp *http.Response, err error) + DeleteAcmeServerCommand(input *DeleteAcmeServerCommandInput) (result *AcmeServerView, resp *http.Response, err error) + GetAcmeServerCommand(input *GetAcmeServerCommandInput) (result *AcmeServerView, resp *http.Response, err error) + GetAcmeAccountsCommand(input *GetAcmeAccountsCommandInput) (result *AcmeAccountView, resp *http.Response, err error) + AddAcmeAccountCommand(input *AddAcmeAccountCommandInput) (result *AcmeAccountView, resp *http.Response, err error) + DeleteAcmeAccountCommand(input *DeleteAcmeAccountCommandInput) (result *AcmeAccountView, resp *http.Response, err error) + GetAcmeAccountCommand(input *GetAcmeAccountCommandInput) (result *AcmeAccountView, resp *http.Response, err error) + GetAcmeCertificateRequestsCommand(input *GetAcmeCertificateRequestsCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error) + AddAcmeCertificateRequestCommand(input *AddAcmeCertificateRequestCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error) + DeleteAcmeCertificateRequestCommand(input *DeleteAcmeCertificateRequestCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error) + GetAcmeCertificateRequestCommand(input 
*GetAcmeCertificateRequestCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error) +} + //GetAcmeServersCommand - Get all ACME Servers //RequestType: GET //Input: input *GetAcmeServersCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminConfig.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminConfig.go index 850d4256..c3671eb7 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminConfig.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminConfig.go @@ -9,6 +9,18 @@ import ( type AdminConfigService service +type AdminConfigAPI interface { + DeleteAdminConfigurationCommand() (resp *http.Response, err error) + GetAdminConfigurationCommand() (result *AdminConfigurationView, resp *http.Response, err error) + UpdateAdminConfigurationCommand(input *UpdateAdminConfigurationCommandInput) (result *AdminConfigurationView, resp *http.Response, err error) + GetReplicaAdminsCommand() (result *ReplicaAdminsView, resp *http.Response, err error) + AddReplicaAdminCommand(input *AddReplicaAdminCommandInput) (result *ReplicaAdminView, resp *http.Response, err error) + DeleteReplicaAdminCommand(input *DeleteReplicaAdminCommandInput) (resp *http.Response, err error) + GetReplicaAdminCommand(input *GetReplicaAdminCommandInput) (result *ReplicaAdminView, resp *http.Response, err error) + UpdateAdminReplicaCommand(input *UpdateAdminReplicaCommandInput) (result *ReplicaAdminView, resp *http.Response, err error) + GetAdminReplicaFileCommand(input *GetAdminReplicaFileCommandInput) (resp *http.Response, err error) +} + //DeleteAdminConfigurationCommand - Resets the Admin Config to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminSessionInfo.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminSessionInfo.go index ed84387a..3f435b39 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminSessionInfo.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminSessionInfo.go @@ -8,6 +8,12 @@ import ( type AdminSessionInfoService service +type AdminSessionInfoAPI interface { + AdminSessionDeleteCommand() (resp *http.Response, err error) + AdminSessionInfoCommand() (result *SessionInfo, resp *http.Response, err error) + AdminSessionInfoCheckCommand() (result *SessionInfo, resp *http.Response, err error) +} + //AdminSessionDeleteCommand - Invalidate the Admin session information //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/agents.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/agents.go index 5b6e5b70..5ce40e08 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/agents.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/agents.go @@ -9,6 +9,17 @@ import ( type AgentsService service +type AgentsAPI interface { + GetAgentsCommand(input *GetAgentsCommandInput) (result *AgentsView, resp *http.Response, err error) + AddAgentCommand(input *AddAgentCommandInput) (result *AgentView, resp *http.Response, err error) + GetAgentCertificatesCommand(input *GetAgentCertificatesCommandInput) (result *AgentCertificatesView, resp *http.Response, err error) + GetAgentCertificateCommand(input *GetAgentCertificateCommandInput) (result *AgentCertificateView, resp *http.Response, err error) + GetAgentFileCommand(input *GetAgentFileCommandInput) (resp 
*http.Response, err error) + DeleteAgentCommand(input *DeleteAgentCommandInput) (resp *http.Response, err error) + GetAgentCommand(input *GetAgentCommandInput) (result *AgentView, resp *http.Response, err error) + UpdateAgentCommand(input *UpdateAgentCommandInput) (result *AgentView, resp *http.Response, err error) +} + //GetAgentsCommand - Get all Agents //RequestType: GET //Input: input *GetAgentsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/applications.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/applications.go index a7657c05..5cf1ba8b 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/applications.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/applications.go @@ -9,6 +9,26 @@ import ( type ApplicationsService service +type ApplicationsAPI interface { + GetApplicationsCommand(input *GetApplicationsCommandInput) (result *ApplicationsView, resp *http.Response, err error) + AddApplicationCommand(input *AddApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error) + DeleteReservedApplicationCommand() (resp *http.Response, err error) + GetReservedApplicationCommand() (result *ReservedApplicationView, resp *http.Response, err error) + UpdateReservedApplicationCommand(input *UpdateReservedApplicationCommandInput) (result *ReservedApplicationView, resp *http.Response, err error) + GetResourcesCommand(input *GetResourcesCommandInput) (result *ResourcesView, resp *http.Response, err error) + GetApplicationsResourcesMethodsCommand() (result *MethodsView, resp *http.Response, err error) + DeleteApplicationResourceCommand(input *DeleteApplicationResourceCommandInput) (resp *http.Response, err error) + GetApplicationResourceCommand(input *GetApplicationResourceCommandInput) (result *ResourceView, resp *http.Response, err error) + UpdateApplicationResourceCommand(input *UpdateApplicationResourceCommandInput) (result *ResourceView, resp *http.Response, err error) + DeleteApplicationCommand(input *DeleteApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error) + GetApplicationCommand(input *GetApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error) + UpdateApplicationCommand(input *UpdateApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error) + GetResourceMatchingEvaluationOrderCommand(input *GetResourceMatchingEvaluationOrderCommandInput) (result *ResourceMatchingEvaluationOrderView, resp *http.Response, err error) + GetApplicationResourcesCommand(input *GetApplicationResourcesCommandInput) (result *ResourcesView, resp *http.Response, err error) + AddApplicationResourceCommand(input *AddApplicationResourceCommandInput) (result *ResourceView, resp *http.Response, err error) + GetResourceAutoOrderCommand(input *GetResourceAutoOrderCommandInput) (result *ResourceOrderView, resp *http.Response, err error) +} + //GetApplicationsCommand - Get all Applications //RequestType: GET //Input: input *GetApplicationsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/auth.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/auth.go index 8c0d32e5..1ef6cd0f 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/auth.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/auth.go @@ -8,6 +8,21 @@ import ( type AuthService service +type AuthAPI interface { + DeleteBasicAuthCommand() (resp *http.Response, err error) + 
GetBasicAuthCommand() (result *BasicConfig, resp *http.Response, err error) + UpdateBasicAuthCommand(input *UpdateBasicAuthCommandInput) (result *BasicAuthConfigView, resp *http.Response, err error) + DeleteOAuthAuthCommand() (resp *http.Response, err error) + GetOAuthAuthCommand() (result *OAuthConfigView, resp *http.Response, err error) + UpdateOAuthAuthCommand(input *UpdateOAuthAuthCommandInput) (result *OAuthConfigView, resp *http.Response, err error) + DeleteOidcAuthCommand() (resp *http.Response, err error) + GetOidcAuthCommand() (result *OidcConfigView, resp *http.Response, err error) + UpdateOidcAuthCommand(input *UpdateOidcAuthCommandInput) (result *OidcConfigView, resp *http.Response, err error) + DeleteAdminBasicWebSessionCommand() (resp *http.Response, err error) + GetAdminBasicWebSessionCommand() (result *AdminBasicWebSessionView, resp *http.Response, err error) + UpdateAdminBasicWebSessionCommand(input *UpdateAdminBasicWebSessionCommandInput) (result *AdminBasicWebSessionView, resp *http.Response, err error) +} + //DeleteBasicAuthCommand - Resets the HTTP Basic Authentication configuration to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authTokenManagement.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authTokenManagement.go index 4e448f4f..0185c69a 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authTokenManagement.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authTokenManagement.go @@ -8,6 +8,14 @@ import ( type AuthTokenManagementService service +type AuthTokenManagementAPI interface { + DeleteAuthTokenManagementCommand() (resp *http.Response, err error) + GetAuthTokenManagementCommand() (result *AuthTokenManagementView, resp *http.Response, err error) + UpdateAuthTokenManagementCommand(input *UpdateAuthTokenManagementCommandInput) (result *AuthTokenManagementView, resp *http.Response, err error) + GetAuthTokenKeySetCommand() (result *KeySetView, resp *http.Response, err error) + UpdateAuthTokenKeySetCommand(input *UpdateAuthTokenKeySetCommandInput) (result *KeySetView, resp *http.Response, err error) +} + //DeleteAuthTokenManagementCommand - Resets the Auth Token Management configuration to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authnReqLists.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authnReqLists.go index 3605e9e1..4cffc600 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authnReqLists.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authnReqLists.go @@ -9,6 +9,14 @@ import ( type AuthnReqListsService service +type AuthnReqListsAPI interface { + GetAuthnReqListsCommand(input *GetAuthnReqListsCommandInput) (result *AuthnReqListsView, resp *http.Response, err error) + AddAuthnReqListCommand(input *AddAuthnReqListCommandInput) (result *AuthnReqListView, resp *http.Response, err error) + DeleteAuthnReqListCommand(input *DeleteAuthnReqListCommandInput) (resp *http.Response, err error) + GetAuthnReqListCommand(input *GetAuthnReqListCommandInput) (result *AuthnReqListView, resp *http.Response, err error) + UpdateAuthnReqListCommand(input *UpdateAuthnReqListCommandInput) (result *AuthnReqListView, resp *http.Response, err error) +} + //GetAuthnReqListsCommand - Get all Authentication Requirement Lists //RequestType: GET //Input: input *GetAuthnReqListsCommandInput diff --git 
a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/backup.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/backup.go index 0531e8b0..fd67ac14 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/backup.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/backup.go @@ -8,6 +8,10 @@ import ( type BackupService service +type BackupAPI interface { + BackupCommand() (resp *http.Response, err error) +} + //BackupCommand - Create a local database backup //RequestType: GET //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/certificates.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/certificates.go index 781484e4..1935d24b 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/certificates.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/certificates.go @@ -9,6 +9,15 @@ import ( type CertificatesService service +type CertificatesAPI interface { + GetTrustedCerts(input *GetTrustedCertsInput) (result *TrustedCertsView, resp *http.Response, err error) + ImportTrustedCert(input *ImportTrustedCertInput) (result *TrustedCertView, resp *http.Response, err error) + DeleteTrustedCertCommand(input *DeleteTrustedCertCommandInput) (resp *http.Response, err error) + GetTrustedCert(input *GetTrustedCertInput) (result *TrustedCertView, resp *http.Response, err error) + UpdateTrustedCert(input *UpdateTrustedCertInput) (result *TrustedCertView, resp *http.Response, err error) + ExportTrustedCert(input *ExportTrustedCertInput) (resp *http.Response, err error) +} + //GetTrustedCerts - Get all Certificates //RequestType: GET //Input: input *GetTrustedCertsInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config.go index cd401af6..79a8180e 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config.go @@ -9,6 +9,18 @@ import ( type ConfigService service +type ConfigAPI interface { + ConfigExportCommand() (result *ExportData, resp *http.Response, err error) + GetConfigExportWorkflowsCommand() (result *ConfigStatusesView, resp *http.Response, err error) + AddConfigExportWorkflowCommand() (result *ConfigStatusView, resp *http.Response, err error) + GetConfigExportWorkflowCommand(input *GetConfigExportWorkflowCommandInput) (result *ConfigStatusView, resp *http.Response, err error) + GetConfigExportWorkflowDataCommand(input *GetConfigExportWorkflowDataCommandInput) (result *ExportData, resp *http.Response, err error) + ConfigImportCommand(input *ConfigImportCommandInput) (resp *http.Response, err error) + GetConfigImportWorkflowsCommand() (result *ConfigStatusesView, resp *http.Response, err error) + AddConfigImportWorkflowCommand(input *AddConfigImportWorkflowCommandInput) (resp *http.Response, err error) + GetConfigImportWorkflowCommand(input *GetConfigImportWorkflowCommandInput) (result *ConfigStatusView, resp *http.Response, err error) +} + //ConfigExportCommand - [Attention: The endpoint "/config/export" is deprecated. The "config/export/workflows" endpoint should be used instead.] 
Export a JSON backup of the entire system //RequestType: GET //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engineListeners.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engineListeners.go index 7a9df76f..a58e11bc 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engineListeners.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engineListeners.go @@ -9,6 +9,14 @@ import ( type EngineListenersService service +type EngineListenersAPI interface { + GetEngineListenersCommand(input *GetEngineListenersCommandInput) (result *EngineListenersView, resp *http.Response, err error) + AddEngineListenerCommand(input *AddEngineListenerCommandInput) (result *EngineListenerView, resp *http.Response, err error) + DeleteEngineListenerCommand(input *DeleteEngineListenerCommandInput) (resp *http.Response, err error) + GetEngineListenerCommand(input *GetEngineListenerCommandInput) (result *EngineListenerView, resp *http.Response, err error) + UpdateEngineListenerCommand(input *UpdateEngineListenerCommandInput) (result *EngineListenerView, resp *http.Response, err error) +} + //GetEngineListenersCommand - Get all Engine Listeners //RequestType: GET //Input: input *GetEngineListenersCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engines.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engines.go index b782ddd3..c5082693 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engines.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engines.go @@ -9,6 +9,18 @@ import ( type EnginesService service +type EnginesAPI interface { + GetEnginesCommand(input *GetEnginesCommandInput) (result *EnginesView, resp *http.Response, err error) + AddEngineCommand(input *AddEngineCommandInput) (result *EngineView, resp *http.Response, err error) + GetEngineCertificatesCommand(input *GetEngineCertificatesCommandInput) (result *EngineCertificateView, resp *http.Response, err error) + GetEngineCertificateCommand(input *GetEngineCertificateCommandInput) (result *EngineCertificateView, resp *http.Response, err error) + GetEngineStatusCommand() (result *EngineHealthStatusView, resp *http.Response, err error) + DeleteEngineCommand(input *DeleteEngineCommandInput) (resp *http.Response, err error) + GetEngineCommand(input *GetEngineCommandInput) (result *EngineView, resp *http.Response, err error) + UpdateEngineCommand(input *UpdateEngineCommandInput) (result *EngineView, resp *http.Response, err error) + GetEngineConfigFileCommand(input *GetEngineConfigFileCommandInput) (resp *http.Response, err error) +} + //GetEnginesCommand - Get all Engines //RequestType: GET //Input: input *GetEnginesCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/globalUnprotectedResources.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/globalUnprotectedResources.go index c39c01be..192265f7 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/globalUnprotectedResources.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/globalUnprotectedResources.go @@ -9,6 +9,14 @@ import ( type GlobalUnprotectedResourcesService service +type GlobalUnprotectedResourcesAPI interface { + GetGlobalUnprotectedResourcesCommand(input *GetGlobalUnprotectedResourcesCommandInput) (result *GlobalUnprotectedResourcesView, resp *http.Response, err error) + AddGlobalUnprotectedResourceCommand(input 
*AddGlobalUnprotectedResourceCommandInput) (result *GlobalUnprotectedResourceView, resp *http.Response, err error) + DeleteGlobalUnprotectedResourceCommand(input *DeleteGlobalUnprotectedResourceCommandInput) (resp *http.Response, err error) + GetGlobalUnprotectedResourceCommand(input *GetGlobalUnprotectedResourceCommandInput) (result *GlobalUnprotectedResourceView, resp *http.Response, err error) + UpdateGlobalUnprotectedResourceCommand(input *UpdateGlobalUnprotectedResourceCommandInput) (result *GlobalUnprotectedResourceView, resp *http.Response, err error) +} + //GetGlobalUnprotectedResourcesCommand - Get all Global Unprotected Resources //RequestType: GET //Input: input *GetGlobalUnprotectedResourcesCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/highAvailability.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/highAvailability.go index 5e311f25..8c0d1991 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/highAvailability.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/highAvailability.go @@ -9,6 +9,23 @@ import ( type HighAvailabilityService service +type HighAvailabilityAPI interface { + GetAvailabilityProfilesCommand(input *GetAvailabilityProfilesCommandInput) (result *AvailabilityProfilesView, resp *http.Response, err error) + AddAvailabilityProfileCommand(input *AddAvailabilityProfileCommandInput) (result *AvailabilityProfileView, resp *http.Response, err error) + GetAvailabilityProfileDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) + GetAvailabilityProfileDescriptorCommand(input *GetAvailabilityProfileDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) + DeleteAvailabilityProfileCommand(input *DeleteAvailabilityProfileCommandInput) (resp *http.Response, err error) + GetAvailabilityProfileCommand(input *GetAvailabilityProfileCommandInput) (result *AvailabilityProfileView, resp *http.Response, err error) + UpdateAvailabilityProfileCommand(input *UpdateAvailabilityProfileCommandInput) (result *AvailabilityProfileView, resp *http.Response, err error) + GetLoadBalancingStrategiesCommand(input *GetLoadBalancingStrategiesCommandInput) (result *LoadBalancingStrategiesView, resp *http.Response, err error) + AddLoadBalancingStrategyCommand(input *AddLoadBalancingStrategyCommandInput) (result *LoadBalancingStrategyView, resp *http.Response, err error) + GetLoadBalancingStrategyDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) + GetLoadBalancingStrategyDescriptorCommand(input *GetLoadBalancingStrategyDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) + DeleteLoadBalancingStrategyCommand(input *DeleteLoadBalancingStrategyCommandInput) (resp *http.Response, err error) + GetLoadBalancingStrategyCommand(input *GetLoadBalancingStrategyCommandInput) (result *LoadBalancingStrategyView, resp *http.Response, err error) + UpdateLoadBalancingStrategyCommand(input *UpdateLoadBalancingStrategyCommandInput) (result *LoadBalancingStrategyView, resp *http.Response, err error) +} + //GetAvailabilityProfilesCommand - Get all Availability Profiles //RequestType: GET //Input: input *GetAvailabilityProfilesCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/hsmProviders.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/hsmProviders.go index 78098ec3..2d011113 100644 --- 
a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/hsmProviders.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/hsmProviders.go @@ -9,6 +9,15 @@ import ( type HsmProvidersService service +type HsmProvidersAPI interface { + GetHsmProvidersCommand(input *GetHsmProvidersCommandInput) (result *HsmProviderView, resp *http.Response, err error) + AddHsmProviderCommand(input *AddHsmProviderCommandInput) (result *HsmProviderView, resp *http.Response, err error) + GetHsmProviderDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) + DeleteHsmProviderCommand(input *DeleteHsmProviderCommandInput) (resp *http.Response, err error) + GetHsmProviderCommand(input *GetHsmProviderCommandInput) (result *HsmProviderView, resp *http.Response, err error) + UpdateHsmProviderCommand(input *UpdateHsmProviderCommandInput) (result *HsmProviderView, resp *http.Response, err error) +} + //GetHsmProvidersCommand - Get all HSM Providers //RequestType: GET //Input: input *GetHsmProvidersCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpConfig.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpConfig.go index db8d1feb..7edbd000 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpConfig.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpConfig.go @@ -8,6 +8,21 @@ import ( type HttpConfigService service +type HttpConfigAPI interface { + DeleteHttpMonitoringCommand() (resp *http.Response, err error) + GetHttpMonitoringCommand() (result *HttpMonitoringView, resp *http.Response, err error) + UpdateHttpMonitoringCommand(input *UpdateHttpMonitoringCommandInput) (result *HttpMonitoringView, resp *http.Response, err error) + DeleteHostSourceCommand() (resp *http.Response, err error) + GetHostSourceCommand() (result *HostMultiValueSourceView, resp *http.Response, err error) + UpdateHostSourceCommand(input *UpdateHostSourceCommandInput) (result *HostMultiValueSourceView, resp *http.Response, err error) + DeleteIpSourceCommand() (resp *http.Response, err error) + GetIpSourceCommand() (result *IpMultiValueSourceView, resp *http.Response, err error) + UpdateIpSourceCommand(input *UpdateIpSourceCommandInput) (result *IpMultiValueSourceView, resp *http.Response, err error) + DeleteProtoSourceCommand() (resp *http.Response, err error) + GetProtoSourceCommand() (result *ProtocolSourceView, resp *http.Response, err error) + UpdateProtocolSourceCommand(input *UpdateProtocolSourceCommandInput) (result *ProtocolSourceView, resp *http.Response, err error) +} + //DeleteHttpMonitoringCommand - Resets the HTTP monitoring auditLevel to default value //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpsListeners.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpsListeners.go index cc90a135..56deb1de 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpsListeners.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpsListeners.go @@ -9,6 +9,12 @@ import ( type HttpsListenersService service +type HttpsListenersAPI interface { + GetHttpsListenersCommand(input *GetHttpsListenersCommandInput) (result *HttpsListenersView, resp *http.Response, err error) + GetHttpsListenerCommand(input *GetHttpsListenerCommandInput) (result *HttpsListenerView, resp *http.Response, err error) + UpdateHttpsListener(input *UpdateHttpsListenerInput) (result *HttpsListenerView, resp 
*http.Response, err error) +} + //GetHttpsListenersCommand - Get all HTTPS Listeners //RequestType: GET //Input: input *GetHttpsListenersCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/identityMappings.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/identityMappings.go index 8b1e9687..02be8649 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/identityMappings.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/identityMappings.go @@ -9,6 +9,16 @@ import ( type IdentityMappingsService service +type IdentityMappingsAPI interface { + GetIdentityMappingsCommand(input *GetIdentityMappingsCommandInput) (result *IdentityMappingsView, resp *http.Response, err error) + AddIdentityMappingCommand(input *AddIdentityMappingCommandInput) (result *IdentityMappingView, resp *http.Response, err error) + GetIdentityMappingDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) + GetIdentityMappingDescriptorCommand(input *GetIdentityMappingDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) + DeleteIdentityMappingCommand(input *DeleteIdentityMappingCommandInput) (resp *http.Response, err error) + GetIdentityMappingCommand(input *GetIdentityMappingCommandInput) (result *IdentityMappingView, resp *http.Response, err error) + UpdateIdentityMappingCommand(input *UpdateIdentityMappingCommandInput) (result *IdentityMappingView, resp *http.Response, err error) +} + //GetIdentityMappingsCommand - Get all Identity Mappings //RequestType: GET //Input: input *GetIdentityMappingsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/keyPairs.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/keyPairs.go index 4d676b8b..f1f015e8 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/keyPairs.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/keyPairs.go @@ -9,6 +9,23 @@ import ( type KeyPairsService service +type KeyPairsAPI interface { + GetKeyPairsCommand(input *GetKeyPairsCommandInput) (result *KeyPairsView, resp *http.Response, err error) + GenerateKeyPairCommand(input *GenerateKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) + ImportKeyPairCommand(input *ImportKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) + KeyAlgorithms() (result *KeyAlgorithmsView, resp *http.Response, err error) + GetKeypairsCreatableGeneralNamesCommand() (result *SanTypes, resp *http.Response, err error) + DeleteKeyPairCommand(input *DeleteKeyPairCommandInput) (resp *http.Response, err error) + GetKeyPairCommand(input *GetKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) + PatchKeyPairCommand(input *PatchKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) + UpdateKeyPairCommand(input *UpdateKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) + ExportKeyPairCert(input *ExportKeyPairCertInput) (resp *http.Response, err error) + GenerateCsrCommand(input *GenerateCsrCommandInput) (resp *http.Response, err error) + ImportCSRResponseCommand(input *ImportCSRResponseCommandInput) (result *KeyPairView, resp *http.Response, err error) + ExportKeyPair(input *ExportKeyPairInput) (resp *http.Response, err error) + DeleteChainCertificateCommand(input *DeleteChainCertificateCommandInput) (resp *http.Response, err error) +} + //GetKeyPairsCommand - Get all Key Pairs //RequestType: GET //Input: 
input *GetKeyPairsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/license.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/license.go index cee291b9..699f1cbb 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/license.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/license.go @@ -8,6 +8,11 @@ import ( type LicenseService service +type LicenseAPI interface { + GetLicenseCommand() (result *LicenseView, resp *http.Response, err error) + ImportLicenseCommand(input *ImportLicenseCommandInput) (result *LicenseView, resp *http.Response, err error) +} + //GetLicenseCommand - Get the License File //RequestType: GET //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go index 2069361a..5592cf8e 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go @@ -21,35 +21,35 @@ type AccessTokenValidatorsView struct { //AcmeAccountView - An ACME Account associated with a CA. type AcmeAccountView struct { AcmeServerId *string `json:"acmeServerId,omitempty"` + Id *string `json:"id,omitempty"` KeyAlgorithm *string `json:"keyAlgorithm,omitempty"` PrivateKey *HiddenFieldView `json:"privateKey,omitempty"` PublicKey *PublicKeyView `json:"publicKey,omitempty"` Url *string `json:"url,omitempty"` - Id json.Number `json:"id,omitempty"` } -//AcmeCertStatus -type AcmeCertStatus struct { - Problems map[string]*ProblemDocument `json:"problems"` - State *string `json:"state"` +//AcmeCertStatusView - The status of a certificate. +type AcmeCertStatusView struct { + Problems map[string]*ProblemDocumentView `json:"problems"` + State *string `json:"state"` } -//AcmeCertificateRequestView - A request for a signed certificate. +//AcmeCertificateRequestView - A reference to a Key Pair to be signed by the ACME protocol. type AcmeCertificateRequestView struct { - AcmeAccountId *string `json:"acmeAccountId"` - AcmeCertStatus *AcmeCertStatus `json:"acmeCertStatus"` - AcmeServerId *string `json:"acmeServerId"` - KeyPairId *int `json:"keyPairId"` - Url *string `json:"url"` - Id json.Number `json:"id,omitempty"` + AcmeAccountId *string `json:"acmeAccountId"` + AcmeCertStatus *AcmeCertStatusView `json:"acmeCertStatus"` + AcmeServerId *string `json:"acmeServerId"` + Id *string `json:"id,omitempty"` + KeyPairId *int `json:"keyPairId"` + Url *string `json:"url"` } //AcmeServerView - An ACME server. type AcmeServerView struct { AcmeAccounts []*LinkView `json:"acmeAccounts,omitempty"` + Id *string `json:"id,omitempty"` Name *string `json:"name"` Url *string `json:"url"` - Id *string `json:"id,omitempty"` } //AcmeServersView - A collection of ACME servers. @@ -97,7 +97,7 @@ type AgentCertificateView struct { Alias *string `json:"alias"` ChainCertificate *bool `json:"chainCertificate"` Expires *string `json:"expires"` - Id json.Number `json:"id,omitempty"` + Id *int `json:"id,omitempty"` IssuerDn *string `json:"issuerDn"` KeyPair *bool `json:"keyPair"` Md5sum *string `json:"md5sum"` @@ -153,8 +153,8 @@ type AlgorithmsView struct { //ApiErrorView - An API error used by the PingAccess Administrative UI. type ApiErrorView struct { - Flash *[]*string `json:"flash"` - Form map[string]*[]string `json:"form"` + Flash *[]*string `json:"flash"` + Form map[string]*[]*string `json:"form"` } //ApplicationView - An application. 
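The models.go hunks above and below replace the string-backed json.Number id fields with typed *int pointers (AgentCertificateView here; EngineCertificateView, KeyPairView and TrustedCertView further down). A minimal standalone sketch of the decoding difference this implies; the struct names below are illustrative, not from the SDK:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // numberId mirrors the old shape: json.Number is string-backed, so it
    // tolerates an id encoded either as a JSON number or as a quoted number.
    type numberId struct {
    	Id json.Number `json:"id,omitempty"`
    }

    // intId mirrors the new shape: *int accepts only a JSON number, and a
    // nil pointer is cleanly omitted by omitempty on marshal.
    type intId struct {
    	Id *int `json:"id,omitempty"`
    }

    func main() {
    	var n numberId
    	fmt.Println(json.Unmarshal([]byte(`{"id":"42"}`), &n)) // <nil>: quoted id tolerated

    	var i intId
    	fmt.Println(json.Unmarshal([]byte(`{"id":"42"}`), &i)) // error: string into int field
    	fmt.Println(json.Unmarshal([]byte(`{"id":42}`), &i))   // <nil>: plain number accepted
    }

Either shape omits an unset id thanks to omitempty; the *int form additionally guarantees the wire type is numeric.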
@@ -318,7 +318,7 @@ type ConfigurationField struct { Help *Help `json:"help"` Label *string `json:"label"` Name *string `json:"name"` - Options *[]*ConfigurationOption `json:"options"` + Options []*ConfigurationOption `json:"options"` ParentField *ConfigurationParentField `json:"parentField"` Required *bool `json:"required"` Type *string `json:"type"` @@ -360,7 +360,7 @@ type EngineCertificateView struct { Alias *string `json:"alias"` ChainCertificate *bool `json:"chainCertificate"` Expires *string `json:"expires"` - Id json.Number `json:"id,omitempty"` + Id *int `json:"id,omitempty"` IssuerDn *string `json:"issuerDn"` KeyPair *bool `json:"keyPair"` Md5sum *string `json:"md5sum"` @@ -556,13 +556,13 @@ type ItemView struct { //JsonWebKey - A JSON Web Key. type JsonWebKey struct { - Algorithm *string `json:"algorithm"` - Key *Key `json:"key"` - KeyId *string `json:"keyId"` - KeyOps *[]*string `json:"keyOps"` - KeyType *string `json:"keyType"` - PublicKey *PublicKeyView `json:"publicKey"` - Use *string `json:"use"` + Algorithm *string `json:"algorithm"` + Key *Key `json:"key"` + KeyId *string `json:"keyId"` + KeyOps *[]*string `json:"keyOps"` + KeyType *string `json:"keyType"` + PublicKey *PublicKey `json:"publicKey"` + Use *string `json:"use"` } //Key - A key. @@ -593,7 +593,7 @@ type KeyPairView struct { CsrPending *bool `json:"csrPending"` Expires *int `json:"expires"` HsmProviderId *int `json:"hsmProviderId,omitempty"` - Id json.Number `json:"id,omitempty"` + Id *int `json:"id,omitempty"` IssuerDn *string `json:"issuerDn"` Md5sum *string `json:"md5sum"` SerialNumber *string `json:"serialNumber"` @@ -862,14 +862,14 @@ type PingOne4CView struct { UseProxy *bool `json:"useProxy,omitempty"` } -//PolicyItem - The policy items associated with the application +//PolicyItem - The policy items associated with the application - Now an undocumented model type type PolicyItem struct { Id json.Number `json:"id,omitempty"` Type *string `json:"type,omitempty"` } -//ProblemDocument -type ProblemDocument struct { +//ProblemDocumentView - An RFC 7807 problem details object. +type ProblemDocumentView struct { Detail *string `json:"detail"` Type *string `json:"type"` } @@ -879,6 +879,10 @@ type ProtocolSourceView struct { HeaderName *string `json:"headerName"` } +//PublicKey - An undocumented model type +type PublicKey struct { +} + //PublicKeyView - A public key. type PublicKeyView struct { Created *time.Time `json:"created,omitempty"` @@ -894,7 +898,6 @@ type QueryParameterView struct { //RedirectView - A Redirect. type RedirectView struct { AuditLevel *string `json:"auditLevel,omitempty"` - Id json.Number `json:"id,omitempty"` ResponseCode *int `json:"responseCode,omitempty"` Source *HostPortView `json:"source,omitempty"` Target *TargetHostPortView `json:"target,omitempty"` @@ -965,7 +968,6 @@ type ResourceMatchingEntryView struct { //ResourceMatchingEvaluationOrderView - Specifies an ordering of Resource Matching Entries. type ResourceMatchingEvaluationOrderView struct { Entries []*ResourceMatchingEntryView `json:"entries"` - Id json.Number `json:"id,omitempty"` } //ResourceOrderView - Specifies an ordering of Application Resources. @@ -993,7 +995,6 @@ type ResourceView struct { //ResourcesView - A collection of resources. 
type ResourcesView struct { - Id json.Number `json:"id,omitempty"` Items []*ResourceView `json:"items"` } @@ -1011,7 +1012,7 @@ type RuleDescriptorView struct { ClassName *string `json:"className"` ConfigurationFields []*ConfigurationField `json:"configurationFields"` Label *string `json:"label"` - Modes *[]*string `json:"modes"` + Modes []*string `json:"modes"` Type *string `json:"type"` } @@ -1058,7 +1059,7 @@ type RulesView struct { Items []*RuleView `json:"items"` } -//SanType - The valid General Names for creating Subject Alternative Names +//SanType - An undocumented model type type SanType struct { Description *string `json:"description,omitempty"` Name *string `json:"name,omitempty"` @@ -1187,7 +1188,7 @@ type TokenProviderSettingView struct { type TrustedCertView struct { Alias *string `json:"alias"` Expires *int `json:"expires"` - Id json.Number `json:"id,omitempty"` + Id *int `json:"id,omitempty"` IssuerDn *string `json:"issuerDn"` Md5sum *string `json:"md5sum"` SerialNumber *string `json:"serialNumber"` diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauth.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauth.go index 5e02ec74..f2e04962 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauth.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauth.go @@ -8,6 +8,12 @@ import ( type OauthService service +type OauthAPI interface { + DeleteAuthorizationServerCommand() (resp *http.Response, err error) + GetAuthorizationServerCommand() (result *AuthorizationServerView, resp *http.Response, err error) + UpdateAuthorizationServerCommand(input *UpdateAuthorizationServerCommandInput) (result *AuthorizationServerView, resp *http.Response, err error) +} + //DeleteAuthorizationServerCommand - Resets the OpenID Connect Provider configuration to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauthKeyManagement.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauthKeyManagement.go index 4745b29c..ac5b98e5 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauthKeyManagement.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauthKeyManagement.go @@ -8,6 +8,14 @@ import ( type OauthKeyManagementService service +type OauthKeyManagementAPI interface { + DeleteOAuthKeyManagementCommand() (resp *http.Response, err error) + GetOAuthKeyManagementCommand() (result *OAuthKeyManagementView, resp *http.Response, err error) + UpdateOAuthKeyManagementCommand(input *UpdateOAuthKeyManagementCommandInput) (result *OAuthKeyManagementView, resp *http.Response, err error) + GetOAuthKeySetCommand() (result *KeySetView, resp *http.Response, err error) + UpdateOAuthKeySetCommand(input *UpdateOAuthKeySetCommandInput) (result *KeySetView, resp *http.Response, err error) +} + //DeleteOAuthKeyManagementCommand - Resets the OAuth Key Management configuration to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oidc.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oidc.go index 2d3b0d8e..36424c23 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oidc.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oidc.go @@ -9,6 +9,15 @@ import ( type OidcService service +type OidcAPI interface { + DeleteOIDCProviderCommand() (resp *http.Response, err error) + 
GetOIDCProviderCommand() (result *OIDCProviderView, resp *http.Response, err error) + UpdateOIDCProviderCommand(input *UpdateOIDCProviderCommandInput) (result *OIDCProviderView, resp *http.Response, err error) + GetOIDCProviderPluginDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) + GetOIDCProviderPluginDescriptorCommand(input *GetOIDCProviderPluginDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) + GetOIDCProviderMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) +} + //DeleteOIDCProviderCommand - Resets the OpenID Connect Provider configuration to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go index b6344965..1557dc9e 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go @@ -11,6 +11,7 @@ import ( "net/http/httputil" "net/url" "runtime" + "strings" ) const logReqMsg = `DEBUG: Request %s Details: @@ -27,63 +28,59 @@ type Client struct { Password string BaseURL *url.URL Context string + LogDebug bool httpClient *http.Client - logDebug bool - AccessTokenValidators *AccessTokenValidatorsService - Acme *AcmeService - AdminConfigs *AdminConfigService - AdminSessions *AdminSessionInfoService - Agents *AgentsService - Applications *ApplicationsService - Auth *AuthService - AuthnReqLists *AuthnReqListsService - AuthTokenManagements *AuthTokenManagementService - Backups *BackupService - Certificates *CertificatesService - Config *ConfigService - EngineListeners *EngineListenersService - Engines *EnginesService - GlobalUnprotectedResources *GlobalUnprotectedResourcesService - HighAvailability *HighAvailabilityService - HsmProviders *HsmProvidersService - HttpConfig *HttpConfigService - HttpsListeners *HttpsListenersService - IdentityMappings *IdentityMappingsService - KeyPairs *KeyPairsService - License *LicenseService - OAuth *OauthService - OauthKeyManagement *OauthKeyManagementService - Oidc *OidcService - PingFederate *PingfederateService - PingOne *PingoneService - Proxies *ProxiesService - Redirects *RedirectsService - RejectionHandlers *RejectionHandlersService - Rules *RulesService - Rulesets *RulesetsService - SharedSecrets *SharedSecretsService - SiteAuthenticators *SiteAuthenticatorsService - Sites *SitesService - ThirdPartyServices *ThirdPartyServicesService - TokenProvider *TokenProviderService - TrustedCertificateGroups *TrustedCertificateGroupsService - UnknownResources *UnknownResourcesService - Users *UsersService - Version *VersionService - Virtualhosts *VirtualhostsService - WebSessionManagement *WebSessionManagementService - WebSessions *WebSessionsService + AccessTokenValidators AccessTokenValidatorsAPI + Acme AcmeAPI + AdminConfig AdminConfigAPI + AdminSessionInfo AdminSessionInfoAPI + Agents AgentsAPI + Applications ApplicationsAPI + Auth AuthAPI + AuthTokenManagement AuthTokenManagementAPI + AuthnReqLists AuthnReqListsAPI + Backup BackupAPI + Certificates CertificatesAPI + Config ConfigAPI + EngineListeners EngineListenersAPI + Engines EnginesAPI + GlobalUnprotectedResources GlobalUnprotectedResourcesAPI + HighAvailability HighAvailabilityAPI + HsmProviders HsmProvidersAPI + HttpConfig HttpConfigAPI + HttpsListeners HttpsListenersAPI + IdentityMappings IdentityMappingsAPI + KeyPairs KeyPairsAPI + License 
LicenseAPI + Oauth OauthAPI + OauthKeyManagement OauthKeyManagementAPI + Oidc OidcAPI + Pingfederate PingfederateAPI + Pingone PingoneAPI + Proxies ProxiesAPI + Redirects RedirectsAPI + RejectionHandlers RejectionHandlersAPI + Rules RulesAPI + Rulesets RulesetsAPI + SharedSecrets SharedSecretsAPI + SiteAuthenticators SiteAuthenticatorsAPI + Sites SitesAPI + ThirdPartyServices ThirdPartyServicesAPI + TokenProvider TokenProviderAPI + TrustedCertificateGroups TrustedCertificateGroupsAPI + UnknownResources UnknownResourcesAPI + Users UsersAPI + Version VersionAPI + Virtualhosts VirtualhostsAPI + WebSessionManagement WebSessionManagementAPI + WebSessions WebSessionsAPI } type service struct { client *Client } -func (c *Client) LogDebug(enable bool) { - c.logDebug = enable -} - func NewClient(username string, password string, baseUrl *url.URL, context string, httpClient *http.Client) *Client { if httpClient == nil { httpClient = http.DefaultClient @@ -96,14 +93,14 @@ func NewClient(username string, password string, baseUrl *url.URL, context strin c.AccessTokenValidators = &AccessTokenValidatorsService{client: c} c.Acme = &AcmeService{client: c} - c.AdminConfigs = &AdminConfigService{client: c} - c.AdminSessions = &AdminSessionInfoService{client: c} + c.AdminConfig = &AdminConfigService{client: c} + c.AdminSessionInfo = &AdminSessionInfoService{client: c} c.Agents = &AgentsService{client: c} c.Applications = &ApplicationsService{client: c} c.Auth = &AuthService{client: c} + c.AuthTokenManagement = &AuthTokenManagementService{client: c} c.AuthnReqLists = &AuthnReqListsService{client: c} - c.AuthTokenManagements = &AuthTokenManagementService{client: c} - c.Backups = &BackupService{client: c} + c.Backup = &BackupService{client: c} c.Certificates = &CertificatesService{client: c} c.Config = &ConfigService{client: c} c.EngineListeners = &EngineListenersService{client: c} @@ -116,11 +113,11 @@ func NewClient(username string, password string, baseUrl *url.URL, context strin c.IdentityMappings = &IdentityMappingsService{client: c} c.KeyPairs = &KeyPairsService{client: c} c.License = &LicenseService{client: c} - c.OAuth = &OauthService{client: c} + c.Oauth = &OauthService{client: c} c.OauthKeyManagement = &OauthKeyManagementService{client: c} c.Oidc = &OidcService{client: c} - c.PingFederate = &PingfederateService{client: c} - c.PingOne = &PingoneService{client: c} + c.Pingfederate = &PingfederateService{client: c} + c.Pingone = &PingoneService{client: c} c.Proxies = &ProxiesService{client: c} c.Redirects = &RedirectsService{client: c} c.RejectionHandlers = &RejectionHandlersService{client: c} @@ -165,7 +162,7 @@ func (c *Client) newRequest(method string, path *url.URL, body interface{}) (*ht } func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) { - if c.logDebug { + if c.LogDebug { requestDump, err := httputil.DumpRequest(req, true) if err != nil { fmt.Println(err) @@ -176,7 +173,7 @@ func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) { if err != nil { return nil, err } - if c.logDebug { + if c.LogDebug { responseDump, err := httputil.DumpResponse(resp, true) if err != nil { fmt.Println(err) @@ -191,7 +188,7 @@ func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) { if v != nil { if w, ok := v.(io.Writer); ok { - io.Copy(w, resp.Body) + _, err = io.Copy(w, resp.Body) } else { decErr := json.NewDecoder(resp.Body).Decode(v) if decErr == io.EOF { @@ -205,11 +202,6 @@ func (c *Client) do(req *http.Request, v interface{}) 
(*http.Response, error) { return resp, err } -type FailureResponse struct { - Form map[string][]string `json:"form"` - Flash []string `json:"flash"` -} - // CheckResponse checks the API response for errors, and returns them if // present. A response is considered an error if it has a status code outside // the 200 range. @@ -221,31 +213,37 @@ func CheckResponse(r *http.Response) error { return nil } - errorResponse := FailureResponse{} + errorResponse := ApiErrorView{} data, err := ioutil.ReadAll(r.Body) if err == nil && data != nil { - json.Unmarshal(data, &errorResponse) - } - - return &PingAccessError{ - Response: errorResponse, + err = json.Unmarshal(data, &errorResponse) + if err != nil { + return fmt.Errorf("unable to parse error response: %s", data) + } } -} -// PingAccessError occurs when PingAccess returns a non 2XX response -type PingAccessError struct { - Response FailureResponse + return &errorResponse } -func (r *PingAccessError) Error() (message string) { - if r.Response.Flash != nil { - message = fmt.Sprintf("%s\n", r.Response.Flash) +func (r *ApiErrorView) Error() (message string) { + if r.Flash != nil && len(*r.Flash) > 0 { + var msgs []string + for i := range *r.Flash { + msgs = append(msgs, *(*r.Flash)[i]) + } + message = strings.Join(msgs, ", ") } - for _, v := range r.Response.Form { - for _, s := range v { - message = fmt.Sprintf(message + s + "\n") + + if len(r.Form) > 0 { + for s, i := range r.Form { + var msgs []string + for _, j := range *i { + msgs = append(msgs, *j) + } + message += fmt.Sprintf(":\n%s contains %d validation failures:\n\t%s", s, len(msgs), strings.Join(msgs, "\n\t")) + } } + return } diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccessSDK.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccessSDK.go index e988a0ac..f16c2b54 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccessSDK.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccessSDK.go @@ -2,4 +2,4 @@ package pingaccess const SDKName = "pingaccess-sdk-go" -const SDKVersion = "6.0.1" +const SDKVersion = "6.0.2.0" diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingfederate.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingfederate.go index 2f976ee8..600d0ca1 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingfederate.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingfederate.go @@ -8,6 +8,23 @@ import ( type PingfederateService service +type PingfederateAPI interface { + DeletePingFederateCommand() (resp *http.Response, err error) + GetPingFederateCommand() (result *PingFederateRuntimeView, resp *http.Response, err error) + UpdatePingFederateCommand(input *UpdatePingFederateCommandInput) (result *PingFederateRuntimeView, resp *http.Response, err error) + DeletePingFederateAccessTokensCommand() (resp *http.Response, err error) + GetPingFederateAccessTokensCommand() (result *PingFederateAccessTokenView, resp *http.Response, err error) + UpdatePingFederateAccessTokensCommand(input *UpdatePingFederateAccessTokensCommandInput) (result *PingFederateAccessTokenView, resp *http.Response, err error) + DeletePingFederateAdminCommand() (resp *http.Response, err error) + GetPingFederateAdminCommand() (result *PingFederateAdminView, resp *http.Response, err error) + UpdatePingFederateAdminCommand(input *UpdatePingFederateAdminCommandInput) (result *PingFederateAdminView, resp *http.Response, err error) +
GetLegacyPingFederateMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) + DeletePingFederateRuntimeCommand() (resp *http.Response, err error) + GetPingFederateRuntimeCommand() (result *PingFederateMetadataRuntimeView, resp *http.Response, err error) + UpdatePingFederateRuntimeCommand(input *UpdatePingFederateRuntimeCommandInput) (result *PingFederateMetadataRuntimeView, resp *http.Response, err error) + GetPingFederateMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) +} + //DeletePingFederateCommand - [Attention: This endpoint "/pingfederate" is deprecated. Please use /pingfederate/runtime to configure PingFederate instead] Resets the PingFederate configuration to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingone.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingone.go index c49fef2c..be383e60 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingone.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingone.go @@ -8,6 +8,13 @@ import ( type PingoneService service +type PingoneAPI interface { + DeletePingOne4CCommand() (resp *http.Response, err error) + GetPingOne4CCommand() (result *PingOne4CView, resp *http.Response, err error) + UpdatePingOne4CCommand(input *UpdatePingOne4CCommandInput) (result *PingOne4CView, resp *http.Response, err error) + GetPingOne4CMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) +} + //DeletePingOne4CCommand - Resets the PingOne For Customers configuration to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/proxies.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/proxies.go index 78b34987..5432799d 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/proxies.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/proxies.go @@ -9,6 +9,14 @@ import ( type ProxiesService service +type ProxiesAPI interface { + GetProxiesCommand(input *GetProxiesCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) + AddProxyCommand(input *AddProxyCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) + DeleteProxyCommand(input *DeleteProxyCommandInput) (resp *http.Response, err error) + GetProxyCommand(input *GetProxyCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) + UpdateProxyCommand(input *UpdateProxyCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) +} + //GetProxiesCommand - Get all Proxies //RequestType: GET //Input: input *GetProxiesCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/redirects.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/redirects.go index 80b00761..bd08bd72 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/redirects.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/redirects.go @@ -9,6 +9,14 @@ import ( type RedirectsService service +type RedirectsAPI interface { + GetRedirectsCommand(input *GetRedirectsCommandInput) (result *RedirectsView, resp *http.Response, err error) + AddRedirectCommand(input *AddRedirectCommandInput) (result *RedirectView, resp *http.Response, err error) + DeleteRedirectCommand(input *DeleteRedirectCommandInput) (resp *http.Response, err error) + GetRedirectCommand(input 
*GetRedirectCommandInput) (result *RedirectView, resp *http.Response, err error) + UpdateRedirectCommand(input *UpdateRedirectCommandInput) (result *RedirectView, resp *http.Response, err error) +} + //GetRedirectsCommand - Get all Redirects //RequestType: GET //Input: input *GetRedirectsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rejectionHandlers.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rejectionHandlers.go index 09c32a32..3d0bd3c4 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rejectionHandlers.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rejectionHandlers.go @@ -9,6 +9,16 @@ import ( type RejectionHandlersService service +type RejectionHandlersAPI interface { + GetRejectionHandlersCommand(input *GetRejectionHandlersCommandInput) (result *RejectionHandlersView, resp *http.Response, err error) + AddRejectionHandlerCommand(input *AddRejectionHandlerCommandInput) (result *RejectionHandlerView, resp *http.Response, err error) + GetRejectionHandlerDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) + GetRejecitonHandlerDescriptorCommand(input *GetRejecitonHandlerDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) + DeleteRejectionHandlerCommand(input *DeleteRejectionHandlerCommandInput) (resp *http.Response, err error) + GetRejectionHandlerCommand(input *GetRejectionHandlerCommandInput) (result *RejectionHandlerView, resp *http.Response, err error) + UpdateRejectionHandlerCommand(input *UpdateRejectionHandlerCommandInput) (result *RejectionHandlerView, resp *http.Response, err error) +} + //GetRejectionHandlersCommand - Get all Rejection Handlers //RequestType: GET //Input: input *GetRejectionHandlersCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rules.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rules.go index 777cd0aa..079531f5 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rules.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rules.go @@ -9,6 +9,16 @@ import ( type RulesService service +type RulesAPI interface { + GetRulesCommand(input *GetRulesCommandInput) (result *RulesView, resp *http.Response, err error) + AddRuleCommand(input *AddRuleCommandInput) (result *RuleView, resp *http.Response, err error) + GetRuleDescriptorsCommand() (result *RuleDescriptorsView, resp *http.Response, err error) + GetRuleDescriptorCommand(input *GetRuleDescriptorCommandInput) (result *RuleDescriptorView, resp *http.Response, err error) + DeleteRuleCommand(input *DeleteRuleCommandInput) (resp *http.Response, err error) + GetRuleCommand(input *GetRuleCommandInput) (result *RuleView, resp *http.Response, err error) + UpdateRuleCommand(input *UpdateRuleCommandInput) (result *RuleView, resp *http.Response, err error) +} + //GetRulesCommand - Get all Rules //RequestType: GET //Input: input *GetRulesCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rulesets.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rulesets.go index 4a04e969..a6f1d856 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rulesets.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rulesets.go @@ -9,6 +9,16 @@ import ( type RulesetsService service +type RulesetsAPI interface { + GetRuleSetsCommand(input *GetRuleSetsCommandInput) (result *RuleSetsView, resp 
*http.Response, err error) + AddRuleSetCommand(input *AddRuleSetCommandInput) (result *RuleSetView, resp *http.Response, err error) + GetRuleSetElementTypesCommand() (result *RuleSetElementTypesView, resp *http.Response, err error) + GetRuleSetSuccessCriteriaCommand() (result *RuleSetSuccessCriteriaView, resp *http.Response, err error) + DeleteRuleSetCommand(input *DeleteRuleSetCommandInput) (resp *http.Response, err error) + GetRuleSetCommand(input *GetRuleSetCommandInput) (result *RuleSetView, resp *http.Response, err error) + UpdateRuleSetCommand(input *UpdateRuleSetCommandInput) (result *RuleSetView, resp *http.Response, err error) +} + //GetRuleSetsCommand - Get all Rule Sets //RequestType: GET //Input: input *GetRuleSetsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sharedSecrets.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sharedSecrets.go index 2435db64..49defc9f 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sharedSecrets.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sharedSecrets.go @@ -9,6 +9,13 @@ import ( type SharedSecretsService service +type SharedSecretsAPI interface { + GetSharedSecretsCommand(input *GetSharedSecretsCommandInput) (result *SharedSecretsView, resp *http.Response, err error) + AddSharedSecretCommand(input *AddSharedSecretCommandInput) (result *SharedSecretView, resp *http.Response, err error) + DeleteSharedSecretCommand(input *DeleteSharedSecretCommandInput) (resp *http.Response, err error) + GetSharedSecretCommand(input *GetSharedSecretCommandInput) (result *SharedSecretView, resp *http.Response, err error) +} + //GetSharedSecretsCommand - Get all Shared Secrets //RequestType: GET //Input: input *GetSharedSecretsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/siteAuthenticators.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/siteAuthenticators.go index c1806e78..67d9beda 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/siteAuthenticators.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/siteAuthenticators.go @@ -9,6 +9,16 @@ import ( type SiteAuthenticatorsService service +type SiteAuthenticatorsAPI interface { + GetSiteAuthenticatorsCommand(input *GetSiteAuthenticatorsCommandInput) (result *SiteAuthenticatorsView, resp *http.Response, err error) + AddSiteAuthenticatorCommand(input *AddSiteAuthenticatorCommandInput) (result *SiteAuthenticatorView, resp *http.Response, err error) + GetSiteAuthenticatorDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) + GetSiteAuthenticatorDescriptorCommand(input *GetSiteAuthenticatorDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) + DeleteSiteAuthenticatorCommand(input *DeleteSiteAuthenticatorCommandInput) (resp *http.Response, err error) + GetSiteAuthenticatorCommand(input *GetSiteAuthenticatorCommandInput) (result *SiteAuthenticatorView, resp *http.Response, err error) + UpdateSiteAuthenticatorCommand(input *UpdateSiteAuthenticatorCommandInput) (result *SiteAuthenticatorView, resp *http.Response, err error) +} + //GetSiteAuthenticatorsCommand - Get all Site Authenticators //RequestType: GET //Input: input *GetSiteAuthenticatorsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sites.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sites.go index 955166d6..5ff7571e 100644 --- 
a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sites.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sites.go @@ -9,6 +9,14 @@ import ( type SitesService service +type SitesAPI interface { + GetSitesCommand(input *GetSitesCommandInput) (result *SitesView, resp *http.Response, err error) + AddSiteCommand(input *AddSiteCommandInput) (result *SiteView, resp *http.Response, err error) + DeleteSiteCommand(input *DeleteSiteCommandInput) (resp *http.Response, err error) + GetSiteCommand(input *GetSiteCommandInput) (result *SiteView, resp *http.Response, err error) + UpdateSiteCommand(input *UpdateSiteCommandInput) (result *SiteView, resp *http.Response, err error) +} + //GetSitesCommand - Get all Sites //RequestType: GET //Input: input *GetSitesCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/thirdPartyServices.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/thirdPartyServices.go index e97b3be4..bc8b80aa 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/thirdPartyServices.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/thirdPartyServices.go @@ -9,6 +9,14 @@ import ( type ThirdPartyServicesService service +type ThirdPartyServicesAPI interface { + GetThirdPartyServicesCommand(input *GetThirdPartyServicesCommandInput) (result *ThirdPartyServicesView, resp *http.Response, err error) + AddThirdPartyServiceCommand(input *AddThirdPartyServiceCommandInput) (result *ThirdPartyServiceView, resp *http.Response, err error) + DeleteThirdPartyServiceCommand(input *DeleteThirdPartyServiceCommandInput) (resp *http.Response, err error) + GetThirdPartyServiceCommand(input *GetThirdPartyServiceCommandInput) (result *ThirdPartyServiceView, resp *http.Response, err error) + UpdateThirdPartyServiceCommand(input *UpdateThirdPartyServiceCommandInput) (result *ThirdPartyServiceView, resp *http.Response, err error) +} + //GetThirdPartyServicesCommand - Get all Third-Party Services //RequestType: GET //Input: input *GetThirdPartyServicesCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/tokenProvider.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/tokenProvider.go index e7ff680f..9f02aa84 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/tokenProvider.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/tokenProvider.go @@ -8,6 +8,12 @@ import ( type TokenProviderService service +type TokenProviderAPI interface { + DeleteTokenProviderSettingCommand() (resp *http.Response, err error) + GetTokenProviderSettingCommand() (result *TokenProviderSettingView, resp *http.Response, err error) + UpdateTokenProviderSettingCommand(input *UpdateTokenProviderSettingCommandInput) (result *TokenProviderSettingView, resp *http.Response, err error) +} + //DeleteTokenProviderSettingCommand - Resets the Token Provider settings to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/trustedCertificateGroups.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/trustedCertificateGroups.go index 98b1a830..47410471 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/trustedCertificateGroups.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/trustedCertificateGroups.go @@ -9,6 +9,14 @@ import ( type TrustedCertificateGroupsService service +type TrustedCertificateGroupsAPI interface { + 
GetTrustedCertificateGroupsCommand(input *GetTrustedCertificateGroupsCommandInput) (result *TrustedCertificateGroupsView, resp *http.Response, err error) + AddTrustedCertificateGroupCommand(input *AddTrustedCertificateGroupCommandInput) (result *TrustedCertificateGroupView, resp *http.Response, err error) + DeleteTrustedCertificateGroupCommand(input *DeleteTrustedCertificateGroupCommandInput) (resp *http.Response, err error) + GetTrustedCertificateGroupCommand(input *GetTrustedCertificateGroupCommandInput) (result *TrustedCertificateGroupView, resp *http.Response, err error) + UpdateTrustedCertificateGroupCommand(input *UpdateTrustedCertificateGroupCommandInput) (result *TrustedCertificateGroupView, resp *http.Response, err error) +} + //GetTrustedCertificateGroupsCommand - Get all Trusted Certificate Groups //RequestType: GET //Input: input *GetTrustedCertificateGroupsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/unknownResources.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/unknownResources.go index 64d92c0f..e93ea899 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/unknownResources.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/unknownResources.go @@ -8,6 +8,12 @@ import ( type UnknownResourcesService service +type UnknownResourcesAPI interface { + Delete() (resp *http.Response, err error) + Get() (result *UnknownResourceSettingsView, resp *http.Response, err error) + Update(input *UpdateInput) (result *UnknownResourceSettingsView, resp *http.Response, err error) +} + //Delete - Resets the global settings for unknown resources to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/users.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/users.go index c7d65b4c..7c4f1e5c 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/users.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/users.go @@ -9,6 +9,13 @@ import ( type UsersService service +type UsersAPI interface { + GetUsersCommand(input *GetUsersCommandInput) (result *UsersView, resp *http.Response, err error) + GetUserCommand(input *GetUserCommandInput) (result *UserView, resp *http.Response, err error) + UpdateUserCommand(input *UpdateUserCommandInput) (result *UserView, resp *http.Response, err error) + UpdateUserPasswordCommand(input *UpdateUserPasswordCommandInput) (result *UserPasswordView, resp *http.Response, err error) +} + //GetUsersCommand - Get all Users //RequestType: GET //Input: input *GetUsersCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/version.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/version.go index e5663d54..6bc6c2fa 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/version.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/version.go @@ -8,6 +8,10 @@ import ( type VersionService service +type VersionAPI interface { + VersionCommand() (result *VersionDocClass, resp *http.Response, err error) +} + //VersionCommand - Get the PingAccess version number //RequestType: GET //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/virtualhosts.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/virtualhosts.go index d0a3b51a..c5d5438c 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/virtualhosts.go +++ 
b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/virtualhosts.go @@ -9,6 +9,14 @@ import ( type VirtualhostsService service +type VirtualhostsAPI interface { + GetVirtualHostsCommand(input *GetVirtualHostsCommandInput) (result *VirtualHostsView, resp *http.Response, err error) + AddVirtualHostCommand(input *AddVirtualHostCommandInput) (result *VirtualHostView, resp *http.Response, err error) + DeleteVirtualHostCommand(input *DeleteVirtualHostCommandInput) (resp *http.Response, err error) + GetVirtualHostCommand(input *GetVirtualHostCommandInput) (result *VirtualHostView, resp *http.Response, err error) + UpdateVirtualHostCommand(input *UpdateVirtualHostCommandInput) (result *VirtualHostView, resp *http.Response, err error) +} + //GetVirtualHostsCommand - Get all Virtual Hosts //RequestType: GET //Input: input *GetVirtualHostsCommandInput diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessionManagement.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessionManagement.go index 3c676f2a..971b63e6 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessionManagement.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessionManagement.go @@ -8,6 +8,21 @@ import ( type WebSessionManagementService service +type WebSessionManagementAPI interface { + DeleteWebSessionManagementCommand() (resp *http.Response, err error) + GetWebSessionManagementCommand() (result *WebSessionManagementView, resp *http.Response, err error) + UpdateWebSessionManagementCommand(input *UpdateWebSessionManagementCommandInput) (result *WebSessionManagementView, resp *http.Response, err error) + GetCookieTypes() (result *CookieTypesView, resp *http.Response, err error) + GetWebSessionSupportedEncryptionAlgorithmsCommand() (result *AlgorithmsView, resp *http.Response, err error) + GetWebSessionKeySetCommand() (result *KeySetView, resp *http.Response, err error) + UpdateWebSessionKeySetCommand(input *UpdateWebSessionKeySetCommandInput) (result *KeySetView, resp *http.Response, err error) + GetOidcLoginTypes() (result *OidcLoginTypesView, resp *http.Response, err error) + GetOidcScopesCommand(input *GetOidcScopesCommandInput) (result *SupportedScopesView, resp *http.Response, err error) + GetRequestPreservationTypes() (result *RequestPreservationTypesView, resp *http.Response, err error) + GetWebSessionSupportedSigningAlgorithms() (result *SigningAlgorithmsView, resp *http.Response, err error) + GetWebStorageTypes() (result *WebStorageTypesView, resp *http.Response, err error) +} + //DeleteWebSessionManagementCommand - Resets the Web Session Management configuration to default values //RequestType: DELETE //Input: diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessions.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessions.go index 1097d836..e64052d5 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessions.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessions.go @@ -9,6 +9,14 @@ import ( type WebSessionsService service +type WebSessionsAPI interface { + GetWebSessionsCommand(input *GetWebSessionsCommandInput) (result *WebSessionsView, resp *http.Response, err error) + AddWebSessionCommand(input *AddWebSessionCommandInput) (result *WebSessionView, resp *http.Response, err error) + DeleteWebSessionCommand(input *DeleteWebSessionCommandInput) (resp *http.Response, err error) + GetWebSessionCommand(input 
*GetWebSessionCommandInput) (result *WebSessionView, resp *http.Response, err error) + UpdateWebSessionCommand(input *UpdateWebSessionCommandInput) (result *WebSessionView, resp *http.Response, err error) +} + //GetWebSessionsCommand - Get all WebSessions //RequestType: GET //Input: input *GetWebSessionsCommandInput
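The *API interfaces added to each vendored SDK service above let the provider code depend on an abstraction rather than the concrete service structs, which is what makes the services mockable in unit tests. A minimal sketch of a test double, not part of the patch (the stub name is hypothetical; the interface and view types are the SDK's own, exactly as declared above), using the smallest of the new interfaces, VersionAPI:

package pingaccess_test

import (
	"net/http"

	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
)

// stubVersion is a hand-rolled test double for the version service.
type stubVersion struct{}

// Compile-time check that the stub satisfies the SDK interface.
var _ pingaccess.VersionAPI = stubVersion{}

// VersionCommand returns a canned version document instead of calling a live admin API.
func (stubVersion) VersionCommand() (*pingaccess.VersionDocClass, *http.Response, error) {
	v := "6.0.2"
	return &pingaccess.VersionDocClass{Version: &v}, nil, nil
}

Any consumer written against pingaccess.VersionAPI, rather than the concrete *pingaccess.VersionService, accepts the stub unchanged.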
diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore index 531fcc11..5091fb07 100644 --- a/vendor/github.com/jmespath/go-jmespath/.gitignore +++ b/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -1,4 +1,4 @@ -jpgo +/jpgo jmespath-fuzz.zip cpu.out go-jmespath.test
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go index 9cfa988b..8e26ffee 100644 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -2,7 +2,7 @@ package jmespath import "strconv" -// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is // safe for concurrent use by multiple goroutines. type JMESPath struct { ast ASTNode
diff --git a/vendor/modules.txt b/vendor/modules.txt index 157724ce..e29a5d3f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -18,7 +18,7 @@ github.com/Nvveen/Gotty github.com/agext/levenshtein # github.com/apparentlymart/go-textseg v1.0.0 github.com/apparentlymart/go-textseg/textseg -# github.com/aws/aws-sdk-go v1.15.78 +# github.com/aws/aws-sdk-go v1.25.3 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/awsutil @@ -28,6 +28,7 @@ github.com/aws/aws-sdk-go/aws/corehandlers github.com/aws/aws-sdk-go/aws/credentials github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds github.com/aws/aws-sdk-go/aws/credentials/endpointcreds +github.com/aws/aws-sdk-go/aws/credentials/processcreds github.com/aws/aws-sdk-go/aws/credentials/stscreds github.com/aws/aws-sdk-go/aws/csm github.com/aws/aws-sdk-go/aws/defaults @@ -39,12 +40,14 @@ github.com/aws/aws-sdk-go/aws/signer/v4 github.com/aws/aws-sdk-go/internal/ini github.com/aws/aws-sdk-go/internal/s3err github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkmath github.com/aws/aws-sdk-go/internal/sdkrand github.com/aws/aws-sdk-go/internal/sdkuri github.com/aws/aws-sdk-go/internal/shareddefaults github.com/aws/aws-sdk-go/private/protocol github.com/aws/aws-sdk-go/private/protocol/eventstream github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil github.com/aws/aws-sdk-go/private/protocol/query github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest @@ -52,6 +55,7 @@ github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil github.com/aws/aws-sdk-go/service/s3 github.com/aws/aws-sdk-go/service/sts +github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d github.com/bgentry/go-netrc/netrc # github.com/cenkalti/backoff v2.2.1+incompatible @@ -96,14 +100,14 @@ github.com/hashicorp/go-cty/cty/gocty github.com/hashicorp/go-cty/cty/json github.com/hashicorp/go-cty/cty/msgpack github.com/hashicorp/go-cty/cty/set -# github.com/hashicorp/go-getter v1.4.0 +# github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02 github.com/hashicorp/go-getter github.com/hashicorp/go-getter/helper/url # github.com/hashicorp/go-hclog v0.9.2 github.com/hashicorp/go-hclog # github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-plugin v1.2.0 +# github.com/hashicorp/go-plugin v1.3.0 github.com/hashicorp/go-plugin github.com/hashicorp/go-plugin/internal/plugin # github.com/hashicorp/go-safetemp v1.0.0 @@ -114,18 +118,17 @@ github.com/hashicorp/go-uuid github.com/hashicorp/go-version # github.com/hashicorp/golang-lru v0.5.1 github.com/hashicorp/golang-lru/simplelru -# github.com/hashicorp/hcl/v2 v2.0.0 +# github.com/hashicorp/hcl/v2 v2.3.0 github.com/hashicorp/hcl/v2 +github.com/hashicorp/hcl/v2/ext/customdecode github.com/hashicorp/hcl/v2/hclsyntax # github.com/hashicorp/logutils v1.0.0 github.com/hashicorp/logutils # github.com/hashicorp/terraform-json v0.4.0 github.com/hashicorp/terraform-json -# github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.1 +# github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.2 ## explicit -github.com/hashicorp/terraform-plugin-sdk/v2/acctest github.com/hashicorp/terraform-plugin-sdk/v2/diag -github.com/hashicorp/terraform-plugin-sdk/v2/helper/hashcode github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema @@ -133,21 +136,23 @@ github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim github.com/hashicorp/terraform-plugin-sdk/v2/internal/diagutils +github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5 +github.com/hashicorp/terraform-plugin-sdk/v2/meta github.com/hashicorp/terraform-plugin-sdk/v2/plugin github.com/hashicorp/terraform-plugin-sdk/v2/terraform -# github.com/hashicorp/terraform-plugin-test v1.3.0 +# github.com/hashicorp/terraform-plugin-test v1.4.0 github.com/hashicorp/terraform-plugin-test # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d github.com/hashicorp/yamux -# github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200605105101-6a0dcd01d169 +# github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200614193934-b24fe6e545e5 ## explicit github.com/iwarapter/pingaccess-sdk-go/pingaccess -# github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 +# github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af github.com/jmespath/go-jmespath # github.com/konsorten/go-windows-terminal-sequences v1.0.2 github.com/konsorten/go-windows-terminal-sequences
From fe7b4d2dceda848a4564dc639fa6a16b1d629305 Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Sun, 14 Jun 2020 21:15:14 +0100 Subject: [PATCH 10/23] add GitHub Actions badge --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md index b7d35b14..282cfa8f 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ PingAccess Terraform Provider - [![Gitter](https://badges.gitter.im/iwarapter/terraform-provider-pingaccess.svg)](https://gitter.im/iwarapter/terraform-provider-pingaccess?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![Coverage](https://sonarcloud.io/api/project_badges/measure?project=github.conef.uk.iwarapter.terraform-provider-pingaccess&metric=coverage)](https://sonarcloud.io/dashboard?id=github.conef.uk.iwarapter.terraform-provider-pingaccess) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=github.conef.uk.iwarapter.terraform-provider-pingaccess&metric=alert_status)](https://sonarcloud.io/dashboard?id=github.conef.uk.iwarapter.terraform-provider-pingaccess) - [![Build Status](https://travis-ci.org/iwarapter/terraform-provider-pingaccess.svg?branch=master)](https://travis-ci.org/iwarapter/terraform-provider-pingaccess) + ![ci](https://github.com/iwarapter/terraform-provider-pingaccess/workflows/ci/badge.svg) ![GitHub release (latest by date)](https://img.shields.io/github/v/release/iwarapter/terraform-provider-pingaccess)
From 06854f63b952b348889be2a41995d94be707fdfb Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Sun, 28 Jun 2020 14:57:07 +0100 Subject: [PATCH 11/23] enable PF runtime 5.x/6.x support in parallel, keypair generation, and code cleanup --- .goreleaser.yml | 46 ++++ func-tests/main.tf | 1 - main.go | 23 +- .../data_source_pingaccess_acme_default.go | 7 +- .../data_source_pingaccess_certificate.go | 7 +- pingaccess/data_source_pingaccess_keypair.go | 16 +- ...ingaccess_pingfederate_runtime_metadata.go | 5 +- ...cess_pingfederate_runtime_metadata_test.go | 2 +- ...e_pingaccess_trusted_certificate_groups.go | 9 +- pingaccess/provider.go | 2 +- pingaccess/provider_test.go | 146 ++++-------- ...ource_pingaccess_access_token_validator.go | 21 +- pingaccess/resource_pingaccess_acme_server.go | 15 +- pingaccess/resource_pingaccess_application.go | 38 +-- ...esource_pingaccess_application_resource.go | 18 +- .../resource_pingaccess_application_test.go | 2 +- ...source_pingaccess_auth_token_management.go | 13 +- .../resource_pingaccess_authn_req_list.go | 17 +- pingaccess/resource_pingaccess_certificate.go | 17 +- .../resource_pingaccess_engine_listener.go | 17 +- .../resource_pingaccess_hsm_provider.go | 25 +- ...gaccess_http_config_request_host_source.go | 13 +- .../resource_pingaccess_https_listener.go | 17 +- .../resource_pingaccess_identity_mapping.go | 25 +- pingaccess/resource_pingaccess_keypair.go | 133 +++++++++-- .../resource_pingaccess_keypair_test.go | 33 ++- .../resource_pingaccess_oauth_server.go | 13 +- .../resource_pingaccess_pingfederate_admin.go | 13 +- ...urce_pingaccess_pingfederate_admin_test.go | 18 +- .../resource_pingaccess_pingfederate_oauth.go | 13 +- ...esource_pingaccess_pingfederate_runtime.go | 222 ++++++++++++++++-- ...ce_pingaccess_pingfederate_runtime_test.go | 162 +++++++++++-- pingaccess/resource_pingaccess_rule.go | 21 +- pingaccess/resource_pingaccess_rulesets.go | 9 +- pingaccess/resource_pingaccess_site.go | 21 +- .../resource_pingaccess_site_authenticator.go | 25 +- ...resource_pingaccess_third_party_service.go | 10 +- ...e_pingaccess_trusted_certificate_groups.go | 21 +- pingaccess/resource_pingaccess_virtualhost.go | 9 +- pingaccess/resource_pingaccess_websession.go | 21 +- pingaccess/structures.go | 4 +- pingaccess/validators.go | 54 ++--- pingaccess/validators_test.go | 21 +- 43 files changed, 867 insertions(+), 458 deletions(-) create mode 100644 .goreleaser.yml
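Later in this commit, the provider_test.go hunk swaps the PingFederate container for an httptest TLS server that answers every request with a canned OIDC discovery document, so the acceptance tests only need a reachable HTTPS issuer rather than a full PingFederate install. A self-contained sketch of that technique, not part of the patch (hypothetical standalone program with a trimmed one-field document; the full test serves the complete metadata shown below):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Serve a minimal discovery document, over TLS, for any request path.
	stub := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json;charset=utf-8")
		fmt.Fprint(w, `{"issuer": "https://localhost:9031"}`)
	}))
	defer stub.Close()

	// stub.Client() already trusts the server's self-signed certificate.
	resp, err := stub.Client().Get(stub.URL + "/.well-known/openid-configuration")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}

The 127.0.0.1-to-host.docker.internal rewrite applied to PINGFEDERATE_TEST_IP further down is what lets the PingAccess container reach this host-side stub.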
diff --git a/.goreleaser.yml b/.goreleaser.yml new file mode 100644 index 00000000..65a66a7f --- /dev/null +++ b/.goreleaser.yml @@ -0,0 +1,46 @@ +before: + hooks: + - go mod tidy +builds: + - env: + - CGO_ENABLED=0 + goos: + - openbsd + - solaris + - windows + - linux + - darwin + goarch: + - amd64 + - '386' + - arm + - arm64 + ignore: + - goos: darwin + goarch: '386' + - goos: openbsd + goarch: arm + - goos: openbsd + goarch: arm64 + binary: '{{ .ProjectName }}_v{{ .Version }}' + flags: | -gcflags="-trimpath=$GOPATH" -asmflags="-trimpath=$GOPATH" +archives: + - format: zip + name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}' +checksum: + name_template: '{{ .ProjectName }}_{{ .Version }}_SHA256SUMS' + algorithm: sha256 +signs: + - artifacts: checksum + args: + - "--local-user" + - "BE9710A9C84DA5AA" # Replace this with your GPG signing key ID + - "--output" + - "${signature}" + - "--detach-sign" + - "${artifact}" +release: + draft: true +changelog: + skip: true
diff --git a/func-tests/main.tf b/func-tests/main.tf index f4b4d949..bc483e6a 100644 --- a/func-tests/main.tf +++ b/func-tests/main.tf @@ -24,7 +24,6 @@ resource "pingaccess_application" "demo" { agent_id = 0 name = "demo" context_root = "/" - default_auth_type = "API" destination = "Site" site_id = pingaccess_site.demo.id virtual_host_ids = [pingaccess_virtualhost.demo.id]
diff --git a/main.go b/main.go index a45c8936..9c96f9a3 100644 --- a/main.go +++ b/main.go @@ -1,11 +1,30 @@ package main import ( + "context" + "flag" "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" "github.com/iwarapter/terraform-provider-pingaccess/pingaccess" + "log" ) func main() { - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: pingaccess.Provider}) + var debugMode bool + + flag.BoolVar(&debugMode, "debuggable", false, "set to true to run the provider with support for debuggers like delve") + flag.Parse() + + if debugMode { + err := plugin.Debug(context.Background(), "registry.terraform.io/-/pingaccess", + &plugin.ServeOpts{ + ProviderFunc: pingaccess.Provider, + }) + if err != nil { + log.Println(err.Error()) + } + } else { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: pingaccess.Provider}) + } + }
diff --git a/pingaccess/data_source_pingaccess_acme_default.go b/pingaccess/data_source_pingaccess_acme_default.go index 67f9e924..086afd76 100644 --- a/pingaccess/data_source_pingaccess_acme_default.go +++ b/pingaccess/data_source_pingaccess_acme_default.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -25,11 +24,11 @@ func dataSourcePingAccessAcmeDefaultSchema() map[string]*schema.Schema { } } -func dataSourcePingAccessAcmeDefaultRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func dataSourcePingAccessAcmeDefaultRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Acme - result, resp, err := svc.GetDefaultAcmeServerCommand() + result, _, err := svc.GetDefaultAcmeServerCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to read ACME Default: %s\n%v", err.Error(), resp)) + 
return diag.Errorf("unable to read ACME Default: %s", err) } d.SetId(*result.Id) diff --git a/pingaccess/data_source_pingaccess_certificate.go b/pingaccess/data_source_pingaccess_certificate.go index 0737d103..51abb8ab 100644 --- a/pingaccess/data_source_pingaccess_certificate.go +++ b/pingaccess/data_source_pingaccess_certificate.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -20,18 +19,18 @@ func dataSourcePingAccessCertificate() *schema.Resource { } } -func dataSourcePingAccessCertificateRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func dataSourcePingAccessCertificateRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Certificates input := &pa.GetTrustedCertsInput{ Alias: d.Get("alias").(string), } result, _, err := svc.GetTrustedCerts(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read Certificate: %s", err)) + return diag.Errorf("unable to read Certificate: %s", err) } if len(result.Items) != 1 { - return diag.FromErr(fmt.Errorf("unable to find Certificate with alias '%s' found '%d' results", d.Get("alias").(string), len(result.Items))) + return diag.Errorf("unable to find Certificate with alias '%s' found '%d' results", d.Get("alias").(string), len(result.Items)) } d.SetId(strconv.Itoa(*result.Items[0].Id)) return resourcePingAccessCertificateReadResult(d, result.Items[0]) diff --git a/pingaccess/data_source_pingaccess_keypair.go b/pingaccess/data_source_pingaccess_keypair.go index b10fc69e..a383452d 100644 --- a/pingaccess/data_source_pingaccess_keypair.go +++ b/pingaccess/data_source_pingaccess_keypair.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -15,23 +14,32 @@ func dataSourcePingAccessKeyPair() *schema.Resource { //The normal certificate schema has a file data passed to it, this isnt required for the data resource delete(sch, "file_data") delete(sch, "password") + delete(sch, "city") + delete(sch, "common_name") + delete(sch, "country") + delete(sch, "key_algorithm") + delete(sch, "key_size") + delete(sch, "organization") + delete(sch, "organization_unit") + delete(sch, "state") + delete(sch, "valid_days") return &schema.Resource{ ReadContext: dataSourcePingAccessKeyPairRead, Schema: sch, } } -func dataSourcePingAccessKeyPairRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func dataSourcePingAccessKeyPairRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).KeyPairs input := &pa.GetKeyPairsCommandInput{ Alias: d.Get("alias").(string), } result, _, err := svc.GetKeyPairsCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read KeyPair: %s", err)) + return diag.Errorf("unable to read KeyPair: %s", err) } if len(result.Items) != 1 { - return diag.FromErr(fmt.Errorf("unable to find KeyPair with alias '%s' found '%d' results", d.Get("alias").(string), len(result.Items))) + return diag.Errorf("unable to find KeyPair with alias '%s' found '%d' results", d.Get("alias").(string), len(result.Items)) } d.SetId(strconv.Itoa(*result.Items[0].Id)) return resourcePingAccessKeyPairReadResult(d, result.Items[0]) diff --git a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go index d541e556..9194ccef 100644 --- 
a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go +++ b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -89,11 +88,11 @@ func dataSourcePingAccessPingFederateRuntimeMetadataSchema() map[string]*schema. } } -func dataSourcePingAccessPingFederateRuntimeMetadataRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func dataSourcePingAccessPingFederateRuntimeMetadataRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Pingfederate result, resp, err := svc.GetPingFederateMetadataCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to read PingFederate Runtime Metadata: %s\n%v", err, resp)) + return diag.Errorf("unable to read PingFederate Runtime Metadata: %s\n%v", err, resp) } var diags diag.Diagnostics d.SetId("pingfederate_runtime_metadata") diff --git a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata_test.go b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata_test.go index 598bf246..63d04026 100644 --- a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata_test.go +++ b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata_test.go @@ -57,7 +57,7 @@ func TestAccPingAccessPingFederateRuntimeMetadataDataSource(t *testing.T) { func testAccPingAccessPingFederateRuntimeMetadataConfig() string { return fmt.Sprintf(`data "pingaccess_pingfederate_runtime_metadata" "test" {} resource "pingaccess_pingfederate_runtime" "app_demo_pfr" { - issuer = "https://%s:9031" + issuer = "%s" trusted_certificate_group_id = 2 }`, os.Getenv("PINGFEDERATE_TEST_IP")) } diff --git a/pingaccess/data_source_pingaccess_trusted_certificate_groups.go b/pingaccess/data_source_pingaccess_trusted_certificate_groups.go index 56d31d9e..e2a14135 100644 --- a/pingaccess/data_source_pingaccess_trusted_certificate_groups.go +++ b/pingaccess/data_source_pingaccess_trusted_certificate_groups.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -48,20 +47,20 @@ func dataSourcePingAccessTrustedCertificateGroupsSchema() map[string]*schema.Sch } } -func dataSourcePingAccessTrustedCertificateGroupsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func dataSourcePingAccessTrustedCertificateGroupsRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).TrustedCertificateGroups input := &pa.GetTrustedCertificateGroupsCommandInput{ Name: d.Get("name").(string), } result, _, err := svc.GetTrustedCertificateGroupsCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read TrustedCertificateGroup: %s", err)) + return diag.Errorf("unable to read TrustedCertificateGroup: %s", err) } if result == nil { - return diag.FromErr(fmt.Errorf("unable to find TrustedCertificateGroup with the name '%s', result was nil", d.Get("name").(string))) + return diag.Errorf("unable to find TrustedCertificateGroup with the name '%s', result was nil", d.Get("name").(string)) } if len(result.Items) != 1 { - return diag.FromErr(fmt.Errorf("unable to find TrustedCertificateGroup with the name '%s' found '%d' results", d.Get("name").(string), len(result.Items))) + return 
diag.Errorf("unable to find TrustedCertificateGroup with the name '%s' found '%d' results", d.Get("name").(string), len(result.Items)) } d.SetId(result.Items[0].Id.String()) return resourcePingAccessTrustedCertificateGroupsReadResult(d, result.Items[0]) diff --git a/pingaccess/provider.go b/pingaccess/provider.go index fb2592f4..3b0e5953 100644 --- a/pingaccess/provider.go +++ b/pingaccess/provider.go @@ -86,7 +86,7 @@ func init() { } } -func providerConfigure(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { +func providerConfigure(_ context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { config := &config{ Username: d.Get("username").(string), diff --git a/pingaccess/provider_test.go b/pingaccess/provider_test.go index a5d52011..788ae038 100644 --- a/pingaccess/provider_test.go +++ b/pingaccess/provider_test.go @@ -1,20 +1,19 @@ package pingaccess import ( - "bytes" "context" "crypto/tls" - "errors" "fmt" - "io/ioutil" "log" "math/rand" "net/http" + "net/http/httptest" "net/url" "os" "path/filepath" "reflect" "runtime" + "strings" "testing" "time" @@ -22,7 +21,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/iwarapter/pingaccess-sdk-go/pingaccess" "github.com/ory/dockertest/v3" - "github.com/tidwall/sjson" ) func TestMain(m *testing.M) { @@ -32,12 +30,46 @@ func TestMain(m *testing.M) { if err != nil { log.Fatalf("Could not connect to docker: %s", err) } - networkName := "tf-pa-test-network" - network, err := pool.CreateNetwork(networkName) - if err != nil { - log.Fatalf("Could not create docker network: %s", err) - } - defer network.Close() + server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + // Send response to be tested + rw.Header().Set("Content-Type", "application/json;charset=utf-8") + rw.Write([]byte(` +{ + "issuer": "https://localhost:9031", + "authorization_endpoint": "https://localhost:9031/as/authorization.oauth2", + "token_endpoint": "https://localhost:9031/as/token.oauth2", + "revocation_endpoint": "https://localhost:9031/as/revoke_token.oauth2", + "userinfo_endpoint": "https://localhost:9031/idp/userinfo.openid", + "introspection_endpoint": "https://localhost:9031/as/introspect.oauth2", + "jwks_uri": "https://localhost:9031/pf/JWKS", + "registration_endpoint": "https://localhost:9031/as/clients.oauth2", + "ping_revoked_sris_endpoint": "https://localhost:9031/pf-ws/rest/sessionMgmt/revokedSris", + "ping_end_session_endpoint": "https://localhost:9031/idp/startSLO.ping", + "device_authorization_endpoint": "https://localhost:9031/as/device_authz.oauth2", + "scopes_supported": [ "address", "mail", "phone", "openid", "profile", "group1" ], + "response_types_supported": [ "code", "token", "id_token", "code token", "code id_token", "token id_token", "code token id_token" ], + "response_modes_supported": [ "fragment", "query", "form_post" ], + "grant_types_supported": [ "implicit", "authorization_code", "refresh_token", "password", "client_credentials", "urn:pingidentity.com:oauth2:grant_type:validate_bearer", "urn:ietf:params:oauth:grant-type:jwt-bearer", "urn:ietf:params:oauth:grant-type:saml2-bearer", "urn:ietf:params:oauth:grant-type:device_code", "urn:openid:params:grant-type:ciba" ], + "subject_types_supported": [ "public", "pairwise" ], + "id_token_signing_alg_values_supported": [ "none", "HS256", "HS384", "HS512", "RS256", "RS384", "RS512", "ES256", "ES384", "ES512", "PS256", "PS384", "PS512" ], + 
"token_endpoint_auth_methods_supported": [ "client_secret_basic", "client_secret_post", "private_key_jwt" ], + "token_endpoint_auth_signing_alg_values_supported": [ "RS256", "RS384", "RS512", "ES256", "ES384", "ES512", "PS256", "PS384", "PS512" ], + "claim_types_supported": [ "normal" ], + "claims_parameter_supported": false, + "request_parameter_supported": true, + "request_uri_parameter_supported": false, + "request_object_signing_alg_values_supported": [ "RS256", "RS384", "RS512", "ES256", "ES384", "ES512", "PS256", "PS384", "PS512" ], + "id_token_encryption_alg_values_supported": [ "dir", "A128KW", "A192KW", "A256KW", "A128GCMKW", "A192GCMKW", "A256GCMKW", "ECDH-ES", "ECDH-ES+A128KW", "ECDH-ES+A192KW", "ECDH-ES+A256KW", "RSA-OAEP" ], + "id_token_encryption_enc_values_supported": [ "A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512", "A128GCM", "A192GCM", "A256GCM" ], + "backchannel_authentication_endpoint": "https://localhost:9031/as/bc-auth.ciba", + "backchannel_token_delivery_modes_supported": [ "poll", "ping" ], + "backchannel_authentication_request_signing_alg_values_supported": [ "RS256", "RS384", "RS512", "ES256", "ES384", "ES512", "PS256", "PS384", "PS512" ], + "backchannel_user_code_parameter_supported": false +} +`)) + })) + // Close the server when test finishes + defer server.Close() devOpsUser, devOpsUserExists := os.LookupEnv("PING_IDENTITY_DEVOPS_USER") devOpsKey, devOpsKeyExists := os.LookupEnv("PING_IDENTITY_DEVOPS_KEY") @@ -50,42 +82,22 @@ func TestMain(m *testing.M) { paOpts := &dockertest.RunOptions{ Name: fmt.Sprintf("pa-%s", randomID), Repository: "pingidentity/pingaccess", - Tag: "6.0.1-edge", - NetworkID: network.Network.ID, + Tag: "6.0.2-edge", Env: []string{"PING_IDENTITY_ACCEPT_EULA=YES", fmt.Sprintf("PING_IDENTITY_DEVOPS_USER=%s", devOpsUser), fmt.Sprintf("PING_IDENTITY_DEVOPS_KEY=%s", devOpsKey)}, } - pfOpts := &dockertest.RunOptions{ - Name: fmt.Sprintf("pf-%s", randomID), - Repository: "pingidentity/pingfederate", - Tag: "10.0.2-edge", - NetworkID: network.Network.ID, - Env: []string{ - "PING_IDENTITY_ACCEPT_EULA=YES", - fmt.Sprintf("PING_IDENTITY_DEVOPS_USER=%s", devOpsUser), - fmt.Sprintf("PING_IDENTITY_DEVOPS_KEY=%s", devOpsKey), - "SERVER_PROFILE_URL=https://github.com/pingidentity/pingidentity-server-profiles.git", - "SERVER_PROFILE_PATH=getting-started/pingfederate", - }, - } - // pulls an image, creates a container based on it and runs it paCont, err := pool.RunWithOptions(paOpts) if err != nil { log.Fatalf("Could not create pingaccess container: %s", err) } defer paCont.Close() - pfCont, err := pool.RunWithOptions(pfOpts) - if err != nil { - log.Fatalf("Could not create pingfederate container: %s", err) - } - defer pfCont.Close() pool.MaxWait = time.Minute * 2 // exponential backoff-retry, because the application in the container might not be ready to accept connections yet - url, _ := url.Parse(fmt.Sprintf("https://localhost:%s", paCont.GetPort("9000/tcp"))) - log.Printf("Setting PingAccess admin API: %s", url.String()) + u, _ := url.Parse(fmt.Sprintf("https://localhost:%s", paCont.GetPort("9000/tcp"))) + log.Printf("Setting PingAccess admin API: %s", u.String()) http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - client := pingaccess.NewClient("administrator", "2FederateM0re", url, "/pa-admin-api/v3", nil) + client := pingaccess.NewClient("administrator", "2FederateM0re", u, "/pa-admin-api/v3", nil) if err = pool.Retry(func() error { log.Println("Attempting to connect to PingAccess admin API....") 
_, _, err = client.Version.VersionCommand() @@ -95,44 +107,23 @@ func TestMain(m *testing.M) { } os.Setenv("PINGACCESS_BASEURL", fmt.Sprintf("https://localhost:%s", paCont.GetPort("9000/tcp"))) os.Setenv("PINGACCESS_PASSWORD", "2FederateM0re") - os.Setenv("PINGFEDERATE_TEST_IP", pfCont.GetIPInNetwork(network)) + os.Setenv("PINGFEDERATE_TEST_IP", strings.Replace(server.URL, "127.0.0.1", "host.docker.internal", -1)) log.Println("Connected to PingAccess admin API....") - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} version, _, err := client.Version.VersionCommand() if err != nil { log.Fatalf("Failed to retrieve version from server: %v", err) } log.Printf("Connected to PingAccess version: %s", *version.Version) - pfPort := pfCont.GetPort("9999/tcp") - pfURL, _ := url.Parse(fmt.Sprintf("https://localhost:%s", pfPort)) - pfClient := &http.Client{} - if err = pool.Retry(func() error { - log.Println("Attempting to connect to PingFederate admin API....") - _, err := connectToPF(pfClient, pfURL.String()) - return err - }); err != nil { - log.Fatalf("Could not connect to pingaccess: %s", err) - } - err = setupPF(pfURL.String()) - if err != nil { - log.Fatalf("Failed to setup PF server: %v", err) - } - log.Printf("Connected to PingFederate setup complete") paCont.Expire(360) - pfCont.Expire(360) - //resource.TestMain(m) code := m.Run() paCont.Close() - pfCont.Close() - network.Close() log.Printf("Tests complete shutting down container") os.Exit(code) } else { m.Run() - //resource.TestMain(m) } } @@ -146,49 +137,6 @@ func init() { } } -func connectToPF(client *http.Client, adminURL string) (*http.Response, error) { - req, err := http.NewRequest("GET", fmt.Sprintf("%s/pf-admin-api/v1/serverSettings", adminURL), nil) - if err != nil { - return nil, err - } - req.SetBasicAuth("Administrator", "2FederateM0re") - req.Header.Add("X-Xsrf-Header", "pingfederate") - resp, err := client.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode != 200 { - return nil, errors.New("Incorrect response code from admin: " + resp.Status) - } - return resp, nil -} - -func setupPF(adminURL string) error { - client := &http.Client{} - resp, err := connectToPF(client, adminURL) - if err != nil { - return err - } - bodyText, _ := ioutil.ReadAll(resp.Body) - s := string(bodyText) - s, err = sjson.Set(s, "rolesAndProtocols.oauthRole.enableOpenIdConnect", true) - if err != nil { - return err - } - req, _ := http.NewRequest("PUT", fmt.Sprintf("%s/pf-admin-api/v1/serverSettings", adminURL), bytes.NewBuffer([]byte(s))) - req.SetBasicAuth("Administrator", "2FederateM0re") - req.Header.Add("X-Xsrf-Header", "pingfederate") - req.Header.Set("Content-Type", "application/json") - resp, err = client.Do(req) - if err != nil { - return err - } - if resp.StatusCode != 200 { - return errors.New("Incorrect response code from admin: " + resp.Status) - } - return nil -} - func testAccPreCheck(t *testing.T) { err := testAccProvider.Configure(context.TODO(), terraform.NewResourceConfigRaw(nil)) if err != nil { diff --git a/pingaccess/resource_pingaccess_access_token_validator.go b/pingaccess/resource_pingaccess_access_token_validator.go index aead2047..525d9374 100644 --- a/pingaccess/resource_pingaccess_access_token_validator.go +++ b/pingaccess/resource_pingaccess_access_token_validator.go @@ -23,7 +23,10 @@ func resourcePingAccessAccessTokenValidator() *schema.Resource { Schema: resourcePingAccessAccessTokenValidatorSchema(), CustomizeDiff: func(ctx context.Context, d 
*schema.ResourceDiff, m interface{}) error { svc := m.(*pingaccess.Client).AccessTokenValidators - desc, _, _ := svc.GetAccessTokenValidatorDescriptorsCommand() + desc, _, err := svc.GetAccessTokenValidatorDescriptorsCommand() + if err != nil { + return fmt.Errorf("unable to retrieve AccessTokenValidator descriptors %s", err) + } className := d.Get("class_name").(string) if err := descriptorsHasClassName(className, desc); err != nil { return err @@ -51,7 +54,7 @@ func resourcePingAccessAccessTokenValidatorSchema() map[string]*schema.Schema { } } -func resourcePingAccessAccessTokenValidatorCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAccessTokenValidatorCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AccessTokenValidators input := pingaccess.AddAccessTokenValidatorCommandInput{ Body: *resourcePingAccessAccessTokenValidatorReadData(d), @@ -59,14 +62,14 @@ func resourcePingAccessAccessTokenValidatorCreate(ctx context.Context, d *schema result, _, err := svc.AddAccessTokenValidatorCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create AccessTokenValidator: %s", err)) + return diag.Errorf("unable to create AccessTokenValidator: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessAccessTokenValidatorReadResult(d, result, svc) } -func resourcePingAccessAccessTokenValidatorRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAccessTokenValidatorRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AccessTokenValidators input := &pingaccess.GetAccessTokenValidatorCommandInput{ @@ -75,13 +78,13 @@ func resourcePingAccessAccessTokenValidatorRead(ctx context.Context, d *schema.R result, _, err := svc.GetAccessTokenValidatorCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read AccessTokenValidator: %s", err)) + return diag.Errorf("unable to read AccessTokenValidator: %s", err) } return resourcePingAccessAccessTokenValidatorReadResult(d, result, svc) } -func resourcePingAccessAccessTokenValidatorUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAccessTokenValidatorUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AccessTokenValidators input := pingaccess.UpdateAccessTokenValidatorCommandInput{ Body: *resourcePingAccessAccessTokenValidatorReadData(d), @@ -90,18 +93,18 @@ func resourcePingAccessAccessTokenValidatorUpdate(ctx context.Context, d *schema result, _, err := svc.UpdateAccessTokenValidatorCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update AccessTokenValidator: %s", err)) + return diag.Errorf("unable to update AccessTokenValidator: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessAccessTokenValidatorReadResult(d, result, svc) } -func resourcePingAccessAccessTokenValidatorDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAccessTokenValidatorDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AccessTokenValidators _, err := svc.DeleteAccessTokenValidatorCommand(&pingaccess.DeleteAccessTokenValidatorCommandInput{Id: d.Id()}) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete 
AccessTokenValidator: %s", err)) + return diag.Errorf("unable to delete AccessTokenValidator: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_acme_server.go b/pingaccess/resource_pingaccess_acme_server.go index 8d53f11e..b1ed922a 100644 --- a/pingaccess/resource_pingaccess_acme_server.go +++ b/pingaccess/resource_pingaccess_acme_server.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -38,7 +37,7 @@ func resourcePingAccessAcmeServerSchema() map[string]*schema.Schema { } } -func resourcePingAccessAcmeServerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAcmeServerCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).Acme input := pingaccess.AddAcmeServerCommandInput{ Body: *resourcePingAccessAcmeServerReadData(d), @@ -46,33 +45,33 @@ func resourcePingAccessAcmeServerCreate(ctx context.Context, d *schema.ResourceD result, _, err := svc.AddAcmeServerCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create AcmeServer: %s", err)) + return diag.Errorf("unable to create AcmeServer: %s", err) } d.SetId(*result.Id) return resourcePingAccessAcmeServerReadResult(d, &input.Body) } -func resourcePingAccessAcmeServerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAcmeServerRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).Acme input := &pingaccess.GetAcmeServerCommandInput{ AcmeServerId: d.Id(), } result, _, err := svc.GetAcmeServerCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read AcmeServer: %s", err)) + return diag.Errorf("unable to read AcmeServer: %s", err) } return resourcePingAccessAcmeServerReadResult(d, result) } -func resourcePingAccessAcmeServerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAcmeServerDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).Acme input := &pingaccess.DeleteAcmeServerCommandInput{ AcmeServerId: d.Id(), } - _, resp, err := svc.DeleteAcmeServerCommand(input) + _, _, err := svc.DeleteAcmeServerCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete AcmeServer: %s resp: %v", err, *resp)) + return diag.Errorf("unable to delete AcmeServer: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_application.go b/pingaccess/resource_pingaccess_application.go index ad38a664..e26500c5 100644 --- a/pingaccess/resource_pingaccess_application.go +++ b/pingaccess/resource_pingaccess_application.go @@ -2,9 +2,9 @@ package pingaccess import ( "context" - "fmt" "strconv" + "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" @@ -38,6 +38,13 @@ func resourcePingAccessApplicationSchema() map[string]*schema.Schema { Type: schema.TypeString, Required: true, ForceNew: true, + ValidateDiagFunc: func(value interface{}, path cty.Path) diag.Diagnostics { + v := value.(string) + if v != "Web" && v != "API" && v != "Dynamic" { + return diag.Errorf("must be either 'Web', 'API' or 'Dynamic' not %s", v) + } + return 
nil + }, }, "case_sensitive_path": { Type: schema.TypeBool, @@ -49,8 +56,9 @@ func resourcePingAccessApplicationSchema() map[string]*schema.Schema { Required: true, }, "default_auth_type": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Optional: true, + Deprecated: "This field is no longer used and should be removed.", }, "description": { Type: schema.TypeString, @@ -123,33 +131,33 @@ func resourcePingAccessApplicationSchema() map[string]*schema.Schema { } } -func resourcePingAccessApplicationCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessApplicationCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Applications input := &pa.AddApplicationCommandInput{ Body: *resourcePingAccessApplicationReadData(d), } result, _, err := svc.AddApplicationCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create Application: %s", err)) + return diag.Errorf("unable to create Application: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessApplicationReadResult(d, &input.Body) } -func resourcePingAccessApplicationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessApplicationRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Applications input := &pa.GetApplicationCommandInput{ Id: d.Id(), } result, _, err := svc.GetApplicationCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read Application: %s", err)) + return diag.Errorf("unable to read Application: %s", err) } return resourcePingAccessApplicationReadResult(d, result) } -func resourcePingAccessApplicationUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessApplicationUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Applications input := pa.UpdateApplicationCommandInput{ Body: *resourcePingAccessApplicationReadData(d), @@ -157,12 +165,12 @@ func resourcePingAccessApplicationUpdate(ctx context.Context, d *schema.Resource } result, _, err := svc.UpdateApplicationCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update Application: %s", err)) + return diag.Errorf("unable to update Application: %s", err) } return resourcePingAccessApplicationReadResult(d, result) } -func resourcePingAccessApplicationDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessApplicationDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Applications input := &pa.DeleteApplicationCommandInput{ @@ -171,7 +179,7 @@ func resourcePingAccessApplicationDelete(ctx context.Context, d *schema.Resource _, _, err := svc.DeleteApplicationCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete Application: %s", err)) + return diag.Errorf("unable to delete Application: %s", err) } return nil } @@ -183,7 +191,6 @@ func resourcePingAccessApplicationReadResult(d *schema.ResourceData, rv *pa.Appl setResourceDataStringWithDiagnostic(d, "application_type", rv.ApplicationType, &diags) setResourceDataBoolWithDiagnostic(d, "case_sensitive_path", rv.CaseSensitivePath, &diags) setResourceDataStringWithDiagnostic(d, "context_root", rv.ContextRoot, &diags) - setResourceDataStringWithDiagnostic(d, "default_auth_type", 
rv.DefaultAuthType, &diags) setResourceDataStringWithDiagnostic(d, "description", rv.Description, &diags) setResourceDataStringWithDiagnostic(d, "destination", rv.Destination, &diags) setResourceDataBoolWithDiagnostic(d, "enabled", rv.Enabled, &diags) @@ -224,7 +231,7 @@ func resourcePingAccessApplicationReadResult(d *schema.ResourceData, rv *pa.Appl func resourcePingAccessApplicationReadData(d *schema.ResourceData) *pa.ApplicationView { siteID, _ := strconv.Atoi(d.Get("site_id").(string)) virtualHostIds := expandStringList(d.Get("virtual_host_ids").(*schema.Set).List()) - vhIds := []*int{} + var vhIds []*int for _, i := range virtualHostIds { text, _ := strconv.Atoi(*i) vhIds = append(vhIds, &text) @@ -235,12 +242,15 @@ func resourcePingAccessApplicationReadData(d *schema.ResourceData) *pa.Applicati Name: String(d.Get("name").(string)), ApplicationType: String(d.Get("application_type").(string)), ContextRoot: String(d.Get("context_root").(string)), - DefaultAuthType: String(d.Get("default_auth_type").(string)), SiteId: Int(siteID), SpaSupportEnabled: Bool(d.Get("spa_support_enabled").(bool)), VirtualHostIds: &vhIds, } + if *application.ApplicationType != "Dynamic" { + application.DefaultAuthType = application.ApplicationType + } + if _, ok := d.GetOk("access_validator_id"); ok { application.AccessValidatorId = Int(d.Get("access_validator_id").(int)) } diff --git a/pingaccess/resource_pingaccess_application_resource.go b/pingaccess/resource_pingaccess_application_resource.go index ce0a20d9..50fddc48 100644 --- a/pingaccess/resource_pingaccess_application_resource.go +++ b/pingaccess/resource_pingaccess_application_resource.go @@ -115,7 +115,7 @@ func resourcePingAccessApplicationResourceCreate(ctx context.Context, d *schema. } result, _, err := svc.GetApplicationResourcesCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create ApplicationResource: %s", err)) + return diag.Errorf("unable to create ApplicationResource: %s", err) } rv := result.Items[0] d.SetId(rv.Id.String()) @@ -130,14 +130,14 @@ func resourcePingAccessApplicationResourceCreate(ctx context.Context, d *schema. 
result, _, err := svc.AddApplicationResourceCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create ApplicationResource: %s", err)) + return diag.Errorf("unable to create ApplicationResource: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessApplicationResourceReadResult(d, result) } -func resourcePingAccessApplicationResourceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessApplicationResourceRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Applications input := &pa.GetApplicationResourceCommandInput{ ApplicationId: d.Get("application_id").(string), @@ -146,13 +146,13 @@ func resourcePingAccessApplicationResourceRead(ctx context.Context, d *schema.Re result, _, err := svc.GetApplicationResourceCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read ApplicationResource: %s", err)) + return diag.Errorf("unable to read ApplicationResource: %s", err) } return resourcePingAccessApplicationResourceReadResult(d, result) } -func resourcePingAccessApplicationResourceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessApplicationResourceUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Applications input := pa.UpdateApplicationResourceCommandInput{ ApplicationId: d.Get("application_id").(string), @@ -162,12 +162,12 @@ func resourcePingAccessApplicationResourceUpdate(ctx context.Context, d *schema. result, _, err := svc.UpdateApplicationResourceCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update ApplicationResource: %s", err)) + return diag.Errorf("unable to update ApplicationResource: %s", err) } return resourcePingAccessApplicationResourceReadResult(d, result) } -func resourcePingAccessApplicationResourceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessApplicationResourceDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Applications if d.Get("root_resource").(bool) { @@ -181,12 +181,12 @@ func resourcePingAccessApplicationResourceDelete(ctx context.Context, d *schema. 
_, err := svc.DeleteApplicationResourceCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete ApplicationResource: %s", err)) + return diag.Errorf("unable to delete ApplicationResource: %s", err) } return nil } -func resourcePingAccessApplicationResourceImport(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { +func resourcePingAccessApplicationResourceImport(_ context.Context, d *schema.ResourceData, _ interface{}) ([]*schema.ResourceData, error) { idParts := strings.SplitN(d.Id(), "/", 2) if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { return nil, fmt.Errorf("unexpected format of ID (%q), expected /", d.Id()) diff --git a/pingaccess/resource_pingaccess_application_test.go b/pingaccess/resource_pingaccess_application_test.go index e8fba0e7..bd044de3 100644 --- a/pingaccess/resource_pingaccess_application_test.go +++ b/pingaccess/resource_pingaccess_application_test.go @@ -145,7 +145,7 @@ func testAccPingAccessApplicationConfig(name, context, appType string) string { } resource "pingaccess_pingfederate_runtime" "app_demo_pfr" { - issuer = "https://%s:9031" + issuer = "%s" trusted_certificate_group_id = 2 } diff --git a/pingaccess/resource_pingaccess_auth_token_management.go b/pingaccess/resource_pingaccess_auth_token_management.go index 50d1b07a..d3c98378 100644 --- a/pingaccess/resource_pingaccess_auth_token_management.go +++ b/pingaccess/resource_pingaccess_auth_token_management.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" @@ -58,35 +57,35 @@ func resourcePingAccessAuthTokenManagementCreate(ctx context.Context, d *schema. return resourcePingAccessAuthTokenManagementUpdate(ctx, d, m) } -func resourcePingAccessAuthTokenManagementRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAuthTokenManagementRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).AuthTokenManagement result, _, err := svc.GetAuthTokenManagementCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to read AuthTokenManagement: %s", err)) + return diag.Errorf("unable to read AuthTokenManagement: %s", err) } return resourcePingAccessAuthTokenManagementReadResult(d, result) } -func resourcePingAccessAuthTokenManagementUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAuthTokenManagementUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).AuthTokenManagement input := pa.UpdateAuthTokenManagementCommandInput{ Body: *resourcePingAccessAuthTokenManagementReadData(d), } result, _, err := svc.UpdateAuthTokenManagementCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update AuthTokenManagement: %s", err)) + return diag.Errorf("unable to update AuthTokenManagement: %s", err) } d.SetId("auth_token_management") return resourcePingAccessAuthTokenManagementReadResult(d, result) } -func resourcePingAccessAuthTokenManagementDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAuthTokenManagementDelete(_ context.Context, _ *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).AuthTokenManagement _, err := svc.DeleteAuthTokenManagementCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete AuthTokenManagement: %s", err)) + 
return diag.Errorf("unable to delete AuthTokenManagement: %s", err) } return nil diff --git a/pingaccess/resource_pingaccess_authn_req_list.go b/pingaccess/resource_pingaccess_authn_req_list.go index 073641dd..07ad820c 100644 --- a/pingaccess/resource_pingaccess_authn_req_list.go +++ b/pingaccess/resource_pingaccess_authn_req_list.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -33,7 +32,7 @@ func resourcePingAccessAuthnReqListSchema() map[string]*schema.Schema { } } -func resourcePingAccessAuthnReqListCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAuthnReqListCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AuthnReqLists input := pingaccess.AddAuthnReqListCommandInput{ Body: *resourcePingAccessAuthnReqListReadData(d), @@ -41,26 +40,26 @@ func resourcePingAccessAuthnReqListCreate(ctx context.Context, d *schema.Resourc result, _, err := svc.AddAuthnReqListCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create AuthnReqList: %s", err)) + return diag.Errorf("unable to create AuthnReqList: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessAuthnReqListReadResult(d, &input.Body) } -func resourcePingAccessAuthnReqListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAuthnReqListRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AuthnReqLists input := &pingaccess.GetAuthnReqListCommandInput{ Id: d.Id(), } result, _, err := svc.GetAuthnReqListCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read AuthnReqList: %s", err)) + return diag.Errorf("unable to read AuthnReqList: %s", err) } return resourcePingAccessAuthnReqListReadResult(d, result) } -func resourcePingAccessAuthnReqListUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAuthnReqListUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AuthnReqLists input := pingaccess.UpdateAuthnReqListCommandInput{ Body: *resourcePingAccessAuthnReqListReadData(d), @@ -69,12 +68,12 @@ func resourcePingAccessAuthnReqListUpdate(ctx context.Context, d *schema.Resourc result, _, err := svc.UpdateAuthnReqListCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update AuthnReqList: %s", err)) + return diag.Errorf("unable to update AuthnReqList: %s", err) } return resourcePingAccessAuthnReqListReadResult(d, result) } -func resourcePingAccessAuthnReqListDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessAuthnReqListDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).AuthnReqLists input := &pingaccess.DeleteAuthnReqListCommandInput{ Id: d.Id(), @@ -82,7 +81,7 @@ func resourcePingAccessAuthnReqListDelete(ctx context.Context, d *schema.Resourc _, err := svc.DeleteAuthnReqListCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete AuthnReqList: %s", err)) + return diag.Errorf("unable to delete AuthnReqList: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_certificate.go b/pingaccess/resource_pingaccess_certificate.go 
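A note on the sweep running through this whole commit: each diag.FromErr(fmt.Errorf(...)) pair collapses into a single diag.Errorf(...) call. Both helpers come from terraform-plugin-sdk/v2's diag package and both yield one error-severity diagnostic, so the rewrite preserves behaviour while letting most files drop their fmt import. A minimal before/after sketch, not from the patch (the helper and resource names are hypothetical):

package pingaccess

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
)

// readWidgetOld shows the style this commit removes: wrap with fmt, then convert.
func readWidgetOld(err error) diag.Diagnostics {
	return diag.FromErr(fmt.Errorf("unable to read Widget: %s", err))
}

// readWidgetNew shows the replacement: format and convert in one call.
func readWidgetNew(err error) diag.Diagnostics {
	return diag.Errorf("unable to read Widget: %s", err)
}

Both functions return an equivalent single-element diag.Diagnostics, which is why the substitution can be applied mechanically across every resource.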
index 40feade9..4c1d44fd 100644 --- a/pingaccess/resource_pingaccess_certificate.go +++ b/pingaccess/resource_pingaccess_certificate.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -77,7 +76,7 @@ func resourcePingAccessCertificateSchema() map[string]*schema.Schema { } } -func resourcePingAccessCertificateCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessCertificateCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { input := pa.ImportTrustedCertInput{ Body: pa.X509FileImportDocView{ Alias: String(d.Get("alias").(string)), @@ -89,26 +88,26 @@ func resourcePingAccessCertificateCreate(ctx context.Context, d *schema.Resource result, _, err := svc.ImportTrustedCert(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create Certificate: %s", err)) + return diag.Errorf("unable to create Certificate: %s", err) } d.SetId(strconv.Itoa(*result.Id)) return resourcePingAccessCertificateReadResult(d, result) } -func resourcePingAccessCertificateRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessCertificateRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Certificates input := &pa.GetTrustedCertInput{ Id: d.Id(), } result, _, err := svc.GetTrustedCert(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read Certificate: %s", err)) + return diag.Errorf("unable to read Certificate: %s", err) } return resourcePingAccessCertificateReadResult(d, result) } -func resourcePingAccessCertificateUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessCertificateUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { input := pa.UpdateTrustedCertInput{ Body: pa.X509FileImportDocView{ Alias: String(d.Get("alias").(string)), @@ -121,18 +120,18 @@ func resourcePingAccessCertificateUpdate(ctx context.Context, d *schema.Resource result, _, err := svc.UpdateTrustedCert(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update Certificate: %s", err)) + return diag.Errorf("unable to update Certificate: %s", err) } d.SetId(strconv.Itoa(*result.Id)) return resourcePingAccessCertificateReadResult(d, result) } -func resourcePingAccessCertificateDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessCertificateDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Certificates _, err := svc.DeleteTrustedCertCommand(&pa.DeleteTrustedCertCommandInput{Id: d.Id()}) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete Certificate: %s", err)) + return diag.Errorf("unable to delete Certificate: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_engine_listener.go b/pingaccess/resource_pingaccess_engine_listener.go index 71d66d39..93e89ced 100644 --- a/pingaccess/resource_pingaccess_engine_listener.go +++ b/pingaccess/resource_pingaccess_engine_listener.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -46,7 +45,7 @@ func resourcePingAccessEngineListenerSchema() map[string]*schema.Schema { } } -func resourcePingAccessEngineListenerCreate(ctx 
context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessEngineListenerCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).EngineListeners input := pa.AddEngineListenerCommandInput{ Body: *resourcePingAccessEngineListenerReadData(d), @@ -54,26 +53,26 @@ func resourcePingAccessEngineListenerCreate(ctx context.Context, d *schema.Resou result, _, err := svc.AddEngineListenerCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create EngineListener: %s", err)) + return diag.Errorf("unable to create EngineListener: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessEngineListenerReadResult(d, &input.Body) } -func resourcePingAccessEngineListenerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessEngineListenerRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).EngineListeners input := &pa.GetEngineListenerCommandInput{ Id: d.Id(), } result, _, err := svc.GetEngineListenerCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read EngineListener: %s", err)) + return diag.Errorf("unable to read EngineListener: %s", err) } return resourcePingAccessEngineListenerReadResult(d, result) } -func resourcePingAccessEngineListenerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessEngineListenerUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).EngineListeners input := pa.UpdateEngineListenerCommandInput{ Body: *resourcePingAccessEngineListenerReadData(d), @@ -82,12 +81,12 @@ func resourcePingAccessEngineListenerUpdate(ctx context.Context, d *schema.Resou result, _, err := svc.UpdateEngineListenerCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update EngineListener: %s", err)) + return diag.Errorf("unable to update EngineListener: %s", err) } return resourcePingAccessEngineListenerReadResult(d, result) } -func resourcePingAccessEngineListenerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessEngineListenerDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).EngineListeners input := &pa.DeleteEngineListenerCommandInput{ Id: d.Id(), @@ -95,7 +94,7 @@ func resourcePingAccessEngineListenerDelete(ctx context.Context, d *schema.Resou _, err := svc.DeleteEngineListenerCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete EngineListener: %s", err)) + return diag.Errorf("unable to delete EngineListener: %s", err) } return nil diff --git a/pingaccess/resource_pingaccess_hsm_provider.go b/pingaccess/resource_pingaccess_hsm_provider.go index 6b7cfe24..3f71152b 100644 --- a/pingaccess/resource_pingaccess_hsm_provider.go +++ b/pingaccess/resource_pingaccess_hsm_provider.go @@ -2,10 +2,8 @@ package pingaccess import ( "context" - "crypto/tls" "encoding/json" "fmt" - "net/http" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -24,7 +22,10 @@ func resourcePingAccessHsmProvider() *schema.Resource { Schema: resourcePingAccessHsmProviderSchema(), CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error { svc := m.(*pingaccess.Client).HsmProviders - desc, _, _ := 
svc.GetHsmProviderDescriptorsCommand() + desc, _, err := svc.GetHsmProviderDescriptorsCommand() + if err != nil { + return fmt.Errorf("unable to retrieve HsmProvider descriptors: %s", err) + } className := d.Get("class_name").(string) if err := descriptorsHasClassName(className, desc); err != nil { return err @@ -51,7 +52,7 @@ func resourcePingAccessHsmProviderSchema() map[string]*schema.Schema { } } -func resourcePingAccessHsmProviderCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHsmProviderCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).HsmProviders input := pingaccess.AddHsmProviderCommandInput{ Body: *resourcePingAccessHsmProviderReadData(d), @@ -59,26 +60,26 @@ func resourcePingAccessHsmProviderCreate(ctx context.Context, d *schema.Resource result, _, err := svc.AddHsmProviderCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create HsmProvider: %s", err)) + return diag.Errorf("unable to create HsmProvider: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessHsmProviderReadResult(d, result, svc) } -func resourcePingAccessHsmProviderRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHsmProviderRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).HsmProviders input := &pingaccess.GetHsmProviderCommandInput{ Id: d.Id(), } result, _, err := svc.GetHsmProviderCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read HsmProvider: %s", err)) + return diag.Errorf("unable to read HsmProvider: %s", err) } return resourcePingAccessHsmProviderReadResult(d, result, svc) } -func resourcePingAccessHsmProviderUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHsmProviderUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).HsmProviders input := pingaccess.UpdateHsmProviderCommandInput{ Body: *resourcePingAccessHsmProviderReadData(d), @@ -86,19 +87,17 @@ func resourcePingAccessHsmProviderUpdate(ctx context.Context, d *schema.Resource } result, _, err := svc.UpdateHsmProviderCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update HsmProvider: %s", err)) + return diag.Errorf("unable to update HsmProvider: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessHsmProviderReadResult(d, result, svc) } -func resourcePingAccessHsmProviderDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHsmProviderDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).HsmProviders - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - _, err := svc.DeleteHsmProviderCommand(&pingaccess.DeleteHsmProviderCommandInput{Id: d.Id()}) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete HsmProvider: %s", err)) + return diag.Errorf("unable to delete HsmProvider: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_http_config_request_host_source.go b/pingaccess/resource_pingaccess_http_config_request_host_source.go index 8010e35d..5b111ea6 100644 --- a/pingaccess/resource_pingaccess_http_config_request_host_source.go +++ b/pingaccess/resource_pingaccess_http_config_request_host_source.go @@
-2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -37,32 +36,32 @@ func resourcePingAccessHTTPConfigRequestHostSourceCreate(ctx context.Context, d return resourcePingAccessHTTPConfigRequestHostSourceUpdate(ctx, d, m) } -func resourcePingAccessHTTPConfigRequestHostSourceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHTTPConfigRequestHostSourceRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).HttpConfig result, _, err := svc.GetHostSourceCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to read HttpConfigHostSource: %s", err)) + return diag.Errorf("unable to read HttpConfigHostSource: %s", err) } return resourcePingAccessHTTPConfigRequestHostSourceReadResult(d, result) } -func resourcePingAccessHTTPConfigRequestHostSourceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHTTPConfigRequestHostSourceUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).HttpConfig input := &pa.UpdateHostSourceCommandInput{Body: *resourcePingAccessHTTPConfigRequestHostSourceReadData(d)} result, _, err := svc.UpdateHostSourceCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update HttpConfigHostSource: %s", err)) + return diag.Errorf("unable to update HttpConfigHostSource: %s", err) } d.SetId("http_config_host_source") return resourcePingAccessHTTPConfigRequestHostSourceReadResult(d, result) } -func resourcePingAccessHTTPConfigRequestHostSourceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHTTPConfigRequestHostSourceDelete(_ context.Context, _ *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).HttpConfig _, err := svc.DeleteHostSourceCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete HttpConfigHostSource: %s", err)) + return diag.Errorf("unable to delete HttpConfigHostSource: %s", err) } return nil diff --git a/pingaccess/resource_pingaccess_https_listener.go b/pingaccess/resource_pingaccess_https_listener.go index 8838a467..be180ea8 100644 --- a/pingaccess/resource_pingaccess_https_listener.go +++ b/pingaccess/resource_pingaccess_https_listener.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -42,13 +41,13 @@ func resourcePingAccessHTTPSListenerSchema() map[string]*schema.Schema { } } -func resourcePingAccessHTTPSListenerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHTTPSListenerCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).HttpsListeners input := pa.GetHttpsListenersCommandInput{} result, _, err := svc.GetHttpsListenersCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to retrieving listener: %s", err)) + return diag.Errorf("unable to retrieve listener: %s", err) } name := d.Get("name").(string) @@ -58,22 +57,22 @@ func resourcePingAccessHTTPSListenerCreate(ctx context.Context, d *schema.Resour return resourcePingAccessHTTPSListenerReadResult(d, listener) } } - return diag.FromErr(fmt.Errorf("unable to manage
listener: %s", err)) + return diag.Errorf("unable to manage listener: %s", err) } -func resourcePingAccessHTTPSListenerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHTTPSListenerRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).HttpsListeners input := &pa.GetHttpsListenerCommandInput{ Id: d.Id(), } result, _, err := svc.GetHttpsListenerCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read listener: %s", err)) + return diag.Errorf("unable to read listener: %s", err) } return resourcePingAccessHTTPSListenerReadResult(d, result) } -func resourcePingAccessHTTPSListenerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHTTPSListenerUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).HttpsListeners input := pa.UpdateHttpsListenerInput{ Body: *resourcePingAccessHTTPSListenerReadData(d), @@ -82,12 +81,12 @@ func resourcePingAccessHTTPSListenerUpdate(ctx context.Context, d *schema.Resour result, _, err := svc.UpdateHttpsListener(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update listener: %s", err)) + return diag.Errorf("unable to update listener: %s", err) } return resourcePingAccessHTTPSListenerReadResult(d, result) } -func resourcePingAccessHTTPSListenerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessHTTPSListenerDelete(_ context.Context, _ *schema.ResourceData, _ interface{}) diag.Diagnostics { return nil } diff --git a/pingaccess/resource_pingaccess_identity_mapping.go b/pingaccess/resource_pingaccess_identity_mapping.go index a79dc221..9375cb1e 100644 --- a/pingaccess/resource_pingaccess_identity_mapping.go +++ b/pingaccess/resource_pingaccess_identity_mapping.go @@ -2,11 +2,9 @@ package pingaccess import ( "context" - "crypto/tls" "encoding/json" "fmt" "log" - "net/http" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -27,7 +25,10 @@ func resourcePingAccessIdentityMapping() *schema.Resource { Schema: resourcePingAccessIdentityMappingSchema(), CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error { svc := m.(*pingaccess.Client).IdentityMappings - desc, _, _ := svc.GetIdentityMappingDescriptorsCommand() + desc, _, err := svc.GetIdentityMappingDescriptorsCommand() + if err != nil { + return fmt.Errorf("unable to retrieve IdentityMapping descriptors %s", err) + } className := d.Get("class_name").(string) if err := descriptorsHasClassName(className, desc); err != nil { return err @@ -55,7 +56,7 @@ func resourcePingAccessIdentityMappingSchema() map[string]*schema.Schema { } } -func resourcePingAccessIdentityMappingCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessIdentityMappingCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).IdentityMappings input := pingaccess.AddIdentityMappingCommandInput{ Body: *resourcePingAccessIdentityMappingReadData(d), @@ -63,14 +64,14 @@ func resourcePingAccessIdentityMappingCreate(ctx context.Context, d *schema.Reso result, _, err := svc.AddIdentityMappingCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create IdentityMapping: %s", err)) + return diag.Errorf("unable to create IdentityMapping: %s", err) } d.SetId(result.Id.String()) return 
resourcePingAccessIdentityMappingReadResult(d, result, svc) } -func resourcePingAccessIdentityMappingRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessIdentityMappingRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).IdentityMappings input := &pingaccess.GetIdentityMappingCommandInput{ Id: d.Id(), @@ -78,12 +79,12 @@ func resourcePingAccessIdentityMappingRead(ctx context.Context, d *schema.Resour result, _, err := svc.GetIdentityMappingCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read IdentityMapping: %s", err)) + return diag.Errorf("unable to read IdentityMapping: %s", err) } return resourcePingAccessIdentityMappingReadResult(d, result, svc) } -func resourcePingAccessIdentityMappingUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessIdentityMappingUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).IdentityMappings input := pingaccess.UpdateIdentityMappingCommandInput{ Body: *resourcePingAccessIdentityMappingReadData(d), @@ -92,21 +93,19 @@ func resourcePingAccessIdentityMappingUpdate(ctx context.Context, d *schema.Reso result, _, err := svc.UpdateIdentityMappingCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update IdentityMapping: %s", err)) + return diag.Errorf("unable to update IdentityMapping: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessIdentityMappingReadResult(d, result, svc) } -func resourcePingAccessIdentityMappingDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessIdentityMappingDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).IdentityMappings - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - log.Printf("[INFO] ResourceID: %s", d.Id()) _, err := svc.DeleteIdentityMappingCommand(&pingaccess.DeleteIdentityMappingCommandInput{Id: d.Id()}) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete IdentityMapping: %s", err)) + return diag.Errorf("unable to delete IdentityMapping: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_keypair.go b/pingaccess/resource_pingaccess_keypair.go index 8c5a8d8a..2d8eb0f8 100644 --- a/pingaccess/resource_pingaccess_keypair.go +++ b/pingaccess/resource_pingaccess_keypair.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -46,13 +45,72 @@ func resourcePingAccessKeyPairSchema() map[string]*schema.Schema { }, }, "file_data": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, + RequiredWith: []string{"file_data", "password"}, }, "password": { - Type: schema.TypeString, - Required: true, - Sensitive: true, + Type: schema.TypeString, + Sensitive: true, + Optional: true, + ConflictsWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, + RequiredWith: []string{"file_data", "password"}, + }, + "city": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"file_data", 
"password"}, + RequiredWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, + }, + "common_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"file_data", "password"}, + RequiredWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, + }, + "country": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"file_data", "password"}, + RequiredWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, + }, + "key_algorithm": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"file_data", "password"}, + RequiredWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, + }, + "key_size": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"file_data", "password"}, + RequiredWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, + }, + "organization": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"file_data", "password"}, + RequiredWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, + }, + "organization_unit": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"file_data", "password"}, + RequiredWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, + }, + "state": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"file_data", "password"}, + RequiredWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, + }, + //"subject_alternative_names": {}, + "valid_days": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"file_data", "password"}, + RequiredWith: []string{"city", "common_name", "country", "key_algorithm", "key_size", "organization", "organization_unit", "state", "valid_days"}, }, "csr_pending": { Type: schema.TypeBool, @@ -63,7 +121,7 @@ func resourcePingAccessKeyPairSchema() map[string]*schema.Schema { Computed: true, }, "hsm_provider_id": { - Type: schema.TypeString, + Type: schema.TypeInt, Optional: true, Default: "0", }, @@ -106,40 +164,65 @@ func resourcePingAccessKeyPairSchema() map[string]*schema.Schema { } } -func resourcePingAccessKeyPairCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - input := pa.ImportKeyPairCommandInput{ - Body: pa.PKCS12FileImportDocView{ - Alias: String(d.Get("alias").(string)), - FileData: String(d.Get("file_data").(string)), - Password: String(d.Get("password").(string)), - ChainCertificates: &[]*string{}, - }, +func resourcePingAccessKeyPairCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + svc := m.(*pa.Client).KeyPairs + if _, ok := d.GetOk("file_data"); ok { + input := pa.ImportKeyPairCommandInput{ + Body: pa.PKCS12FileImportDocView{ + Alias: String(d.Get("alias").(string)), + FileData: String(d.Get("file_data").(string)), + Password: String(d.Get("password").(string)), + ChainCertificates: &[]*string{}, + HsmProviderId: 
Int(d.Get("hsm_provider_id").(int)), + }, + } + result, _, err := svc.ImportKeyPairCommand(&input) + if err != nil { + return diag.Errorf("unable to create KeyPair: %s", err) + } + + d.SetId(strconv.Itoa(*result.Id)) + return resourcePingAccessKeyPairReadResult(d, result) } - svc := m.(*pa.Client).KeyPairs + input := pa.GenerateKeyPairCommandInput{ + Body: pa.NewKeyPairConfigView{ + Alias: String(d.Get("alias").(string)), + City: String(d.Get("city").(string)), + CommonName: String(d.Get("common_name").(string)), + Country: String(d.Get("country").(string)), + KeyAlgorithm: String(d.Get("key_algorithm").(string)), + KeySize: Int(d.Get("key_size").(int)), + Organization: String(d.Get("organization").(string)), + OrganizationUnit: String(d.Get("organization_unit").(string)), + State: String(d.Get("state").(string)), + ValidDays: Int(d.Get("valid_days").(int)), + HsmProviderId: Int(d.Get("hsm_provider_id").(int)), + }, + } - result, _, err := svc.ImportKeyPairCommand(&input) + result, _, err := svc.GenerateKeyPairCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create KeyPair: %s", err)) + return diag.Errorf("unable to generate KeyPair: %s", err) } d.SetId(strconv.Itoa(*result.Id)) return resourcePingAccessKeyPairReadResult(d, result) } -func resourcePingAccessKeyPairRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessKeyPairRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).KeyPairs input := &pa.GetKeyPairCommandInput{ Id: d.Id(), } result, _, err := svc.GetKeyPairCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read KeyPair: %s", err)) + return diag.Errorf("unable to read KeyPair: %s", err) } return resourcePingAccessKeyPairReadResult(d, result) } -func resourcePingAccessKeyPairUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessKeyPairUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { input := pa.UpdateKeyPairCommandInput{ Body: pa.PKCS12FileImportDocView{ Alias: String(d.Get("alias").(string)), @@ -158,18 +241,18 @@ func resourcePingAccessKeyPairUpdate(ctx context.Context, d *schema.ResourceData result, _, err := svc.UpdateKeyPairCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update KeyPair: %s", err)) + return diag.Errorf("unable to update KeyPair: %s", err) } d.SetId(strconv.Itoa(*result.Id)) return resourcePingAccessKeyPairReadResult(d, result) } -func resourcePingAccessKeyPairDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessKeyPairDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).KeyPairs _, err := svc.DeleteKeyPairCommand(&pa.DeleteKeyPairCommandInput{Id: d.Id()}) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete KeyPair: %s", err)) + return diag.Errorf("unable to delete KeyPair: %s", err) } return nil } @@ -185,7 +268,7 @@ func resourcePingAccessKeyPairReadResult(d *schema.ResourceData, rv *pa.KeyPairV } setResourceDataBoolWithDiagnostic(d, "csr_pending", rv.CsrPending, &diags) setResourceDataIntWithDiagnostic(d, "expires", rv.Expires, &diags) - setResourceDataStringWithDiagnostic(d, "hsm_provider_id", String(strconv.Itoa(*rv.HsmProviderId)), &diags) + setResourceDataIntWithDiagnostic(d, "hsm_provider_id", rv.HsmProviderId, &diags) setResourceDataStringWithDiagnostic(d, 
"issuer_dn", rv.IssuerDn, &diags) setResourceDataStringWithDiagnostic(d, "md5sum", rv.Md5sum, &diags) setResourceDataStringWithDiagnostic(d, "serial_number", rv.SerialNumber, &diags) diff --git a/pingaccess/resource_pingaccess_keypair_test.go b/pingaccess/resource_pingaccess_keypair_test.go index 7734693c..5635297e 100644 --- a/pingaccess/resource_pingaccess_keypair_test.go +++ b/pingaccess/resource_pingaccess_keypair_test.go @@ -11,6 +11,7 @@ import ( func TestAccPingAccessKeyPair(t *testing.T) { resourceName := "pingaccess_keypair.test" + resourceNameGen := "pingaccess_keypair.test_generate" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -35,10 +36,20 @@ func TestAccPingAccessKeyPair(t *testing.T) { ), }, { - Config: testAccPingAccessKeyPairConfig(), + Config: testAccPingAccessKeyPairConfigGenerate(), Check: resource.ComposeTestCheckFunc( - testAccCheckPingAccessKeyPairExists(resourceName), - testAccCheckPingAccessKeyPairAttributes(resourceName), + testAccCheckPingAccessKeyPairExists(resourceNameGen), + testAccCheckPingAccessKeyPairAttributes(resourceNameGen), + resource.TestCheckResourceAttrSet(resourceNameGen, "md5sum"), + resource.TestCheckResourceAttrSet(resourceNameGen, "expires"), + resource.TestCheckResourceAttr(resourceNameGen, "issuer_dn", "CN=Test, OU=Test, O=Test, L=Test, ST=Test, C=GB"), + resource.TestCheckResourceAttrSet(resourceNameGen, "serial_number"), + resource.TestCheckResourceAttrSet(resourceNameGen, "sha1sum"), + resource.TestCheckResourceAttr(resourceNameGen, "signature_algorithm", "SHA256withRSA"), + resource.TestCheckResourceAttr(resourceNameGen, "status", "Valid"), + resource.TestCheckResourceAttr(resourceNameGen, "subject_dn", "CN=Test, OU=Test, O=Test, L=Test, ST=Test, C=GB"), + resource.TestCheckResourceAttr(resourceNameGen, "subject_cn", "Test"), + resource.TestCheckResourceAttrSet(resourceNameGen, "valid_from"), ), }, }, @@ -58,6 +69,22 @@ func testAccPingAccessKeyPairConfig() string { }` } +func testAccPingAccessKeyPairConfigGenerate() string { + return ` + resource "pingaccess_keypair" "test_generate" { + alias = "test2" + city = "Test" + common_name = "Test" + country = "GB" + key_algorithm = "RSA" + key_size = 2048 + organization = "Test" + organization_unit = "Test" + state = "Test" + valid_days = 365 + }` +} + func testAccCheckPingAccessKeyPairExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/pingaccess/resource_pingaccess_oauth_server.go b/pingaccess/resource_pingaccess_oauth_server.go index 4c6b3996..4a4817ca 100644 --- a/pingaccess/resource_pingaccess_oauth_server.go +++ b/pingaccess/resource_pingaccess_oauth_server.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" @@ -85,35 +84,35 @@ func resourcePingAccessOAuthServerCreate(ctx context.Context, d *schema.Resource return resourcePingAccessOAuthServerUpdate(ctx, d, m) } -func resourcePingAccessOAuthServerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessOAuthServerRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Oauth result, _, err := svc.GetAuthorizationServerCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to read OAuthServerSettings: %s", err)) + return diag.Errorf("unable to read OAuthServerSettings: %s", err) } return resourcePingAccessOAuthServerReadResult(d, result) } 
-func resourcePingAccessOAuthServerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessOAuthServerUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Oauth input := pa.UpdateAuthorizationServerCommandInput{ Body: *resourcePingAccessOAuthServerReadData(d), } result, _, err := svc.UpdateAuthorizationServerCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update OAuthServerSettings: %s", err)) + return diag.Errorf("unable to update OAuthServerSettings: %s", err) } d.SetId("oauth_server_settings") return resourcePingAccessOAuthServerReadResult(d, result) } -func resourcePingAccessOAuthServerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessOAuthServerDelete(_ context.Context, _ *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Oauth _, err := svc.DeleteAuthorizationServerCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to reset OAuthServerSettings: %s", err)) + return diag.Errorf("unable to reset OAuthServerSettings: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_pingfederate_admin.go b/pingaccess/resource_pingaccess_pingfederate_admin.go index 4d58baf0..1c18f095 100644 --- a/pingaccess/resource_pingaccess_pingfederate_admin.go +++ b/pingaccess/resource_pingaccess_pingfederate_admin.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" @@ -70,35 +69,35 @@ func resourcePingAccessPingFederateAdminCreate(ctx context.Context, d *schema.Re return resourcePingAccessPingFederateAdminUpdate(ctx, d, m) } -func resourcePingAccessPingFederateAdminRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessPingFederateAdminRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Pingfederate result, _, err := svc.GetPingFederateAdminCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to read PingFederateAdmin: %s", err)) + return diag.Errorf("unable to read PingFederateAdmin: %s", err) } return resourcePingAccessPingFederateAdminReadResult(d, result) } -func resourcePingAccessPingFederateAdminUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessPingFederateAdminUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Pingfederate input := pa.UpdatePingFederateAdminCommandInput{ Body: *resourcePingAccessPingFederateAdminReadData(d), } result, _, err := svc.UpdatePingFederateAdminCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update PingFederateAdmin: %s", err)) + return diag.Errorf("unable to update PingFederateAdmin: %s", err) } d.SetId("pingfederate_admin_settings") return resourcePingAccessPingFederateAdminReadResult(d, result) } -func resourcePingAccessPingFederateAdminDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessPingFederateAdminDelete(_ context.Context, _ *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Pingfederate _, err := svc.DeletePingFederateCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to reset PingFederateAdmin: %s", err)) + return diag.Errorf("unable to reset PingFederateAdmin: %s", err) } return nil } diff 
--git a/pingaccess/resource_pingaccess_pingfederate_admin_test.go b/pingaccess/resource_pingaccess_pingfederate_admin_test.go index 04996caf..bbb9367d 100644 --- a/pingaccess/resource_pingaccess_pingfederate_admin_test.go +++ b/pingaccess/resource_pingaccess_pingfederate_admin_test.go @@ -2,6 +2,7 @@ package pingaccess import ( "fmt" + "net/url" "os" "testing" @@ -14,19 +15,20 @@ import ( ) func TestAccPingAccessPingFederateAdmin(t *testing.T) { + u, _ := url.Parse(os.Getenv("PINGFEDERATE_TEST_IP")) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckPingAccessPingFederateAdminDestroy, Steps: []resource.TestStep{ { - Config: testAccPingAccessPingFederateAdminConfig(os.Getenv("PINGFEDERATE_TEST_IP"), true), + Config: testAccPingAccessPingFederateAdminConfig(u.Hostname(), u.Port(), "ON"), Check: resource.ComposeTestCheckFunc( testAccCheckPingAccessPingFederateAdminExists("pingaccess_pingfederate_admin.demo"), ), }, { - Config: testAccPingAccessPingFederateAdminConfig(os.Getenv("PINGFEDERATE_TEST_IP"), true), + Config: testAccPingAccessPingFederateAdminConfig(u.Hostname(), u.Port(), "OFF"), Check: resource.ComposeTestCheckFunc( testAccCheckPingAccessPingFederateAdminExists("pingaccess_pingfederate_admin.demo"), ), @@ -39,7 +41,7 @@ func testAccCheckPingAccessPingFederateAdminDestroy(s *terraform.State) error { return nil } -func testAccPingAccessPingFederateAdminConfig(issuer string, proxy bool) string { +func testAccPingAccessPingFederateAdminConfig(host, port, configChange string) string { return fmt.Sprintf(` resource "pingaccess_pingfederate_admin" "demo" { admin_username = "Administrator" @@ -47,13 +49,13 @@ func testAccPingAccessPingFederateAdminConfig(issuer string, proxy bool) string value = "2FederateM0re" } base_path = "/path" - audit_level = "ON" + audit_level = "%s" host = "%s" - port = 9031 - secure = false + port = %s + secure = true trusted_certificate_group_id = 2 - use_proxy = %v - }`, issuer, proxy) + use_proxy = false + }`, configChange, host, port) } func testAccCheckPingAccessPingFederateAdminExists(n string) resource.TestCheckFunc { diff --git a/pingaccess/resource_pingaccess_pingfederate_oauth.go b/pingaccess/resource_pingaccess_pingfederate_oauth.go index 4d466276..8b1e5f89 100644 --- a/pingaccess/resource_pingaccess_pingfederate_oauth.go +++ b/pingaccess/resource_pingaccess_pingfederate_oauth.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" @@ -70,35 +69,35 @@ func resourcePingAccessPingFederateOAuthCreate(ctx context.Context, d *schema.Re return resourcePingAccessPingFederateOAuthUpdate(ctx, d, m) } -func resourcePingAccessPingFederateOAuthRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessPingFederateOAuthRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Pingfederate result, _, err := svc.GetPingFederateAccessTokensCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to reset PingFederateOAuth: %s", err)) + return diag.Errorf("unable to read PingFederateOAuth: %s", err) } return resourcePingAccessPingFederateOAuthReadResult(d, result) } -func resourcePingAccessPingFederateOAuthUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessPingFederateOAuthUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics 
{ svc := m.(*pa.Client).Pingfederate input := pa.UpdatePingFederateAccessTokensCommandInput{ Body: *resourcePingAccessPingFederateOAuthReadData(d), } result, _, err := svc.UpdatePingFederateAccessTokensCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to reset PingFederateOAuth: %s", err)) + return diag.Errorf("unable to update PingFederateOAuth: %s", err) } d.SetId("pingfederate_oauth_settings") return resourcePingAccessPingFederateOAuthReadResult(d, result) } -func resourcePingAccessPingFederateOAuthDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessPingFederateOAuthDelete(_ context.Context, _ *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Pingfederate _, err := svc.DeletePingFederateAccessTokensCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to reset PingFederateOAuth: %s", err)) + return diag.Errorf("unable to reset PingFederateOAuth: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_pingfederate_runtime.go b/pingaccess/resource_pingaccess_pingfederate_runtime.go index 352e7e98..71a14301 100644 --- a/pingaccess/resource_pingaccess_pingfederate_runtime.go +++ b/pingaccess/resource_pingaccess_pingfederate_runtime.go @@ -2,8 +2,6 @@ package pingaccess import ( "context" - "fmt" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -25,23 +23,96 @@ func resourcePingAccessPingFederateRuntime() *schema.Resource { func resourcePingAccessPingFederateRuntimeSchema() map[string]*schema.Schema { return map[string]*schema.Schema{ + //New API "description": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + RequiredWith: []string{"issuer"}, + ConflictsWith: []string{"audit_level", "back_channel_base_path", "back_channel_secure", "base_path", "expected_hostname", "host", "port", "secure", "targets"}, }, "issuer": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"audit_level", "back_channel_base_path", "back_channel_secure", "base_path", "expected_hostname", "host", "port", "secure", "targets"}, + }, + "sts_token_exchange_endpoint": { + Type: schema.TypeString, + Optional: true, + RequiredWith: []string{"issuer"}, + ConflictsWith: []string{"audit_level", "back_channel_base_path", "back_channel_secure", "base_path", "expected_hostname", "host", "port", "secure", "targets"}, + }, + + //Deprecated API + "audit_level": { + Type: schema.TypeString, + Optional: true, + Default: "ON", + ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, + Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + }, + "back_channel_base_path": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, + Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + }, + "back_channel_secure": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, + Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. 
If you are using 6.x please use 'issuer' configuration instead.", + }, + "base_path": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, + Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + }, + "expected_hostname": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, + Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + }, + "host": { + Type: schema.TypeString, + Optional: true, + RequiredWith: []string{"host", "port"}, + ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, + Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + }, + "port": { + Type: schema.TypeInt, + Optional: true, + RequiredWith: []string{"host", "port"}, + ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, + Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + }, + "secure": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, + Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + }, + "targets": { + Type: schema.TypeSet, + Optional: true, + MinItems: 0, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, + Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", }, + + //Common "skip_hostname_verification": { Type: schema.TypeBool, Optional: true, Default: true, }, - "sts_token_exchange_endpoint": { - Type: schema.TypeString, - Optional: true, - }, "trusted_certificate_group_id": { Type: schema.TypeInt, Optional: true, @@ -65,35 +136,66 @@ func resourcePingAccessPingFederateRuntimeCreate(ctx context.Context, d *schema. 
return resourcePingAccessPingFederateRuntimeUpdate(ctx, d, m) } -func resourcePingAccessPingFederateRuntimeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessPingFederateRuntimeRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Pingfederate - result, _, err := svc.GetPingFederateRuntimeCommand() + if _, ok := d.GetOk("issuer"); ok { + result, _, err := svc.GetPingFederateRuntimeCommand() + if err != nil { + return diag.Errorf("unable to read PingFederateRuntime: %s", err) + } + + return resourcePingAccessPingFederateRuntimeReadResult(d, result) + } + + result, _, err := svc.GetPingFederateCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to read PingFederateRuntime: %s", err)) + return diag.Errorf("unable to read deprecated PingFederateRuntime: %s", err) } - return resourcePingAccessPingFederateRuntimeReadResult(d, result) + return resourcePingAccessPingFederateDeprecatedRuntimeReadResult(d, result) } -func resourcePingAccessPingFederateRuntimeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessPingFederateRuntimeUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Pingfederate - input := pa.UpdatePingFederateRuntimeCommandInput{ - Body: *resourcePingAccessPingFederateRuntimeReadData(d), + + if _, ok := d.GetOk("issuer"); ok { + input := pa.UpdatePingFederateRuntimeCommandInput{ + Body: *resourcePingAccessPingFederateRuntimeReadData(d), + } + result, _, err := svc.UpdatePingFederateRuntimeCommand(&input) + if err != nil { + return diag.Errorf("unable to update PingFederateRuntime: %s", err) + } + + d.SetId("pingfederate_runtime_settings") + return resourcePingAccessPingFederateRuntimeReadResult(d, result) + } + + input := pa.UpdatePingFederateCommandInput{ + Body: *resourcePingAccessPingFederateDeprecatedRuntimeReadData(d), } - result, _, err := svc.UpdatePingFederateRuntimeCommand(&input) + result, _, err := svc.UpdatePingFederateCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update PingFederateRuntime: %s", err)) + return diag.Errorf("error updating deprecated PingFederateRuntime: %s", err) } d.SetId("pingfederate_runtime_settings") - return resourcePingAccessPingFederateRuntimeReadResult(d, result) + return resourcePingAccessPingFederateDeprecatedRuntimeReadResult(d, result) } -func resourcePingAccessPingFederateRuntimeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessPingFederateRuntimeDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Pingfederate + if _, ok := d.GetOk("issuer"); ok { + _, err := svc.DeletePingFederateRuntimeCommand() + if err != nil { + return diag.Errorf("unable to reset PingFederateRuntime: %s", err) + } + return nil + } + _, err := svc.DeletePingFederateCommand() if err != nil { - return diag.FromErr(fmt.Errorf("unable to reset PingFederateRuntime: %s", err)) + return diag.Errorf("unable to reset deprecated PingFederateRuntime: %s", err) } return nil } @@ -110,9 +212,34 @@ func resourcePingAccessPingFederateRuntimeReadResult(d *schema.ResourceData, inp return diags } +func resourcePingAccessPingFederateDeprecatedRuntimeReadResult(d *schema.ResourceData, input *pa.PingFederateRuntimeView) diag.Diagnostics { + var diags diag.Diagnostics + setResourceDataStringWithDiagnostic(d, "audit_level", 
input.AuditLevel, &diags) + setResourceDataStringWithDiagnostic(d, "back_channel_base_path", input.BackChannelBasePath, &diags) + setResourceDataBoolWithDiagnostic(d, "back_channel_secure", input.BackChannelSecure, &diags) + setResourceDataStringWithDiagnostic(d, "base_path", input.BasePath, &diags) + setResourceDataStringWithDiagnostic(d, "expected_hostname", input.ExpectedHostname, &diags) + setResourceDataStringWithDiagnostic(d, "host", input.Host, &diags) + setResourceDataIntWithDiagnostic(d, "port", input.Port, &diags) + setResourceDataBoolWithDiagnostic(d, "secure", input.Secure, &diags) + setResourceDataBoolWithDiagnostic(d, "skip_hostname_verification", input.SkipHostnameVerification, &diags) + setResourceDataIntWithDiagnostic(d, "trusted_certificate_group_id", input.TrustedCertificateGroupId, &diags) + setResourceDataBoolWithDiagnostic(d, "use_proxy", input.UseProxy, &diags) + setResourceDataBoolWithDiagnostic(d, "use_slo", input.UseSlo, &diags) + if input.Targets != nil { + if err := d.Set("targets", input.Targets); err != nil { + diags = append(diags, diag.FromErr(err)...) + } + } + return diags +} + func resourcePingAccessPingFederateRuntimeReadData(d *schema.ResourceData) *pa.PingFederateMetadataRuntimeView { pfRuntime := &pa.PingFederateMetadataRuntimeView{ - Issuer: String(d.Get("issuer").(string)), + Issuer: String(d.Get("issuer").(string)), + TrustedCertificateGroupId: Int(d.Get("trusted_certificate_group_id").(int)), + UseProxy: Bool(d.Get("use_proxy").(bool)), + UseSlo: Bool(d.Get("use_slo").(bool)), } if v, ok := d.GetOk("description"); ok { pfRuntime.Description = String(v.(string)) @@ -121,8 +248,51 @@ func resourcePingAccessPingFederateRuntimeReadData(d *schema.ResourceData) *pa.P if v, ok := d.GetOk("sts_token_exchange_endpoint"); ok { pfRuntime.StsTokenExchangeEndpoint = String(v.(string)) } - pfRuntime.TrustedCertificateGroupId = Int(d.Get("trusted_certificate_group_id").(int)) - pfRuntime.UseProxy = Bool(d.Get("use_proxy").(bool)) - pfRuntime.UseSlo = Bool(d.Get("use_slo").(bool)) + + return pfRuntime +} + +func resourcePingAccessPingFederateDeprecatedRuntimeReadData(d *schema.ResourceData) *pa.PingFederateRuntimeView { + pfRuntime := &pa.PingFederateRuntimeView{ + Host: String(d.Get("host").(string)), + Port: Int(d.Get("port").(int)), + TrustedCertificateGroupId: Int(d.Get("trusted_certificate_group_id").(int)), + UseProxy: Bool(d.Get("use_proxy").(bool)), + UseSlo: Bool(d.Get("use_slo").(bool)), + } + + if v, ok := d.GetOk("audit_level"); ok { + pfRuntime.AuditLevel = String(v.(string)) + } + + if v, ok := d.GetOk("back_channel_base_path"); ok { + pfRuntime.BackChannelBasePath = String(v.(string)) + } + + if v, ok := d.GetOk("back_channel_secure"); ok { + pfRuntime.BackChannelSecure = Bool(v.(bool)) + } + + if v, ok := d.GetOk("base_path"); ok { + pfRuntime.BasePath = String(v.(string)) + } + + if v, ok := d.GetOk("expected_hostname"); ok { + pfRuntime.ExpectedHostname = String(v.(string)) + } + + if v, ok := d.GetOk("secure"); ok { + pfRuntime.Secure = Bool(v.(bool)) + } + + if v, ok := d.GetOk("skip_hostname_verification"); ok { + pfRuntime.SkipHostnameVerification = Bool(v.(bool)) + } + + if v, ok := d.GetOk("targets"); ok { + targets := expandStringList(v.(*schema.Set).List()) + pfRuntime.Targets = &targets + } + return pfRuntime } diff --git a/pingaccess/resource_pingaccess_pingfederate_runtime_test.go b/pingaccess/resource_pingaccess_pingfederate_runtime_test.go index 1f823449..670c99d8 100644 --- 
a/pingaccess/resource_pingaccess_pingfederate_runtime_test.go +++ b/pingaccess/resource_pingaccess_pingfederate_runtime_test.go @@ -2,6 +2,7 @@ package pingaccess import ( "fmt" + "net/url" "os" "testing" @@ -14,64 +15,144 @@ import ( ) func TestAccPingAccessPingFederateRuntime(t *testing.T) { - resource.ParallelTest(t, resource.TestCase{ + resourceName := "pingaccess_pingfederate_runtime.demo" + u, _ := url.Parse(os.Getenv("PINGFEDERATE_TEST_IP")) + + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckPingAccessPingFederateRuntimeDestroy, Steps: []resource.TestStep{ { - Config: testAccPingAccessPingFederateRuntimeConfig(fmt.Sprintf("https://%s:9031", os.Getenv("PINGFEDERATE_TEST_IP"))), + Config: testAccPingAccessPingFederateRuntimeConfig(os.Getenv("PINGFEDERATE_TEST_IP"), "foo"), + Check: resource.ComposeTestCheckFunc( + testAccCheckPingAccessPingFederateRuntimeExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "description", "foo"), + resource.TestCheckResourceAttr(resourceName, "skip_hostname_verification", "true"), + resource.TestCheckResourceAttr(resourceName, "sts_token_exchange_endpoint", "https://foo/bar"), + resource.TestCheckResourceAttr(resourceName, "use_slo", "false"), + resource.TestCheckResourceAttr(resourceName, "trusted_certificate_group_id", "2"), + resource.TestCheckResourceAttr(resourceName, "use_proxy", "true"), + ), + }, + { + Config: testAccPingAccessPingFederateRuntimeConfig(os.Getenv("PINGFEDERATE_TEST_IP"), "bar"), + Check: resource.ComposeTestCheckFunc( + testAccCheckPingAccessPingFederateRuntimeExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "description", "bar"), + resource.TestCheckResourceAttr(resourceName, "skip_hostname_verification", "true"), + resource.TestCheckResourceAttr(resourceName, "sts_token_exchange_endpoint", "https://foo/bar"), + resource.TestCheckResourceAttr(resourceName, "use_slo", "false"), + resource.TestCheckResourceAttr(resourceName, "trusted_certificate_group_id", "2"), + resource.TestCheckResourceAttr(resourceName, "use_proxy", "true"), + ), + }, + { + Config: testAccPingAccessPingFederateDeprecatedRuntimeConfig(u.Hostname(), u.Port(), "ON"), Check: resource.ComposeTestCheckFunc( - testAccCheckPingAccessPingFederateRuntimeExists("pingaccess_pingfederate_runtime.demo_pfr"), + testAccCheckPingAccessPingFederateDeprecatedRuntimeExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "skip_hostname_verification", "true"), + resource.TestCheckResourceAttr(resourceName, "use_slo", "false"), + resource.TestCheckResourceAttr(resourceName, "trusted_certificate_group_id", "2"), + resource.TestCheckResourceAttr(resourceName, "use_proxy", "true"), + resource.TestCheckResourceAttr(resourceName, "audit_level", "ON"), + resource.TestCheckResourceAttr(resourceName, "back_channel_secure", "false"), + resource.TestCheckResourceAttr(resourceName, "host", u.Hostname()), + resource.TestCheckResourceAttr(resourceName, "port", u.Port()), + resource.TestCheckResourceAttr(resourceName, "secure", "false"), ), }, { - Config: testAccPingAccessPingFederateRuntimeConfig(fmt.Sprintf("https://%s:9031", os.Getenv("PINGFEDERATE_TEST_IP"))), + Config: testAccPingAccessPingFederateDeprecatedRuntimeConfig(u.Hostname(), u.Port(), "OFF"), Check: resource.ComposeTestCheckFunc( - testAccCheckPingAccessPingFederateRuntimeExists("pingaccess_pingfederate_runtime.demo_pfr"), + testAccCheckPingAccessPingFederateDeprecatedRuntimeExists(resourceName), 
+ resource.TestCheckResourceAttr(resourceName, "skip_hostname_verification", "true"), + resource.TestCheckResourceAttr(resourceName, "use_slo", "false"), + resource.TestCheckResourceAttr(resourceName, "trusted_certificate_group_id", "2"), + resource.TestCheckResourceAttr(resourceName, "use_proxy", "true"), + resource.TestCheckResourceAttr(resourceName, "audit_level", "OFF"), + resource.TestCheckResourceAttr(resourceName, "back_channel_secure", "false"), + resource.TestCheckResourceAttr(resourceName, "host", u.Hostname()), + resource.TestCheckResourceAttr(resourceName, "port", u.Port()), + resource.TestCheckResourceAttr(resourceName, "secure", "false"), ), }, }, }) } -func testAccCheckPingAccessPingFederateRuntimeDestroy(s *terraform.State) error { +func testAccCheckPingAccessPingFederateRuntimeDestroy(_ *terraform.State) error { return nil } -func testAccPingAccessPingFederateRuntimeConfig(issuer string) string { +func testAccPingAccessPingFederateRuntimeConfig(issuer, configChange string) string { return fmt.Sprintf(` - resource "pingaccess_pingfederate_runtime" "demo_pfr" { - description = "foo" + resource "pingaccess_pingfederate_runtime" "demo" { + description = "%s" issuer = "%s" skip_hostname_verification = true sts_token_exchange_endpoint = "https://foo/bar" use_slo = false trusted_certificate_group_id = 2 use_proxy = true - }`, issuer) + }`, configChange, issuer) } + +func testAccPingAccessPingFederateDeprecatedRuntimeConfig(host, port, configChange string) string { + return fmt.Sprintf(` + resource "pingaccess_pingfederate_runtime" "demo" { + host = "%s" + port = %s + audit_level = "%s" + skip_hostname_verification = true + use_slo = false + trusted_certificate_group_id = 2 + use_proxy = true + }`, host, port, configChange) } func testAccCheckPingAccessPingFederateRuntimeExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" || rs.Primary.ID == "0" { - return fmt.Errorf("No third party service ID is set") + return fmt.Errorf("not found: %s", n) } conn := testAccProvider.Meta().(*pa.Client).Pingfederate result, _, err := conn.GetPingFederateRuntimeCommand() if err != nil { - return fmt.Errorf("Error: PingFederateRuntime (%s) not found", n) + return fmt.Errorf("error: PingFederateRuntime (%s) not found", n) } if *result.Issuer != rs.Primary.Attributes["issuer"] { - return fmt.Errorf("Error: PingFederateRuntime response (%s) didnt match state (%s)", *result.Issuer, rs.Primary.Attributes["issuer"]) + return fmt.Errorf("error: PingFederateRuntime response (%s) didn't match state (%s)", *result.Issuer, rs.Primary.Attributes["issuer"]) + } + + return nil + } +} + +func testAccCheckPingAccessPingFederateDeprecatedRuntimeExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("not found: %s", n) + } + + conn := testAccProvider.Meta().(*pa.Client).Pingfederate + result, resp, err := conn.GetPingFederateCommand() + + if err != nil { + return fmt.Errorf("error: PingFederateDeprecatedRuntime (%s) not found", n) + } + + if result == nil { + return fmt.Errorf("error: PingFederateDeprecatedRuntime (%s) returned no response: %v", n, resp) + } + + if *result.Host != rs.Primary.Attributes["host"] { + return fmt.Errorf("error: PingFederateDeprecatedRuntime response (%s) didn't match state (%s)", *result.Host, rs.Primary.Attributes["host"]) } return nil @@ -116,3
+197,50 @@ func Test_resourcePingAccessPingFederateRuntimeReadData(t *testing.T) { }) } } + +func Test_resourcePingAccessPingFederateDeprecatedRuntimeReadData(t *testing.T) { + cases := []struct { + PingFederateRuntime pa.PingFederateRuntimeView + }{ + { + PingFederateRuntime: pa.PingFederateRuntimeView{ + Host: String("localhost"), + Port: Int(9031), + SkipHostnameVerification: Bool(true), + UseProxy: Bool(false), + UseSlo: Bool(false), + AuditLevel: String("ON"), + TrustedCertificateGroupId: Int(0), + }, + }, + { + PingFederateRuntime: pa.PingFederateRuntimeView{ + Host: String("localhost"), + Port: Int(9031), + AuditLevel: String("ON"), + BackChannelBasePath: String("/foo"), + BackChannelSecure: Bool(true), + BasePath: String("/bar"), + ExpectedHostname: String("hosty"), + Secure: Bool(true), + Targets: &[]*string{String("t1:9031")}, + SkipHostnameVerification: Bool(true), + TrustedCertificateGroupId: Int(2), + UseProxy: Bool(true), + UseSlo: Bool(true), + }, + }, + } + for i, tc := range cases { + t.Run(fmt.Sprintf("tc:%v", i), func(t *testing.T) { + + resourceSchema := resourcePingAccessPingFederateRuntimeSchema() + resourceLocalData := schema.TestResourceDataRaw(t, resourceSchema, map[string]interface{}{}) + resourcePingAccessPingFederateDeprecatedRuntimeReadResult(resourceLocalData, &tc.PingFederateRuntime) + + if got := *resourcePingAccessPingFederateDeprecatedRuntimeReadData(resourceLocalData); !cmp.Equal(got, tc.PingFederateRuntime) { + t.Errorf("resourcePingAccessPingFederateDeprecatedRuntimeReadData() = %v", cmp.Diff(got, tc.PingFederateRuntime)) + } + }) + } +} diff --git a/pingaccess/resource_pingaccess_rule.go b/pingaccess/resource_pingaccess_rule.go index c01994f4..cf540f6f 100644 --- a/pingaccess/resource_pingaccess_rule.go +++ b/pingaccess/resource_pingaccess_rule.go @@ -23,7 +23,10 @@ func resourcePingAccessRule() *schema.Resource { Schema: resourcePingAccessRuleSchema(), CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error { svc := m.(*pingaccess.Client).Rules - desc, _, _ := svc.GetRuleDescriptorsCommand() + desc, _, err := svc.GetRuleDescriptorsCommand() + if err != nil { + return fmt.Errorf("unable to retrieve Rule descriptors %s", err) + } className := d.Get("class_name").(string) if err := ruleDescriptorsHasClassName(className, desc); err != nil { return err @@ -52,7 +55,7 @@ func resourcePingAccessRuleSchema() map[string]*schema.Schema { } } -func resourcePingAccessRuleCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessRuleCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).Rules input := pingaccess.AddRuleCommandInput{ Body: *resourcePingAccessRuleReadData(d), @@ -60,14 +63,14 @@ func resourcePingAccessRuleCreate(ctx context.Context, d *schema.ResourceData, m result, _, err := svc.AddRuleCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create Rule: %s", err)) + return diag.Errorf("unable to create Rule: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessRuleReadResult(d, result, svc) } -func resourcePingAccessRuleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessRuleRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).Rules input := &pingaccess.GetRuleCommandInput{ Id: d.Id(), @@ -75,13 +78,13 @@ func resourcePingAccessRuleRead(ctx context.Context, d
*schema.ResourceData, m i result, _, err := svc.GetRuleCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read Rule: %s", err)) + return diag.Errorf("unable to read Rule: %s", err) } return resourcePingAccessRuleReadResult(d, result, svc) } -func resourcePingAccessRuleUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessRuleUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).Rules input := pingaccess.UpdateRuleCommandInput{ Body: *resourcePingAccessRuleReadData(d), @@ -90,18 +93,18 @@ func resourcePingAccessRuleUpdate(ctx context.Context, d *schema.ResourceData, m result, _, err := svc.UpdateRuleCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update Rule: %s", err)) + return diag.Errorf("unable to update Rule: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessRuleReadResult(d, result, svc) } -func resourcePingAccessRuleDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessRuleDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).Rules _, err := svc.DeleteRuleCommand(&pingaccess.DeleteRuleCommandInput{Id: d.Id()}) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete Rule: %s", err)) + return diag.Errorf("unable to delete Rule: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_rulesets.go b/pingaccess/resource_pingaccess_rulesets.go index 02dceaae..91fce526 100644 --- a/pingaccess/resource_pingaccess_rulesets.go +++ b/pingaccess/resource_pingaccess_rulesets.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -77,7 +76,7 @@ func resourcePingAccessRuleSetCreate(ctx context.Context, d *schema.ResourceData result, _, err := svc.AddRuleSetCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create RuleSet: %s", err)) + return diag.Errorf("unable to create RuleSet: %s", err) } d.SetId(result.Id.String()) @@ -90,7 +89,7 @@ func resourcePingAccessRuleSetRead(ctx context.Context, d *schema.ResourceData, Id: d.Id(), }) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read RuleSet: %s", err)) + return diag.Errorf("unable to read RuleSet: %s", err) } return resourcePingAccessRuleSetReadResult(d, result) } @@ -122,7 +121,7 @@ func resourcePingAccessRuleSetUpdate(ctx context.Context, d *schema.ResourceData result, _, err := svc.UpdateRuleSetCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update RuleSet: %s", err)) + return diag.Errorf("unable to update RuleSet: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessRuleSetReadResult(d, result) @@ -133,7 +132,7 @@ func resourcePingAccessRuleSetDelete(ctx context.Context, d *schema.ResourceData _, err := svc.DeleteRuleSetCommand(&pingaccess.DeleteRuleSetCommandInput{Id: d.Id()}) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete RuleSet: %s", err)) + return diag.Errorf("unable to delete RuleSet: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_site.go b/pingaccess/resource_pingaccess_site.go index 4f9b23c8..8d86e59b 100644 --- a/pingaccess/resource_pingaccess_site.go +++ b/pingaccess/resource_pingaccess_site.go @@ -2,9 +2,6 @@ package pingaccess import ( "context" - "crypto/tls" - "fmt" - "net/http" 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -100,7 +97,7 @@ func resourcePingAccessSiteSchema() map[string]*schema.Schema { } } -func resourcePingAccessSiteCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessSiteCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Sites input := pa.AddSiteCommandInput{ Body: *resourcePingAccessSiteReadData(d), @@ -108,26 +105,26 @@ func resourcePingAccessSiteCreate(ctx context.Context, d *schema.ResourceData, m result, _, err := svc.AddSiteCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create Site: %s", err)) + return diag.Errorf("unable to create Site: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessSiteReadResult(d, result) } -func resourcePingAccessSiteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessSiteRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Sites input := &pa.GetSiteCommandInput{ Id: d.Id(), } result, _, err := svc.GetSiteCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read Site: %s", err)) + return diag.Errorf("unable to read Site: %s", err) } return resourcePingAccessSiteReadResult(d, result) } -func resourcePingAccessSiteUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessSiteUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Sites input := pa.UpdateSiteCommandInput{ Body: *resourcePingAccessSiteReadData(d), @@ -135,22 +132,20 @@ func resourcePingAccessSiteUpdate(ctx context.Context, d *schema.ResourceData, m } result, _, err := svc.UpdateSiteCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update Site: %s", err)) + return diag.Errorf("unable to update Site: %s", err) } return resourcePingAccessSiteReadResult(d, result) } -func resourcePingAccessSiteDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessSiteDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).Sites - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - input := &pa.DeleteSiteCommandInput{ Id: d.Id(), } _, err := svc.DeleteSiteCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete Site: %s", err)) + return diag.Errorf("unable to delete Site: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_site_authenticator.go b/pingaccess/resource_pingaccess_site_authenticator.go index a179b8fb..89e3edf4 100644 --- a/pingaccess/resource_pingaccess_site_authenticator.go +++ b/pingaccess/resource_pingaccess_site_authenticator.go @@ -2,10 +2,8 @@ package pingaccess import ( "context" - "crypto/tls" "encoding/json" "fmt" - "net/http" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -24,7 +22,10 @@ func resourcePingAccessSiteAuthenticator() *schema.Resource { Schema: resourcePingAccessSiteAuthenticatorSchema(), CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error { svc := m.(*pingaccess.Client).SiteAuthenticators - desc, _, _ := svc.GetSiteAuthenticatorDescriptorsCommand() + 
desc, _, err := svc.GetSiteAuthenticatorDescriptorsCommand() + if err != nil { + return fmt.Errorf("unable to retrieve SiteAuthenticator descriptors %s", err) + } className := d.Get("class_name").(string) if err := descriptorsHasClassName(className, desc); err != nil { return err @@ -52,7 +53,7 @@ func resourcePingAccessSiteAuthenticatorSchema() map[string]*schema.Schema { } } -func resourcePingAccessSiteAuthenticatorCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessSiteAuthenticatorCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).SiteAuthenticators input := pingaccess.AddSiteAuthenticatorCommandInput{ Body: *resourcePingAccessSiteAuthenticatorReadData(d), @@ -60,26 +61,26 @@ func resourcePingAccessSiteAuthenticatorCreate(ctx context.Context, d *schema.Re result, _, err := svc.AddSiteAuthenticatorCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create SiteAuthenticator: %s", err)) + return diag.Errorf("unable to create SiteAuthenticator: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessSiteAuthenticatorReadResult(d, result, svc) } -func resourcePingAccessSiteAuthenticatorRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessSiteAuthenticatorRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).SiteAuthenticators input := &pingaccess.GetSiteAuthenticatorCommandInput{ Id: d.Id(), } result, _, err := svc.GetSiteAuthenticatorCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read SiteAuthenticator: %s", err)) + return diag.Errorf("unable to read SiteAuthenticator: %s", err) } return resourcePingAccessSiteAuthenticatorReadResult(d, result, svc) } -func resourcePingAccessSiteAuthenticatorUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessSiteAuthenticatorUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).SiteAuthenticators input := pingaccess.UpdateSiteAuthenticatorCommandInput{ Body: *resourcePingAccessSiteAuthenticatorReadData(d), @@ -87,22 +88,20 @@ func resourcePingAccessSiteAuthenticatorUpdate(ctx context.Context, d *schema.Re } result, _, err := svc.UpdateSiteAuthenticatorCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update SiteAuthenticator: %s", err)) + return diag.Errorf("unable to update SiteAuthenticator: %s", err) } return resourcePingAccessSiteAuthenticatorReadResult(d, result, svc) } -func resourcePingAccessSiteAuthenticatorDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessSiteAuthenticatorDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pingaccess.Client).SiteAuthenticators - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - input := &pingaccess.DeleteSiteAuthenticatorCommandInput{ Id: d.Id(), } _, err := svc.DeleteSiteAuthenticatorCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete SiteAuthenticator: %s", err)) + return diag.Errorf("unable to delete SiteAuthenticator: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_third_party_service.go b/pingaccess/resource_pingaccess_third_party_service.go index fc74f558..ec8e5782 100644 --- 
a/pingaccess/resource_pingaccess_third_party_service.go +++ b/pingaccess/resource_pingaccess_third_party_service.go @@ -2,7 +2,7 @@ package pingaccess import ( "context" - "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" @@ -89,7 +89,7 @@ func resourcePingAccessThirdPartyServiceCreate(ctx context.Context, d *schema.Re result, _, err := svc.AddThirdPartyServiceCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create ThirdPartyService: %s", err)) + return diag.Errorf("unable to create ThirdPartyService: %s", err) } @@ -104,7 +104,7 @@ func resourcePingAccessThirdPartyServiceRead(ctx context.Context, d *schema.Reso } result, _, err := svc.GetThirdPartyServiceCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read ThirdPartyService: %s", err)) + return diag.Errorf("unable to read ThirdPartyService: %s", err) } return resourcePingAccessThirdPartyServiceReadResult(d, result) } @@ -117,7 +117,7 @@ func resourcePingAccessThirdPartyServiceUpdate(ctx context.Context, d *schema.Re } result, _, err := svc.UpdateThirdPartyServiceCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update ThirdPartyService: %s", err)) + return diag.Errorf("unable to update ThirdPartyService: %s", err) } return resourcePingAccessThirdPartyServiceReadResult(d, result) } @@ -131,7 +131,7 @@ func resourcePingAccessThirdPartyServiceDelete(ctx context.Context, d *schema.Re _, err := svc.DeleteThirdPartyServiceCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete ThirdPartyService: %s", err)) + return diag.Errorf("unable to delete ThirdPartyService: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_trusted_certificate_groups.go b/pingaccess/resource_pingaccess_trusted_certificate_groups.go index 97717b07..a4199214 100644 --- a/pingaccess/resource_pingaccess_trusted_certificate_groups.go +++ b/pingaccess/resource_pingaccess_trusted_certificate_groups.go @@ -2,9 +2,6 @@ package pingaccess import ( "context" - "crypto/tls" - "fmt" - "net/http" "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -62,7 +59,7 @@ func resourcePingAccessTrustedCertificateGroupsSchema() map[string]*schema.Schem } } -func resourcePingAccessTrustedCertificateGroupsCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessTrustedCertificateGroupsCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).TrustedCertificateGroups input := pa.AddTrustedCertificateGroupCommandInput{ Body: *resourcePingAccessTrustedCertificateGroupsReadData(d), @@ -70,26 +67,26 @@ func resourcePingAccessTrustedCertificateGroupsCreate(ctx context.Context, d *sc result, _, err := svc.AddTrustedCertificateGroupCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create TrustedCertificateGroup: %s", err)) + return diag.Errorf("unable to create TrustedCertificateGroup: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessTrustedCertificateGroupsReadResult(d, result) } -func resourcePingAccessTrustedCertificateGroupsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessTrustedCertificateGroupsRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).TrustedCertificateGroups 
input := &pa.GetTrustedCertificateGroupCommandInput{ Id: d.Id(), } result, _, err := svc.GetTrustedCertificateGroupCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read TrustedCertificateGroup: %s", err)) + return diag.Errorf("unable to read TrustedCertificateGroup: %s", err) } return resourcePingAccessTrustedCertificateGroupsReadResult(d, result) } -func resourcePingAccessTrustedCertificateGroupsUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessTrustedCertificateGroupsUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).TrustedCertificateGroups input := pa.UpdateTrustedCertificateGroupCommandInput{ Body: *resourcePingAccessTrustedCertificateGroupsReadData(d), @@ -97,22 +94,20 @@ func resourcePingAccessTrustedCertificateGroupsUpdate(ctx context.Context, d *sc } result, _, err := svc.UpdateTrustedCertificateGroupCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update TrustedCertificateGroup: %s", err)) + return diag.Errorf("unable to update TrustedCertificateGroup: %s", err) } return resourcePingAccessTrustedCertificateGroupsReadResult(d, result) } -func resourcePingAccessTrustedCertificateGroupsDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessTrustedCertificateGroupsDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).TrustedCertificateGroups - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - input := &pa.DeleteTrustedCertificateGroupCommandInput{ Id: d.Id(), } _, err := svc.DeleteTrustedCertificateGroupCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete TrustedCertificateGroup: %s", err)) + return diag.Errorf("unable to delete TrustedCertificateGroup: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_virtualhost.go b/pingaccess/resource_pingaccess_virtualhost.go index 06c7f46e..3041365d 100644 --- a/pingaccess/resource_pingaccess_virtualhost.go +++ b/pingaccess/resource_pingaccess_virtualhost.go @@ -2,7 +2,6 @@ package pingaccess import ( "context" - "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -60,7 +59,7 @@ func resourcePingAccessVirtualHostCreate(ctx context.Context, d *schema.Resource result, _, err := svc.AddVirtualHostCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create VirtualHost: %s", err)) + return diag.Errorf("unable to create VirtualHost: %s", err) } d.SetId(result.Id.String()) @@ -74,7 +73,7 @@ func resourcePingAccessVirtualHostRead(ctx context.Context, d *schema.ResourceDa } result, _, err := svc.GetVirtualHostCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read VirtualHost: %s", err)) + return diag.Errorf("unable to read VirtualHost: %s", err) } return resourcePingAccessVirtualHostReadResult(d, result) } @@ -88,7 +87,7 @@ func resourcePingAccessVirtualHostUpdate(ctx context.Context, d *schema.Resource result, _, err := svc.UpdateVirtualHostCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update VirtualHost: %s", err)) + return diag.Errorf("unable to update VirtualHost: %s", err) } return resourcePingAccessVirtualHostReadResult(d, result) } @@ -101,7 +100,7 @@ func resourcePingAccessVirtualHostDelete(ctx context.Context, d *schema.Resource _, err := svc.DeleteVirtualHostCommand(input) if err != nil { 
- return diag.FromErr(fmt.Errorf("unable to delete VirtualHost: %s", err)) + return diag.Errorf("unable to delete VirtualHost: %s", err) } return nil } diff --git a/pingaccess/resource_pingaccess_websession.go b/pingaccess/resource_pingaccess_websession.go index b15019fb..eee8f139 100644 --- a/pingaccess/resource_pingaccess_websession.go +++ b/pingaccess/resource_pingaccess_websession.go @@ -2,9 +2,6 @@ package pingaccess import ( "context" - "crypto/tls" - "fmt" - "net/http" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -147,7 +144,7 @@ func resourcePingAccessWebSessionSchema() map[string]*schema.Schema { } } -func resourcePingAccessWebSessionCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessWebSessionCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).WebSessions input := pa.AddWebSessionCommandInput{ Body: *resourcePingAccessWebSessionReadData(d), @@ -155,26 +152,26 @@ func resourcePingAccessWebSessionCreate(ctx context.Context, d *schema.ResourceD result, _, err := svc.AddWebSessionCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to create WebSession: %s", err)) + return diag.Errorf("unable to create WebSession: %s", err) } d.SetId(result.Id.String()) return resourcePingAccessWebSessionReadResult(d, result) } -func resourcePingAccessWebSessionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessWebSessionRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).WebSessions input := &pa.GetWebSessionCommandInput{ Id: d.Id(), } result, _, err := svc.GetWebSessionCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to read WebSession: %s", err)) + return diag.Errorf("unable to read WebSession: %s", err) } return resourcePingAccessWebSessionReadResult(d, result) } -func resourcePingAccessWebSessionUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessWebSessionUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).WebSessions input := pa.UpdateWebSessionCommandInput{ Body: *resourcePingAccessWebSessionReadData(d), @@ -182,22 +179,20 @@ func resourcePingAccessWebSessionUpdate(ctx context.Context, d *schema.ResourceD } result, _, err := svc.UpdateWebSessionCommand(&input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to update WebSession: %s", err)) + return diag.Errorf("unable to update WebSession: %s", err) } return resourcePingAccessWebSessionReadResult(d, result) } -func resourcePingAccessWebSessionDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourcePingAccessWebSessionDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { svc := m.(*pa.Client).WebSessions - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - input := &pa.DeleteWebSessionCommandInput{ Id: d.Id(), } _, err := svc.DeleteWebSessionCommand(input) if err != nil { - return diag.FromErr(fmt.Errorf("unable to delete WebSession: %s", err)) + return diag.Errorf("unable to delete WebSession: %s", err) } return nil } diff --git a/pingaccess/structures.go b/pingaccess/structures.go index 75dab0d9..e3792675 100644 --- a/pingaccess/structures.go +++ b/pingaccess/structures.go @@ -451,7 +451,7 @@ func validateConfiguration(className 
string, d *schema.ResourceDiff, desc *pa.De for _, f := range value.ConfigurationFields { if *f.Required { if v := gjson.Get(conf, *f.Name); !v.Exists() { - diags = append(diags, diag.FromErr(fmt.Errorf("the field '%s' is required for the class_name '%s'", *f.Name, className))...) + diags = append(diags, diag.Errorf("the field '%s' is required for the class_name '%s'", *f.Name, className)...) } } } @@ -496,7 +496,7 @@ func validateRulesConfiguration(className string, d *schema.ResourceDiff, desc * for _, f := range value.ConfigurationFields { if *f.Required { if v := gjson.Get(conf, *f.Name); !v.Exists() { - diags = append(diags, diag.FromErr(fmt.Errorf("the field '%s' is required for the class_name '%s'", *f.Name, className))...) + diags = append(diags, diag.Errorf("the field '%s' is required for the class_name '%s'", *f.Name, className)...) } } } diff --git a/pingaccess/validators.go b/pingaccess/validators.go index 096af796..0fa0d80a 100644 --- a/pingaccess/validators.go +++ b/pingaccess/validators.go @@ -1,112 +1,110 @@ package pingaccess import ( - "fmt" - "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" ) -func validateWebOrAPI(value interface{}, path cty.Path) diag.Diagnostics { +func validateWebOrAPI(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "Web" && v != "API" { - return diag.FromErr(fmt.Errorf("must be either 'Web' or 'API' not %s", v)) + return diag.Errorf("must be either 'Web' or 'API' not %s", v) } return nil } -func validateRuleOrRuleSet(value interface{}, path cty.Path) diag.Diagnostics { +func validateRuleOrRuleSet(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "Rule" && v != "RuleSet" { - return diag.FromErr(fmt.Errorf("must be either 'Rule' or 'RuleSet' not %s", v)) + return diag.Errorf("must be either 'Rule' or 'RuleSet' not %s", v) } return nil } -func validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(value interface{}, path cty.Path) diag.Diagnostics { +func validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "SuccessIfAllSucceed" && v != "SuccessIfAnyOneSucceeds" { - return diag.FromErr(fmt.Errorf("must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not %s", v)) + return diag.Errorf("must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not %s", v) } return nil } -func validateAudience(value interface{}, path cty.Path) diag.Diagnostics { +func validateAudience(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if len(v) < 1 || len(v) > 31 { - return diag.FromErr(fmt.Errorf("must be between 1 and 32 characters not %d", len(v))) + return diag.Errorf("must be between 1 and 31 characters not %d", len(v)) } return nil } -func validateCookieType(value interface{}, path cty.Path) diag.Diagnostics { +func validateCookieType(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "Encrypted" && v != "Signed" { - return diag.FromErr(fmt.Errorf("must be either 'Encrypted' or 'Signed' not %s", v)) + return diag.Errorf("must be either 'Encrypted' or 'Signed' not %s", v) } return nil } -func validateOidcLoginType(value interface{}, path cty.Path) diag.Diagnostics { +func validateOidcLoginType(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "Code" && v != "POST" && v != "x_post" { - return diag.FromErr(fmt.Errorf("must be either 'Code', 'POST' or 'x_post' not %s", v)) + return diag.Errorf("must
be either 'Code', 'POST' or 'x_post' not %s", v) } return nil } -func validatePkceChallengeType(value interface{}, path cty.Path) diag.Diagnostics { +func validatePkceChallengeType(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "SHA256" && v != "OFF" { - return diag.FromErr(fmt.Errorf("must be either 'SHA256' or 'OFF' not %s", v)) + return diag.Errorf("must be either 'SHA256' or 'OFF' not %s", v) } return nil } -func validateRequestPreservationType(value interface{}, path cty.Path) diag.Diagnostics { +func validateRequestPreservationType(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "None" && v != "POST" && v != "All" { - return diag.FromErr(fmt.Errorf("must be either 'None', 'POST' or 'All' not %s", v)) + return diag.Errorf("must be either 'None', 'POST' or 'All' not %s", v) } return nil } -func validateWebStorageType(value interface{}, path cty.Path) diag.Diagnostics { +func validateWebStorageType(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "SessionStorage" && v != "LocalStorage" { - return diag.FromErr(fmt.Errorf("must be either 'SessionStorage' or 'LocalStorage' not %s", v)) + return diag.Errorf("must be either 'SessionStorage' or 'LocalStorage' not %s", v) } return nil } -func validateListLocationValue(value interface{}, path cty.Path) diag.Diagnostics { +func validateListLocationValue(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "FIRST" && v != "LAST" { - return diag.FromErr(fmt.Errorf("must be either 'FIRST' or 'LAST' not %s", v)) + return diag.Errorf("must be either 'FIRST' or 'LAST' not %s", v) } return nil } -func validateHTTPListenerName(value interface{}, path cty.Path) diag.Diagnostics { +func validateHTTPListenerName(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "ADMIN" && v != "AGENT" && v != "ENGINE" { - return diag.FromErr(fmt.Errorf("must be either 'ADMIN', 'AGENT' or 'ENGINE' not %s", v)) + return diag.Errorf("must be either 'ADMIN', 'AGENT' or 'ENGINE' not %s", v) } return nil } -func validateWebSessionSameSite(value interface{}, path cty.Path) diag.Diagnostics { +func validateWebSessionSameSite(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "Disabled" && v != "Lax" && v != "None" { - return diag.FromErr(fmt.Errorf("must be either 'Disabled', 'Lax' or 'None' not %s", v)) + return diag.Errorf("must be either 'Disabled', 'Lax' or 'None' not %s", v) } return nil } -func validateAuditLevel(value interface{}, path cty.Path) diag.Diagnostics { +func validateAuditLevel(value interface{}, _ cty.Path) diag.Diagnostics { v := value.(string) if v != "ON" && v != "OFF" { - return diag.FromErr(fmt.Errorf("must be either 'ON' or 'OFF' not %s", v)) + return diag.Errorf("must be either 'ON' or 'OFF' not %s", v) } return nil } diff --git a/pingaccess/validators_test.go b/pingaccess/validators_test.go index 41cbc034..1e5c2ed2 100644 --- a/pingaccess/validators_test.go +++ b/pingaccess/validators_test.go @@ -1,7 +1,6 @@ package pingaccess import ( - "fmt" "testing" "github.com/hashicorp/go-cty/cty" @@ -27,7 +26,7 @@ func Test_validateWebOrAPI(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.FromErr(fmt.Errorf("must be either 'Web' or 'API' not other")), + expectedDiags: diag.Errorf("must be either 'Web' or 'API' not other"), }, } for _, tc := range tests { @@ -70,7 +69,7 @@ func Test_validateRuleOrRuleSet(t *testing.T) { { name: "junk does not pass", value: 
"other", - expectedDiags: diag.FromErr(fmt.Errorf("must be either 'Rule' or 'RuleSet' not other")), + expectedDiags: diag.Errorf("must be either 'Rule' or 'RuleSet' not other"), }, } for _, tc := range tests { @@ -113,7 +112,7 @@ func Test_validateSuccessIfAllSucceedOrSuccessIfAnyOneSucceeds(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.FromErr(fmt.Errorf("must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not other")), + expectedDiags: diag.Errorf("must be either 'SuccessIfAllSucceed' or 'SuccessIfAnyOneSucceeds' not other"), }, } for _, tc := range tests { @@ -151,12 +150,12 @@ func Test_validateAudience(t *testing.T) { { name: "Empty value failes", value: "", - expectedDiags: diag.FromErr(fmt.Errorf("must be between 1 and 32 characters not 0")), + expectedDiags: diag.Errorf("must be between 1 and 32 characters not 0"), }, { name: "Very long value fails", value: "12345678901234567890123456789012", - expectedDiags: diag.FromErr(fmt.Errorf("must be between 1 and 32 characters not 32")), + expectedDiags: diag.Errorf("must be between 1 and 32 characters not 32"), }, } for _, tc := range tests { @@ -199,7 +198,7 @@ func Test_validateCookieType(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.FromErr(fmt.Errorf("must be either 'Encrypted' or 'Signed' not other")), + expectedDiags: diag.Errorf("must be either 'Encrypted' or 'Signed' not other"), }, } for _, tc := range tests { @@ -247,7 +246,7 @@ func Test_validateOidcLoginType(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.FromErr(fmt.Errorf("must be either 'Code', 'POST' or 'x_post' not other")), + expectedDiags: diag.Errorf("must be either 'Code', 'POST' or 'x_post' not other"), }, } for _, tc := range tests { @@ -295,7 +294,7 @@ func Test_validateRequestPreservationType(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.FromErr(fmt.Errorf("must be either 'None', 'POST' or 'All' not other")), + expectedDiags: diag.Errorf("must be either 'None', 'POST' or 'All' not other"), }, } for _, tc := range tests { @@ -338,7 +337,7 @@ func Test_validateWebStorageType(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.FromErr(fmt.Errorf("must be either 'SessionStorage' or 'LocalStorage' not other")), + expectedDiags: diag.Errorf("must be either 'SessionStorage' or 'LocalStorage' not other"), }, } for _, tc := range tests { @@ -381,7 +380,7 @@ func Test_validateListLocationValue(t *testing.T) { { name: "junk does not pass", value: "other", - expectedDiags: diag.FromErr(fmt.Errorf("must be either 'FIRST' or 'LAST' not other")), + expectedDiags: diag.Errorf("must be either 'FIRST' or 'LAST' not other"), }, } for _, tc := range tests { From ca53f3cdb1e7d7be2dc1437d2424516e419fec1a Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Sun, 28 Jun 2020 18:50:26 +0100 Subject: [PATCH 12/23] fixes for linux CI --- Makefile | 15 --------------- pingaccess/provider_test.go | 12 +++++++++--- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/Makefile b/Makefile index b4700023..c39c5913 100644 --- a/Makefile +++ b/Makefile @@ -19,21 +19,6 @@ test-and-report: build: @go build -mod=vendor -o ${NAME} -trimpath . -release: - @rm -rf build/* - GOOS=darwin GOARCH=amd64 go build -mod=vendor -o build/darwin_amd64/${NAME} -trimpath . && zip -j build/darwin_amd64.zip build/darwin_amd64/${NAME} - GOOS=freebsd GOARCH=386 go build -mod=vendor -o build/freebsd_386/${NAME} -trimpath . 
&& zip -j build/freebsd_386.zip build/freebsd_386/${NAME} - GOOS=freebsd GOARCH=amd64 go build -mod=vendor -o build/freebsd_amd64/${NAME} -trimpath . && zip -j build/freebsd_amd64.zip build/freebsd_amd64/${NAME} - GOOS=freebsd GOARCH=arm go build -mod=vendor -o build/freebsd_arm/${NAME} -trimpath . && zip -j build/freebsd_arm.zip build/freebsd_arm/${NAME} - GOOS=linux GOARCH=386 go build -mod=vendor -o build/linux_386/${NAME} -trimpath . && zip -j build/linux_386.zip build/linux_386/${NAME} - GOOS=linux GOARCH=amd64 go build -mod=vendor -o build/linux_amd64/${NAME} -trimpath . && zip -j build/linux_amd64.zip build/linux_amd64/${NAME} - GOOS=linux GOARCH=arm go build -mod=vendor -o build/linux_arm/${NAME} -trimpath . && zip -j build/linux_arm.zip build/linux_arm/${NAME} - GOOS=openbsd GOARCH=386 go build -mod=vendor -o build/openbsd_386/${NAME} -trimpath . && zip -j build/openbsd_386.zip build/openbsd_386/${NAME} - GOOS=openbsd GOARCH=amd64 go build -mod=vendor -o build/openbsd_amd64/${NAME} -trimpath . && zip -j build/openbsd_amd64.zip build/openbsd_amd64/${NAME} - GOOS=solaris GOARCH=amd64 go build -mod=vendor -o build/solaris_amd64/${NAME} -trimpath . && zip -j build/solaris_amd64.zip build/solaris_amd64/${NAME} - GOOS=windows GOARCH=386 go build -mod=vendor -o build/windows_386/${NAME} -trimpath . && zip -j build/windows_386.zip build/windows_386/${NAME} - GOOS=windows GOARCH=amd64 go build -mod=vendor -o build/windows_amd64/${NAME} -trimpath . && zip -j build/windows_amd64.zip build/windows_amd64/${NAME} - deploy-local: @mkdir -p ~/.terraform.d/plugins @cp ${NAME} ~/.terraform.d/plugins/ diff --git a/pingaccess/provider_test.go b/pingaccess/provider_test.go index 788ae038..31152389 100644 --- a/pingaccess/provider_test.go +++ b/pingaccess/provider_test.go @@ -6,6 +6,7 @@ import ( "fmt" "log" "math/rand" + "net" "net/http" "net/http/httptest" "net/url" @@ -30,7 +31,7 @@ func TestMain(m *testing.M) { if err != nil { log.Fatalf("Could not connect to docker: %s", err) } - server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + server := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { // Send response to be tested rw.Header().Set("Content-Type", "application/json;charset=utf-8") rw.Write([]byte(` @@ -68,6 +69,9 @@ } `)) })) + l, err := net.Listen("tcp", ":0") + server.Listener = l // for CI tests, as host.docker.internal is Windows/macOS only + server.StartTLS() // Close the server when test finishes defer server.Close() @@ -83,7 +87,8 @@ Name: fmt.Sprintf("pa-%s", randomID), Repository: "pingidentity/pingaccess", Tag: "6.0.2-edge", - Env: []string{"PING_IDENTITY_ACCEPT_EULA=YES", fmt.Sprintf("PING_IDENTITY_DEVOPS_USER=%s", devOpsUser), fmt.Sprintf("PING_IDENTITY_DEVOPS_KEY=%s", devOpsKey)}, + //ExtraHosts: []string{"host.docker.internal:host-gateway"}, + Env: []string{"PING_IDENTITY_ACCEPT_EULA=YES", fmt.Sprintf("PING_IDENTITY_DEVOPS_USER=%s", devOpsUser), fmt.Sprintf("PING_IDENTITY_DEVOPS_KEY=%s", devOpsKey)}, } paCont, err := pool.RunWithOptions(paOpts) if err != nil { @@ -107,7 +112,8 @@ } os.Setenv("PINGACCESS_BASEURL", fmt.Sprintf("https://localhost:%s", paCont.GetPort("9000/tcp"))) os.Setenv("PINGACCESS_PASSWORD", "2FederateM0re") - os.Setenv("PINGFEDERATE_TEST_IP", strings.Replace(server.URL, "127.0.0.1", "host.docker.internal", -1)) + host, _ := os.Hostname() // for CI tests, as host.docker.internal is
Windows/macOS only + os.Setenv("PINGFEDERATE_TEST_IP", strings.Replace(server.URL, "[::]", host, -1)) log.Println("Connected to PingAccess admin API....") version, _, err := client.Version.VersionCommand() From aaead7ab91b83aa53ec1f35ded21be6e5aa4ef39 Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Sun, 28 Jun 2020 19:01:33 +0100 Subject: [PATCH 13/23] update for gh actions and cleanup --- .github/workflows/gosec.yaml | 20 ++++++++++ .github/workflows/release.yaml | 38 +++++++++++++++++++ pingaccess/config.go | 1 + ...esource_pingaccess_pingfederate_runtime.go | 19 +++++----- 4 files changed, 69 insertions(+), 9 deletions(-) create mode 100644 .github/workflows/gosec.yaml create mode 100644 .github/workflows/release.yaml diff --git a/.github/workflows/gosec.yaml b/.github/workflows/gosec.yaml new file mode 100644 index 00000000..87f80ada --- /dev/null +++ b/.github/workflows/gosec.yaml @@ -0,0 +1,20 @@ +name: gosec +on: + push: + branches: + - master + pull_request: + branches: + - master +jobs: + tests: + runs-on: ubuntu-latest + env: + GO111MODULE: on + steps: + - name: Checkout Source + uses: actions/checkout@v2 + - name: Run Gosec Security Scanner + uses: securego/gosec@master + with: + args: ./... diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 00000000..1ef11289 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,38 @@ +name: release + +on: + push: + tags: + - 'v*' + +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v1 + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13.x + - name: Import GPG key + id: import_gpg + uses: crazy-max/ghaction-import-gpg@v2 + env: + GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} + PASSPHRASE: ${{ secrets.PASSPHRASE }} + - uses: actions/cache@v1 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v1 + with: + version: latest + args: release --rm-dist + key: ${{ secrets.YOUR_PRIVATE_KEY }} + env: + GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/pingaccess/config.go b/pingaccess/config.go index abd34f44..1191f5b0 100644 --- a/pingaccess/config.go +++ b/pingaccess/config.go @@ -20,6 +20,7 @@ type config struct { // Client configures and returns a fully initialized PAClient func (c *config) Client() (interface{}, diag.Diagnostics) { + /* #nosec */ http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} url, _ := url.Parse(c.BaseURL) client := pingaccess.NewClient(c.Username, c.Password, url, c.Context, nil) diff --git a/pingaccess/resource_pingaccess_pingfederate_runtime.go b/pingaccess/resource_pingaccess_pingfederate_runtime.go index 71a14301..d8b7909f 100644 --- a/pingaccess/resource_pingaccess_pingfederate_runtime.go +++ b/pingaccess/resource_pingaccess_pingfederate_runtime.go @@ -22,6 +22,7 @@ func resourcePingAccessPingFederateRuntime() *schema.Resource { } func resourcePingAccessPingFederateRuntimeSchema() map[string]*schema.Schema { + deprecationMsg := "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead."
return map[string]*schema.Schema{ //New API "description": { @@ -48,53 +49,53 @@ func resourcePingAccessPingFederateRuntimeSchema() map[string]*schema.Schema { Optional: true, Default: "ON", ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, - Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + Deprecated: deprecationMsg, }, "back_channel_base_path": { Type: schema.TypeString, Optional: true, ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, - Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + Deprecated: deprecationMsg, }, "back_channel_secure": { Type: schema.TypeBool, Optional: true, Default: false, ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, - Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + Deprecated: deprecationMsg, }, "base_path": { Type: schema.TypeString, Optional: true, ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, - Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + Deprecated: deprecationMsg, }, "expected_hostname": { Type: schema.TypeString, Optional: true, ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, - Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + Deprecated: deprecationMsg, }, "host": { Type: schema.TypeString, Optional: true, RequiredWith: []string{"host", "port"}, ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, - Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + Deprecated: deprecationMsg, }, "port": { Type: schema.TypeInt, Optional: true, RequiredWith: []string{"host", "port"}, ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, - Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + Deprecated: deprecationMsg, }, "secure": { Type: schema.TypeBool, Optional: true, Default: false, ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, - Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. If you are using 6.x please use 'issuer' configuration instead.", + Deprecated: deprecationMsg, }, "targets": { Type: schema.TypeSet, @@ -104,7 +105,7 @@ func resourcePingAccessPingFederateRuntimeSchema() map[string]*schema.Schema { Type: schema.TypeString, }, ConflictsWith: []string{"description", "issuer", "sts_token_exchange_endpoint"}, - Deprecated: "This is the deprecated pingfederate runtime configuration and is only to provide support for 5.x. 
If you are using 6.x please use 'issuer' configuration instead.", + Deprecated: deprecationMsg, }, //Common From 84cca8152dd3493969bb35cd95ae4fe62c599eaf Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Sun, 28 Jun 2020 19:10:10 +0100 Subject: [PATCH 14/23] add gosec for all changes --- .github/workflows/gosec.yaml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/.github/workflows/gosec.yaml b/.github/workflows/gosec.yaml index 87f80ada..30825c06 100644 --- a/.github/workflows/gosec.yaml +++ b/.github/workflows/gosec.yaml @@ -1,11 +1,5 @@ name: gosec -on: - push: - branches: - - master - pull_request: - branches: - - master +on: [push, pull_request] jobs: tests: runs-on: ubuntu-latest From 67387509224c143df25b80f330a74d3c8324d519 Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Sun, 28 Jun 2020 19:49:03 +0100 Subject: [PATCH 15/23] add experimental regression test workflow --- .github/workflows/functional.yaml | 72 +++++++++++++++++++++++++++++++ README.md | 10 +++++ func-tests/main.tf | 30 ++++++++++++- func-tests/pf-settings.json | 44 +++++++++++++++++++ func-tests/variables.tf | 5 +++ 5 files changed, 159 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/functional.yaml create mode 100644 func-tests/pf-settings.json create mode 100644 func-tests/variables.tf diff --git a/.github/workflows/functional.yaml b/.github/workflows/functional.yaml new file mode 100644 index 00000000..71a5b8bf --- /dev/null +++ b/.github/workflows/functional.yaml @@ -0,0 +1,72 @@ +on: [push] +name: e2e +jobs: + e2e: + name: e2e + strategy: + matrix: + pingaccess-version: [5.2.3, 5.3.2, 6.0.2, 6.1.0] + services: + pingaccess: + image: pingidentity/pingaccess:${{ matrix.pingaccess-version }}-edge + env: + PING_IDENTITY_ACCEPT_EULA: YES + PING_IDENTITY_DEVOPS_USER: ${{ secrets.PING_IDENTITY_DEVOPS_USER }} + PING_IDENTITY_DEVOPS_KEY: ${{ secrets.PING_IDENTITY_DEVOPS_KEY }} + ports: + - 9000:9000 + pingfederate: + image: pingidentity/pingfederate:10.0.2-edge + env: + PING_IDENTITY_ACCEPT_EULA: YES + PING_IDENTITY_DEVOPS_USER: ${{ secrets.PING_IDENTITY_DEVOPS_USER }} + PING_IDENTITY_DEVOPS_KEY: ${{ secrets.PING_IDENTITY_DEVOPS_KEY }} + SERVER_PROFILE_URL: https://github.com/pingidentity/pingidentity-server-profiles.git + SERVER_PROFILE_PATH: getting-started/pingfederate + ports: + - 9999:9999 + - 9031:9031 + + runs-on: ubuntu-latest + steps: + - name: Set up Go 1.14 + uses: actions/setup-go@v1 + with: + go-version: 1.14 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v1 + + - name: Setup Test Environment + run: | + curl -k -i -H "Content-Type: application/json" -H "X-Xsrf-Header: PingFederate" --user Administrator:2FederateM0re -d "@func-tests/pf-settings.json" -X PUT https://localhost:9999/pf-admin-api/v1/serverSettings + curl -k https://localhost:9031/.well-known/openid-configuration + + - name: Change default PA password for 5.2 + if: ${{ startsWith( matrix.pingaccess-version , '5.2') }} + run: sed -i 's/2FederateM0re/2Access/g' func-tests/main.tf + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v1 + + - name: Install Build and Deploy + run: make build deploy-local + + - name: Terraform Init + run: make func-init + + - name: Terraform Plan + run: make func-plan + env: + TF_VAR_pa6: ${{ startsWith( matrix.pingaccess-version , '6') }} + + - name: Terraform Apply + run: make func-apply + env: + TF_VAR_pa6: ${{ startsWith( matrix.pingaccess-version , '6') }} + + - name: Terraform Plan + run: make func-plan + env: + 
TF_VAR_pa6: ${{ startsWith( matrix.pingaccess-version , '6') }} diff --git a/README.md b/README.md index 282cfa8f..31955186 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ PingAccess Terraform Provider [![Coverage](https://sonarcloud.io/api/project_badges/measure?project=github.conef.uk.iwarapter.terraform-provider-pingaccess&metric=coverage)](https://sonarcloud.io/dashboard?id=github.conef.uk.iwarapter.terraform-provider-pingaccess) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=github.conef.uk.iwarapter.terraform-provider-pingaccess&metric=alert_status)](https://sonarcloud.io/dashboard?id=github.conef.uk.iwarapter.terraform-provider-pingaccess) ![ci](https://github.com/iwarapter/terraform-provider-pingaccess/workflows/ci/badge.svg) + ![e2e](https://github.com/iwarapter/terraform-provider-pingaccess/workflows/e2e/badge.svg) ![GitHub release (latest by date)](https://img.shields.io/github/v/release/iwarapter/terraform-provider-pingaccess) @@ -45,6 +46,15 @@ Using the Provider To use a released provider in your Terraform environment, download the latest version from https://github.com/iwarapter/terraform-provider-pingaccess/releases and follow the instructions to [install it as a plugin.](https://www.terraform.io/docs/plugins/basics.html#installing-a-plugin) After placing it into your plugins directory, run `terraform init` to initialize it. +The provider is currently tested against the following versions of PingAccess + +| PingAccess | Status | +|------------|--------| +| 5.2.3 | ![e2e](https://github.com/iwarapter/terraform-provider-pingaccess/workflows/e2e/badge.svg)| +| 5.3.2 | ![e2e](https://github.com/iwarapter/terraform-provider-pingaccess/workflows/e2e/badge.svg)| +| 6.0.2 | ![e2e](https://github.com/iwarapter/terraform-provider-pingaccess/workflows/e2e/badge.svg)| +| 6.1.0 | ![e2e](https://github.com/iwarapter/terraform-provider-pingaccess/workflows/e2e/badge.svg)| Testing the Provider --------------------------- diff --git a/func-tests/main.tf b/func-tests/main.tf index bc483e6a..ca3e55ae 100644 --- a/func-tests/main.tf +++ b/func-tests/main.tf @@ -2,6 +2,10 @@ provider "pingaccess" { password = "2FederateM0re" } +data "pingaccess_trusted_certificate_group" "trust_any" { + name = "Trust Any" +} + resource "pingaccess_site" "demo" { name = "demo" targets = ["localhost:4321"] @@ -91,8 +95,9 @@ resource "pingaccess_rule" "demo_2" { } resource "pingaccess_acme_server" "acc_test" { - name = "foo" - url = "https://host.docker.internal:14000/dir" + count = var.pa6 ? 1 : 0 + name = "foo" + url = "https://host.docker.internal:14000/dir" } resource "pingaccess_identity_mapping" "demo_identity_mapping" { @@ -139,3 +144,24 @@ resource "pingaccess_websession" "demo_session" { "phone", ] } + +resource "pingaccess_pingfederate_runtime" "pa6_demo" { + count = var.pa6 ? 1 : 0 + description = "demo" + issuer = "https://pingfederate:9031" + sts_token_exchange_endpoint = "https://foo/bar" + skip_hostname_verification = true + use_slo = false + trusted_certificate_group_id = data.pingaccess_trusted_certificate_group.trust_any.id + use_proxy = false +} + +resource "pingaccess_pingfederate_runtime" "pa5_demo" { + count = var.pa6 ?
0 : 1 + host = "pingfederate" + port = 9031 + skip_hostname_verification = true + use_slo = false + trusted_certificate_group_id = data.pingaccess_trusted_certificate_group.trust_any.id + use_proxy = true +} diff --git a/func-tests/pf-settings.json b/func-tests/pf-settings.json new file mode 100644 index 00000000..953c113e --- /dev/null +++ b/func-tests/pf-settings.json @@ -0,0 +1,44 @@ +{ + "contactInfo": {}, + "rolesAndProtocols": { + "oauthRole": { + "enableOauth": true, + "enableOpenIdConnect": true + }, + "idpRole": { + "enable": true, + "enableSaml11": false, + "enableSaml10": false, + "enableWsFed": false, + "enableWsTrust": false, + "saml20Profile": { + "enable": true + }, + "enableOutboundProvisioning": false + }, + "spRole": { + "enable": true, + "enableSaml11": false, + "enableSaml10": false, + "enableWsFed": false, + "enableWsTrust": false, + "saml20Profile": { + "enable": true, + "enableXASP": false + }, + "enableInboundProvisioning": false, + "enableOpenIDConnect": false + }, + "enableIdpDiscovery": false + }, + "federationInfo": { + "baseUrl": "https://localhost:9031", + "saml2EntityId": "testing", + "saml1xIssuerId": "", + "saml1xSourceId": "", + "wsfedRealm": "" + }, + "notifications": { + "notifyAdminUserPasswordChanges": false + } +} diff --git a/func-tests/variables.tf b/func-tests/variables.tf new file mode 100644 index 00000000..5981a84f --- /dev/null +++ b/func-tests/variables.tf @@ -0,0 +1,5 @@ +variable "pa6" { + description = "If we are testing against PingAccess 6.x" + type = bool + default = true +} From b69ff2258bdde1f2ff63404963f9182ae96bc885 Mon Sep 17 00:00:00 2001 From: Iain Adams Date: Mon, 29 Jun 2020 22:19:37 +0100 Subject: [PATCH 16/23] restructure for docs and functional testing --- .../pingaccess_access_token_validator.md | 2 +- docs/resources/pingaccess_acme_server.md | 2 +- docs/resources/pingaccess_application.md | 2 +- .../pingaccess_application_resource.md | 2 +- .../pingaccess_auth_token_management.md | 2 +- docs/resources/pingaccess_authn_req_list.md | 2 +- docs/resources/pingaccess_certificate.md | 2 +- docs/resources/pingaccess_engine_listener.md | 2 +- docs/resources/pingaccess_hsm_provider.md | 2 +- ...gaccess_http_config_request_host_source.md | 2 +- docs/resources/pingaccess_https_listener.md | 2 +- docs/resources/pingaccess_identity_mapping.md | 2 +- docs/resources/pingaccess_keypair.md | 20 ++++- docs/resources/pingaccess_oauth_server.md | 2 +- .../pingaccess_pingfederate_admin.md | 2 +- .../pingaccess_pingfederate_oauth.md | 2 +- .../pingaccess_pingfederate_runtime.md | 9 +- docs/resources/pingaccess_rule.md | 2 +- docs/resources/pingaccess_ruleset.md | 2 +- docs/resources/pingaccess_site.md | 2 +- .../pingaccess_site_authenticator.md | 2 +- .../pingaccess_third_party_service.md | 2 +- .../pingaccess_trusted_certificate_group.md | 2 +- docs/resources/pingaccess_websession.md | 2 +- .../access_token_validator.tf | 0 .../test_cases => func-tests}/acme_server.tf | 0 .../amazon_root_ca2.pem | 0 .../amazon_root_ca3.pem | 0 func-tests/application.tf | 15 ++++ .../application_resource.tf | 8 +- .../auth_token_management.tf | 0 .../authn_req_list.tf | 0 .../test_cases => func-tests}/certificate.tf | 2 +- .../engine_listener.tf | 0 .../test_cases => func-tests}/hsm_provider.tf | 6 +- .../http_config_request_host_source.tf | 0 .../https_listener.tf | 0 .../identity_mapping.tf | 0 func-tests/keypair.tf | 5 ++ func-tests/keypair_generate.tf | 12 +++ func-tests/main.tf | 83 ------------------ .../test_cases => func-tests}/oauth_server.tf | 2 +- 
.../pingfederate_admin.tf | 0 .../pingfederate_oauth.tf | 2 +- func-tests/provider.p12 | Bin 0 -> 4581 bytes {pingaccess/test_cases => func-tests}/rule.tf | 0 .../test_cases => func-tests}/ruleset.tf | 4 +- {pingaccess/test_cases => func-tests}/site.tf | 2 +- .../site_authenticator.tf | 0 .../third_party_service.tf | 0 .../trusted_certificate_group.tf | 0 func-tests/virtualhost.tf | 7 ++ .../test_cases => func-tests}/websession.tf | 0 pingaccess/provider_test.go | 2 +- pingaccess/resource_pingaccess_application.go | 3 +- .../resource_pingaccess_hsm_provider.go | 5 +- .../resource_pingaccess_oauth_server.go | 2 +- pingaccess/resource_pingaccess_site.go | 7 ++ pingaccess/test_cases/amazon_root_ca1.pem | 17 ---- pingaccess/test_cases/application.tf | 16 ---- pingaccess/test_cases/keypair.tf | 5 -- pingaccess/test_cases/pingfederate_runtime.tf | 8 -- 62 files changed, 113 insertions(+), 173 deletions(-) rename {pingaccess/test_cases => func-tests}/access_token_validator.tf (100%) rename {pingaccess/test_cases => func-tests}/acme_server.tf (100%) rename {pingaccess/test_cases => func-tests}/amazon_root_ca2.pem (100%) rename {pingaccess/test_cases => func-tests}/amazon_root_ca3.pem (100%) create mode 100644 func-tests/application.tf rename {pingaccess/test_cases => func-tests}/application_resource.tf (82%) rename {pingaccess/test_cases => func-tests}/auth_token_management.tf (100%) rename {pingaccess/test_cases => func-tests}/authn_req_list.tf (100%) rename {pingaccess/test_cases => func-tests}/certificate.tf (53%) rename {pingaccess/test_cases => func-tests}/engine_listener.tf (100%) rename {pingaccess/test_cases => func-tests}/hsm_provider.tf (71%) rename {pingaccess/test_cases => func-tests}/http_config_request_host_source.tf (100%) rename {pingaccess/test_cases => func-tests}/https_listener.tf (100%) rename {pingaccess/test_cases => func-tests}/identity_mapping.tf (100%) create mode 100644 func-tests/keypair.tf create mode 100644 func-tests/keypair_generate.tf rename {pingaccess/test_cases => func-tests}/oauth_server.tf (81%) rename {pingaccess/test_cases => func-tests}/pingfederate_admin.tf (100%) rename {pingaccess/test_cases => func-tests}/pingfederate_oauth.tf (88%) create mode 100644 func-tests/provider.p12 rename {pingaccess/test_cases => func-tests}/rule.tf (100%) rename {pingaccess/test_cases => func-tests}/ruleset.tf (69%) rename {pingaccess/test_cases => func-tests}/site.tf (58%) rename {pingaccess/test_cases => func-tests}/site_authenticator.tf (100%) rename {pingaccess/test_cases => func-tests}/third_party_service.tf (100%) rename {pingaccess/test_cases => func-tests}/trusted_certificate_group.tf (100%) create mode 100644 func-tests/virtualhost.tf rename {pingaccess/test_cases => func-tests}/websession.tf (100%) delete mode 100644 pingaccess/test_cases/amazon_root_ca1.pem delete mode 100644 pingaccess/test_cases/application.tf delete mode 100644 pingaccess/test_cases/keypair.tf delete mode 100644 pingaccess/test_cases/pingfederate_runtime.tf diff --git a/docs/resources/pingaccess_access_token_validator.md b/docs/resources/pingaccess_access_token_validator.md index a621aa38..9f6c8c4e 100644 --- a/docs/resources/pingaccess_access_token_validator.md +++ b/docs/resources/pingaccess_access_token_validator.md @@ -7,7 +7,7 @@ Provides a access token validator. 
## Example Usage ```terraform -{!../pingaccess/test_cases/access_token_validator.tf!} +{!../func-tests//access_token_validator.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_acme_server.md b/docs/resources/pingaccess_acme_server.md index cee48289..89c28dce 100644 --- a/docs/resources/pingaccess_acme_server.md +++ b/docs/resources/pingaccess_acme_server.md @@ -4,7 +4,7 @@ Provides a acme server. ## Example Usage ```terraform -{!../pingaccess/test_cases/acme_server.tf!} +{!../func-tests//acme_server.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_application.md b/docs/resources/pingaccess_application.md index 7aadf867..93effc5e 100644 --- a/docs/resources/pingaccess_application.md +++ b/docs/resources/pingaccess_application.md @@ -4,7 +4,7 @@ Provides a application. ## Example Usage ```terraform -{!../pingaccess/test_cases/application.tf!} +{!../func-tests//application.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_application_resource.md b/docs/resources/pingaccess_application_resource.md index dbb4dd59..6c04a285 100644 --- a/docs/resources/pingaccess_application_resource.md +++ b/docs/resources/pingaccess_application_resource.md @@ -4,7 +4,7 @@ Provides a application resource. ## Example Usage ```terraform -{!../pingaccess/test_cases/application_resource.tf!} +{!../func-tests//application_resource.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_auth_token_management.md b/docs/resources/pingaccess_auth_token_management.md index f269058e..9fb7bb3d 100644 --- a/docs/resources/pingaccess_auth_token_management.md +++ b/docs/resources/pingaccess_auth_token_management.md @@ -7,7 +7,7 @@ Provides a auth token management. ## Example Usage ```terraform -{!../pingaccess/test_cases/auth_token_management.tf!} +{!../func-tests//auth_token_management.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_authn_req_list.md b/docs/resources/pingaccess_authn_req_list.md index e8829bad..a5dfd8c7 100644 --- a/docs/resources/pingaccess_authn_req_list.md +++ b/docs/resources/pingaccess_authn_req_list.md @@ -4,7 +4,7 @@ Provides a AuthN Req List. ## Example Usage ```terraform -{!../pingaccess/test_cases/authn_req_list.tf!} +{!../func-tests//authn_req_list.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_certificate.md b/docs/resources/pingaccess_certificate.md index 28a44667..c4e980b9 100644 --- a/docs/resources/pingaccess_certificate.md +++ b/docs/resources/pingaccess_certificate.md @@ -4,7 +4,7 @@ Provides a ceritficate. ### Example Usage ```terraform -{!../pingaccess/test_cases/certificate.tf!} +{!../func-tests//certificate.tf!} ``` ### Argument Attributes The following arguments are supported: diff --git a/docs/resources/pingaccess_engine_listener.md b/docs/resources/pingaccess_engine_listener.md index 8070fda1..69b7c019 100644 --- a/docs/resources/pingaccess_engine_listener.md +++ b/docs/resources/pingaccess_engine_listener.md @@ -4,7 +4,7 @@ Provides a engine listener. ## Example Usage ```terraform -{!../pingaccess/test_cases/engine_listener.tf!} +{!../func-tests//engine_listener.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_hsm_provider.md b/docs/resources/pingaccess_hsm_provider.md index da23fd0d..26ddfe29 100644 --- a/docs/resources/pingaccess_hsm_provider.md +++ b/docs/resources/pingaccess_hsm_provider.md @@ -7,7 +7,7 @@ Provides a HSM provider. 
## Example Usage ```terraform -{!../pingaccess/test_cases/hsm_provider.tf!} +{!../func-tests//hsm_provider.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_http_config_request_host_source.md b/docs/resources/pingaccess_http_config_request_host_source.md index 27a04d63..0ce4ceeb 100644 --- a/docs/resources/pingaccess_http_config_request_host_source.md +++ b/docs/resources/pingaccess_http_config_request_host_source.md @@ -7,7 +7,7 @@ Provides HTTP request Host Source type resource. ## Example Usage ```terraform -{!../pingaccess/test_cases/http_config_request_host_source.tf!} +{!../func-tests//http_config_request_host_source.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_https_listener.md b/docs/resources/pingaccess_https_listener.md index b24e12f9..a2c91735 100644 --- a/docs/resources/pingaccess_https_listener.md +++ b/docs/resources/pingaccess_https_listener.md @@ -4,7 +4,7 @@ Provides a https listener. ## Example Usage ```terraform -{!../pingaccess/test_cases/https_listener.tf!} +{!../func-tests//https_listener.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_identity_mapping.md b/docs/resources/pingaccess_identity_mapping.md index b3ad089d..ed029839 100644 --- a/docs/resources/pingaccess_identity_mapping.md +++ b/docs/resources/pingaccess_identity_mapping.md @@ -7,7 +7,7 @@ Provides a identity mapping. ### Example Usage ```terraform -{!../pingaccess/test_cases/identity_mapping.tf!} +{!../func-tests//identity_mapping.tf!} ``` ### Argument Attributes The following arguments are supported: diff --git a/docs/resources/pingaccess_keypair.md b/docs/resources/pingaccess_keypair.md index 50257808..3900ed0e 100644 --- a/docs/resources/pingaccess_keypair.md +++ b/docs/resources/pingaccess_keypair.md @@ -7,18 +7,32 @@ Provides a keypair. ### Example Usage ```terraform -{!../pingaccess/test_cases/keypair.tf!} +{!../func-tests//keypair.tf!} + +{!../func-tests//keypair_generate.tf!} ``` ### Argument Attributes The following arguments are supported: - [`alias`](#alias) - (required) The alias for the keypair. +- [`hsm_provider_id`](#hsm_provider_id) - The HSM Provider ID. -- [`file_data`](#file_data) - (required) The base64-encoded data of the keypair. +#### Importing KeyPair +- [`file_data`](#file_data) - (required) The base64-encoded data of the keypair. - [`password`](#password) - The Password used to protect the PKCS#12 file. -- [`hsm_provider_id`](#hsm_provider_id) - The HSM Provider ID. +#### Generating KeyPair + +- [`city`](#city) - (Required) The city or other primary location (L) where the company operates. +- [`common_name`](#common_name) - (Required) The common name (CN) identifying the certificate. +- [`country`](#country) - (Required) The country (C) where the company is based, using two capital letters. +- [`key_algorithm`](#key_algorithm) - (Required) The key algorithm to use to generate a key. +- [`key_size`](#key_size) - (Required) The number of bits used in the key. Choices depend on selected key algorithm. +- [`organization`](#organization) - (Required) The organization (O) or company name creating the certificate. +- [`organization_unit`](#organization_unit) - (Required) The specific unit within the organization (OU). +- [`state`](#state) - (Required) The state (ST) or other political unit encompassing the location. +- [`valid_days`](#valid_days) - (Required) The number of days the certificate is valid. 
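Taken together, the generate-variant arguments map onto a single resource block. The following is an illustrative sketch only; the values are placeholders rather than the contents of `func-tests/keypair_generate.tf`:

```terraform
resource "pingaccess_keypair" "demo_generate" {
  alias             = "demo-generated"
  city              = "London"
  common_name       = "localhost"
  country           = "GB"
  key_algorithm     = "RSA"
  key_size          = 2048
  organization      = "Example Org"
  organization_unit = "Engineering"
  state             = "London"
  valid_days        = 365
}
```

As the two headings above suggest, the import arguments (`file_data`/`password`) and the generate arguments are alternative ways of populating the same keypair resource.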
### Attributes Reference diff --git a/docs/resources/pingaccess_oauth_server.md b/docs/resources/pingaccess_oauth_server.md index 7c7a3e2e..0107cbf4 100644 --- a/docs/resources/pingaccess_oauth_server.md +++ b/docs/resources/pingaccess_oauth_server.md @@ -4,7 +4,7 @@ Provides an Authorization Server configuration. ### Example Usage ```terraform -{!../pingaccess/test_cases/oauth_server.tf!} +{!../func-tests//oauth_server.tf!} ``` ### Argument Attributes The following arguments are supported: diff --git a/docs/resources/pingaccess_pingfederate_admin.md b/docs/resources/pingaccess_pingfederate_admin.md index 9f2668f9..1dbe5b1d 100644 --- a/docs/resources/pingaccess_pingfederate_admin.md +++ b/docs/resources/pingaccess_pingfederate_admin.md @@ -7,7 +7,7 @@ Configured the PingFederate OAuth. ## Example Usage ```terraform -{!../pingaccess/test_cases/pingfederate_admin.tf!} +{!../func-tests//pingfederate_admin.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_pingfederate_oauth.md b/docs/resources/pingaccess_pingfederate_oauth.md index a95b9b44..cdc54866 100644 --- a/docs/resources/pingaccess_pingfederate_oauth.md +++ b/docs/resources/pingaccess_pingfederate_oauth.md @@ -7,7 +7,7 @@ Configured the PingFederate OAuth. ## Example Usage ```terraform -{!../pingaccess/test_cases/pingfederate_oauth.tf!} +{!../func-tests//pingfederate_oauth.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_pingfederate_runtime.md b/docs/resources/pingaccess_pingfederate_runtime.md index d51c5968..2401d90c 100644 --- a/docs/resources/pingaccess_pingfederate_runtime.md +++ b/docs/resources/pingaccess_pingfederate_runtime.md @@ -7,7 +7,14 @@ Configured the PingFederate runtime. ## Example Usage ```terraform -{!../pingaccess/test_cases/pingfederate_runtime.tf!} +resource "pingaccess_pingfederate_runtime" "demo" { + description = "foo" + issuer = "https://localhost:9031" + skip_hostname_verification = true + use_slo = false + trusted_certificate_group_id = 2 + use_proxy = true +} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_rule.md b/docs/resources/pingaccess_rule.md index 2f5a010a..b01c84ea 100644 --- a/docs/resources/pingaccess_rule.md +++ b/docs/resources/pingaccess_rule.md @@ -7,7 +7,7 @@ Provides a rule. ## Example Usage ```terraform -{!../pingaccess/test_cases/rule.tf!} +{!../func-tests//rule.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_ruleset.md b/docs/resources/pingaccess_ruleset.md index 7f66037e..2a5358b9 100644 --- a/docs/resources/pingaccess_ruleset.md +++ b/docs/resources/pingaccess_ruleset.md @@ -4,7 +4,7 @@ Provides a ruleset. ## Example Usage ```terraform -{!../pingaccess/test_cases/ruleset.tf!} +{!../func-tests//ruleset.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_site.md b/docs/resources/pingaccess_site.md index f49f7b29..4197470d 100644 --- a/docs/resources/pingaccess_site.md +++ b/docs/resources/pingaccess_site.md @@ -4,7 +4,7 @@ Provides a site. ## Example Usage ```terraform -{!../pingaccess/test_cases/site.tf!} +{!../func-tests//site.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_site_authenticator.md b/docs/resources/pingaccess_site_authenticator.md index fcb488bf..fa1383ca 100644 --- a/docs/resources/pingaccess_site_authenticator.md +++ b/docs/resources/pingaccess_site_authenticator.md @@ -11,7 +11,7 @@ Provides a site authenticator. 
## Example Usage ```terraform -{!../pingaccess/test_cases/site_authenticator.tf!} +{!../func-tests//site_authenticator.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_third_party_service.md b/docs/resources/pingaccess_third_party_service.md index 66725b0e..bcf0f03b 100644 --- a/docs/resources/pingaccess_third_party_service.md +++ b/docs/resources/pingaccess_third_party_service.md @@ -4,7 +4,7 @@ Provides a third party service. ## Example Usage ```terraform -{!../pingaccess/test_cases/third_party_service.tf!} +{!../func-tests//third_party_service.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_trusted_certificate_group.md b/docs/resources/pingaccess_trusted_certificate_group.md index 2cc37545..31d75401 100644 --- a/docs/resources/pingaccess_trusted_certificate_group.md +++ b/docs/resources/pingaccess_trusted_certificate_group.md @@ -4,7 +4,7 @@ Provides a trusted certificate group. ## Example Usage ```terraform -{!../pingaccess/test_cases/trusted_certificate_group.tf!} +{!../func-tests//trusted_certificate_group.tf!} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_websession.md b/docs/resources/pingaccess_websession.md index 1600c90d..0b737e9d 100644 --- a/docs/resources/pingaccess_websession.md +++ b/docs/resources/pingaccess_websession.md @@ -7,7 +7,7 @@ Provides a web session. ## Example Usage ```terraform -{!../pingaccess/test_cases/websession.tf!} +{!../func-tests//websession.tf!} ``` ## Argument Attributes diff --git a/pingaccess/test_cases/access_token_validator.tf b/func-tests/access_token_validator.tf similarity index 100% rename from pingaccess/test_cases/access_token_validator.tf rename to func-tests/access_token_validator.tf diff --git a/pingaccess/test_cases/acme_server.tf b/func-tests/acme_server.tf similarity index 100% rename from pingaccess/test_cases/acme_server.tf rename to func-tests/acme_server.tf diff --git a/pingaccess/test_cases/amazon_root_ca2.pem b/func-tests/amazon_root_ca2.pem similarity index 100% rename from pingaccess/test_cases/amazon_root_ca2.pem rename to func-tests/amazon_root_ca2.pem diff --git a/pingaccess/test_cases/amazon_root_ca3.pem b/func-tests/amazon_root_ca3.pem similarity index 100% rename from pingaccess/test_cases/amazon_root_ca3.pem rename to func-tests/amazon_root_ca3.pem diff --git a/func-tests/application.tf b/func-tests/application.tf new file mode 100644 index 00000000..f67cb019 --- /dev/null +++ b/func-tests/application.tf @@ -0,0 +1,15 @@ +resource "pingaccess_application" "demo_application" { + application_type = "Web" + name = "demo" + context_root = "/" + destination = "Site" + site_id = pingaccess_site.demo.id + virtual_host_ids = [pingaccess_virtualhost.demo.id] + + policy { + web { + type = "Rule" + id = pingaccess_rule.demo_rule.id + } + } +} diff --git a/pingaccess/test_cases/application_resource.tf b/func-tests/application_resource.tf similarity index 82% rename from pingaccess/test_cases/application_resource.tf rename to func-tests/application_resource.tf index 8e2bb5bf..316c236c 100644 --- a/pingaccess/test_cases/application_resource.tf +++ b/func-tests/application_resource.tf @@ -11,13 +11,13 @@ resource "pingaccess_application_resource" "demo_application_resource" { } path_patterns { - pattern = "%s" + pattern = "/foo" type = "WILDCARD" } path_prefixes = [ "/as/token.oauth2", - "%s", + "/foo", ] audit_level = "OFF" @@ -29,12 +29,12 @@ resource "pingaccess_application_resource" "demo_application_resource" { policy { web { type = "Rule" - id = 
pingaccess_rule.demo_rule_one.id + id = pingaccess_rule.demo_1.id } web { type = "Rule" - id = pingaccess_rule.demo_rule_two.id + id = pingaccess_rule.demo_2.id } } } diff --git a/pingaccess/test_cases/auth_token_management.tf b/func-tests/auth_token_management.tf similarity index 100% rename from pingaccess/test_cases/auth_token_management.tf rename to func-tests/auth_token_management.tf diff --git a/pingaccess/test_cases/authn_req_list.tf b/func-tests/authn_req_list.tf similarity index 100% rename from pingaccess/test_cases/authn_req_list.tf rename to func-tests/authn_req_list.tf diff --git a/pingaccess/test_cases/certificate.tf b/func-tests/certificate.tf similarity index 53% rename from pingaccess/test_cases/certificate.tf rename to func-tests/certificate.tf index 6dde155e..a9a4e599 100644 --- a/pingaccess/test_cases/certificate.tf +++ b/func-tests/certificate.tf @@ -1,4 +1,4 @@ resource "pingaccess_certificate" "demo_certificate" { alias = "demo" - file_data = base64encode(file("test_cases/amazon_root_ca1.pem")) + file_data = base64encode(file("amazon_root_ca1.pem")) } diff --git a/pingaccess/test_cases/engine_listener.tf b/func-tests/engine_listener.tf similarity index 100% rename from pingaccess/test_cases/engine_listener.tf rename to func-tests/engine_listener.tf diff --git a/pingaccess/test_cases/hsm_provider.tf b/func-tests/hsm_provider.tf similarity index 71% rename from pingaccess/test_cases/hsm_provider.tf rename to func-tests/hsm_provider.tf index 610dcc49..62dcf059 100644 --- a/pingaccess/test_cases/hsm_provider.tf +++ b/func-tests/hsm_provider.tf @@ -3,9 +3,9 @@ resource "pingaccess_hsm_provider" "test" { name = "demo" configuration = <
diff --git a/func-tests/provider.p12 b/func-tests/provider.p12 new file mode 100644 GIT binary patch literal 4581 [base85-encoded binary data omitted] From: Iain Adams Date: Tue, 30 Jun 2020 21:55:31 +0100 Subject: [PATCH 17/23] fix up tests --- docs/resources/pingaccess_acme_server.md | 5 ++++- docs/resources/pingaccess_hsm_provider.md | 12 ++++++++++- func-tests/acme_server.tf | 5 +++-- func-tests/hsm_provider.tf | 1 + pingaccess/test_cases/amazon_root_ca1.pem | 17 +++++++++++++++ pingaccess/test_cases/amazon_root_ca2.pem | 26 +++++++++++++++++++++++ pingaccess/test_cases/amazon_root_ca3.pem | 12 +++++++++++ 7 files changed, 74 insertions(+), 4 deletions(-) create mode 100644 pingaccess/test_cases/amazon_root_ca1.pem create mode 100644 pingaccess/test_cases/amazon_root_ca2.pem create mode 100644 pingaccess/test_cases/amazon_root_ca3.pem diff --git a/docs/resources/pingaccess_acme_server.md b/docs/resources/pingaccess_acme_server.md index 89c28dce..eea1c09e 100644 --- a/docs/resources/pingaccess_acme_server.md +++ b/docs/resources/pingaccess_acme_server.md @@ -4,7 +4,10 @@ Provides a acme server.
## Example Usage ```terraform -{!../func-tests//acme_server.tf!} +resource "pingaccess_acme_server" "test" { + name = "example" + url = "https://acme-staging-v02.api.letsencrypt.org/directory" +} ``` ## Argument Attributes diff --git a/docs/resources/pingaccess_hsm_provider.md b/docs/resources/pingaccess_hsm_provider.md index 26ddfe29..1b9c9d66 100644 --- a/docs/resources/pingaccess_hsm_provider.md +++ b/docs/resources/pingaccess_hsm_provider.md @@ -7,7 +7,17 @@ Provides a HSM provider. ## Example Usage ```terraform -{!../func-tests//hsm_provider.tf!} +resource "pingaccess_hsm_provider" "test" { + class_name = "com.pingidentity.pa.hsm.cloudhsm.plugin.AwsCloudHsmProvider" + name = "demo" + configuration = < Date: Sat, 4 Jul 2020 22:29:04 +0100 Subject: [PATCH 18/23] use refactored API client --- go.mod | 7 +- go.sum | 35 +- pingaccess/config.go | 161 +++++- .../data_source_pingaccess_acme_default.go | 3 +- .../data_source_pingaccess_certificate.go | 7 +- pingaccess/data_source_pingaccess_keypair.go | 7 +- ...ingaccess_pingfederate_runtime_metadata.go | 53 +- ...e_pingaccess_trusted_certificate_groups.go | 7 +- pingaccess/provider.go | 2 +- pingaccess/provider_test.go | 12 +- ...ource_pingaccess_access_token_validator.go | 28 +- ..._pingaccess_access_token_validator_test.go | 7 +- pingaccess/resource_pingaccess_acme_server.go | 22 +- .../resource_pingaccess_acme_server_test.go | 12 +- pingaccess/resource_pingaccess_application.go | 26 +- ...esource_pingaccess_application_resource.go | 30 +- ...ce_pingaccess_application_resource_test.go | 41 +- .../resource_pingaccess_application_test.go | 22 +- ...source_pingaccess_auth_token_management.go | 17 +- ...e_pingaccess_auth_token_management_test.go | 9 +- .../resource_pingaccess_authn_req_list.go | 26 +- ...resource_pingaccess_authn_req_list_test.go | 12 +- pingaccess/resource_pingaccess_certificate.go | 26 +- .../resource_pingaccess_certificate_test.go | 11 +- .../resource_pingaccess_engine_listener.go | 26 +- ...esource_pingaccess_engine_listener_test.go | 14 +- .../resource_pingaccess_hsm_provider.go | 28 +- .../resource_pingaccess_hsm_provider_test.go | 11 +- ...gaccess_http_config_request_host_source.go | 18 +- ...ss_http_config_request_host_source_test.go | 9 +- .../resource_pingaccess_https_listener.go | 22 +- ...resource_pingaccess_https_listener_test.go | 12 +- .../resource_pingaccess_identity_mapping.go | 28 +- ...source_pingaccess_identity_mapping_test.go | 51 +- pingaccess/resource_pingaccess_keypair.go | 30 +- .../resource_pingaccess_keypair_test.go | 11 +- .../resource_pingaccess_oauth_server.go | 17 +- .../resource_pingaccess_oauth_server_test.go | 19 +- .../resource_pingaccess_pingfederate_admin.go | 17 +- ...urce_pingaccess_pingfederate_admin_test.go | 15 +- .../resource_pingaccess_pingfederate_oauth.go | 17 +- ...urce_pingaccess_pingfederate_oauth_test.go | 13 +- ...esource_pingaccess_pingfederate_runtime.go | 26 +- ...ce_pingaccess_pingfederate_runtime_test.go | 19 +- pingaccess/resource_pingaccess_rule.go | 28 +- pingaccess/resource_pingaccess_rule_test.go | 7 +- pingaccess/resource_pingaccess_rulesets.go | 26 +- .../resource_pingaccess_rulesets_test.go | 11 +- pingaccess/resource_pingaccess_site.go | 26 +- .../resource_pingaccess_site_authenticator.go | 28 +- ...urce_pingaccess_site_authenticator_test.go | 46 +- pingaccess/resource_pingaccess_site_test.go | 14 +- ...resource_pingaccess_third_party_service.go | 26 +- ...rce_pingaccess_third_party_service_test.go | 14 +- ...e_pingaccess_trusted_certificate_groups.go | 26 +- 
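The `configuration` heredoc in the HSM provider example of the preceding patch is truncated in this copy. A plausible reconstruction of its shape is sketched below; the JSON keys and values are assumptions for illustration only, not taken from the patch:

```terraform
resource "pingaccess_hsm_provider" "test" {
  class_name = "com.pingidentity.pa.hsm.cloudhsm.plugin.AwsCloudHsmProvider"
  name       = "demo"

  # Hypothetical placeholder fields; consult the plugin's descriptor
  # for the exact configuration keys it expects.
  configuration = <<EOF
{
  "slotId": "",
  "user": "demo-user",
  "password": "top-secret"
}
EOF
}
```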
...gaccess_trusted_certificate_groups_test.go | 12 +- pingaccess/resource_pingaccess_virtualhost.go | 26 +- .../resource_pingaccess_virtualhost_test.go | 14 +- pingaccess/resource_pingaccess_websession.go | 26 +- .../resource_pingaccess_websession_test.go | 22 +- pingaccess/structures.go | 57 +- pingaccess/structures_test.go | 13 +- .../github.com/google/go-cmp/cmp/compare.go | 87 ++- .../google/go-cmp/cmp/export_panic.go | 4 +- .../google/go-cmp/cmp/export_unsafe.go | 6 +- .../github.com/google/go-cmp/cmp/options.go | 55 +- vendor/github.com/google/go-cmp/cmp/path.go | 71 ++- .../google/go-cmp/cmp/report_compare.go | 17 +- .../google/go-cmp/cmp/report_reflect.go | 23 +- .../google/go-cmp/cmp/report_slices.go | 4 +- vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/CONTRIBUTING.md | 10 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 19 + vendor/github.com/google/uuid/dce.go | 80 +++ vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/go.mod | 1 + vendor/github.com/google/uuid/hash.go | 53 ++ vendor/github.com/google/uuid/marshal.go | 37 ++ vendor/github.com/google/uuid/node.go | 90 +++ vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 ++ vendor/github.com/google/uuid/sql.go | 59 ++ vendor/github.com/google/uuid/time.go | 123 ++++ vendor/github.com/google/uuid/util.go | 43 ++ vendor/github.com/google/uuid/uuid.go | 245 ++++++++ vendor/github.com/google/uuid/version1.go | 44 ++ vendor/github.com/google/uuid/version4.go | 38 ++ .../pingaccess/accessTokenValidators.go | 185 ------ .../pingaccess-sdk-go/pingaccess/acme.go | 458 --------------- .../pingaccess/adminConfig.go | 226 -------- .../pingaccess/adminSessionInfo.go | 72 --- .../pingaccess-sdk-go/pingaccess/agents.go | 269 --------- .../pingaccess/applications.go | 546 ------------------ .../pingaccess-sdk-go/pingaccess/auth.go | 268 --------- .../pingaccess/authTokenManagement.go | 120 ---- .../pingaccess/authnReqLists.go | 165 ------ .../pingaccess-sdk-go/pingaccess/backup.go | 32 - .../pingaccess/certificates.go | 191 ------ .../pingaccess/client/client.go | 45 ++ .../pingaccess/client/metadata/clientinfo.go | 9 + .../pingaccess-sdk-go/pingaccess/config.go | 219 ------- .../pingaccess/config/config.go | 57 ++ .../pingaccess/engineListeners.go | 165 ------ .../pingaccess-sdk-go/pingaccess/engines.go | 286 --------- .../pingaccess/globalUnprotectedResources.go | 165 ------ .../pingaccess/highAvailability.go | 409 ------------- .../pingaccess/hsmProviders.go | 185 ------ .../pingaccess/httpConfig.go | 268 --------- .../pingaccess/httpsListeners.go | 99 ---- .../pingaccess/identityMappings.go | 211 ------- .../pingaccess-sdk-go/pingaccess/keyPairs.go | 391 ------------- .../pingaccess-sdk-go/pingaccess/license.go | 56 -- .../pingaccess/{ => models}/models.go | 54 +- .../pingaccess-sdk-go/pingaccess/oauth.go | 76 --- .../pingaccess/oauthKeyManagement.go | 120 ---- .../pingaccess-sdk-go/pingaccess/oidc.go | 143 ----- .../pingaccess/pingaccess.go | 247 -------- .../pingaccess/pingfederate.go | 308 ---------- .../pingaccess-sdk-go/pingaccess/pingone.go | 96 --- .../pingaccess-sdk-go/pingaccess/proxies.go | 165 ------ .../pingaccess-sdk-go/pingaccess/redirects.go | 169 ------ .../pingaccess/rejectionHandlers.go | 211 ------- 
.../pingaccess/request/request.go | 234 ++++++++ .../pingaccess-sdk-go/pingaccess/rules.go | 211 ------- .../pingaccess-sdk-go/pingaccess/rulesets.go | 205 ------- .../pingaccess/sharedSecrets.go | 122 ---- .../pingaccess/siteAuthenticators.go | 211 ------- .../pingaccess-sdk-go/pingaccess/sites.go | 165 ------ .../pingaccess/thirdPartyServices.go | 165 ------ .../pingaccess/tokenProvider.go | 76 --- .../pingaccess/trustedCertificateGroups.go | 165 ------ .../pingaccess/unknownResources.go | 76 --- .../pingaccess-sdk-go/pingaccess/users.go | 142 ----- .../pingaccess-sdk-go/pingaccess/version.go | 32 - .../pingaccess/virtualhosts.go | 165 ------ .../pingaccess/webSessionManagement.go | 269 --------- .../pingaccess/webSessions.go | 165 ------ .../accessTokenValidators.go | 214 +++++++ .../accessTokenValidators/interface.go | 16 + .../pingaccess-sdk-go/services/acme/acme.go | 475 +++++++++++++++ .../services/acme/interface.go | 24 + .../services/adminConfig/adminConfig.go | 267 +++++++++ .../services/adminConfig/interface.go | 19 + .../adminSessionInfo/adminSessionInfo.go | 107 ++++ .../services/adminSessionInfo/interface.go | 13 + .../services/agents/agents.go | 289 +++++++++ .../services/agents/interface.go | 18 + .../services/applications/applications.go | 542 +++++++++++++++++ .../services/applications/interface.go | 27 + .../pingaccess-sdk-go/services/auth/auth.go | 302 ++++++++++ .../services/auth/interface.go | 22 + .../authTokenManagement.go | 157 +++++ .../services/authTokenManagement/interface.go | 15 + .../services/authnReqLists/authnReqLists.go | 195 +++++++ .../services/authnReqLists/interface.go | 15 + .../services/backup/backup.go | 68 +++ .../services/backup/interface.go | 7 + .../services/certificates/certificates.go | 222 +++++++ .../services/certificates/interface.go | 16 + .../services/config/config.go | 258 +++++++++ .../services/config/interface.go | 19 + .../engineListeners/engineListeners.go | 195 +++++++ .../services/engineListeners/interface.go | 15 + .../services/engines/engines.go | 305 ++++++++++ .../services/engines/interface.go | 19 + .../globalUnprotectedResources.go | 195 +++++++ .../globalUnprotectedResources/interface.go | 15 + .../highAvailability/highAvailability.go | 431 ++++++++++++++ .../services/highAvailability/interface.go | 24 + .../services/hsmProviders/hsmProviders.go | 214 +++++++ .../services/hsmProviders/interface.go | 16 + .../services/httpConfig/httpConfig.go | 302 ++++++++++ .../services/httpConfig/interface.go | 22 + .../services/httpsListeners/httpsListeners.go | 135 +++++ .../services/httpsListeners/interface.go | 13 + .../identityMappings/identityMappings.go | 241 ++++++++ .../services/identityMappings/interface.go | 17 + .../services/keyPairs/interface.go | 24 + .../services/keyPairs/keyPairs.go | 426 ++++++++++++++ .../services/license/interface.go | 12 + .../services/license/license.go | 94 +++ .../services/oauth/interface.go | 13 + .../pingaccess-sdk-go/services/oauth/oauth.go | 113 ++++ .../services/oauthKeyManagement/interface.go | 15 + .../oauthKeyManagement/oauthKeyManagement.go | 157 +++++ .../services/oidc/interface.go | 16 + .../pingaccess-sdk-go/services/oidc/oidc.go | 179 ++++++ .../services/pingfederate/interface.go | 24 + .../services/pingfederate/pingfederate.go | 340 +++++++++++ .../services/pingone/interface.go | 14 + .../services/pingone/pingone.go | 132 +++++ .../services/proxies/interface.go | 15 + .../services/proxies/proxies.go | 195 +++++++ .../services/redirects/interface.go | 15 + 
.../services/redirects/redirects.go | 197 +++++++ .../services/rejectionHandlers/interface.go | 17 + .../rejectionHandlers/rejectionHandlers.go | 241 ++++++++ .../services/rules/interface.go | 17 + .../pingaccess-sdk-go/services/rules/rules.go | 241 ++++++++ .../services/rulesets/interface.go | 17 + .../services/rulesets/rulesets.go | 233 ++++++++ .../services/sharedSecrets/interface.go | 14 + .../services/sharedSecrets/sharedSecrets.go | 159 +++++ .../services/siteAuthenticators/interface.go | 17 + .../siteAuthenticators/siteAuthenticators.go | 241 ++++++++ .../services/sites/interface.go | 15 + .../pingaccess-sdk-go/services/sites/sites.go | 195 +++++++ .../services/thirdPartyServices/interface.go | 15 + .../thirdPartyServices/thirdPartyServices.go | 195 +++++++ .../services/tokenProvider/interface.go | 13 + .../services/tokenProvider/tokenProvider.go | 113 ++++ .../trustedCertificateGroups/interface.go | 15 + .../trustedCertificateGroups.go | 195 +++++++ .../services/unknownResources/interface.go | 13 + .../unknownResources/unknownResources.go | 113 ++++ .../services/users/interface.go | 14 + .../pingaccess-sdk-go/services/users/users.go | 171 ++++++ .../services/version/interface.go | 11 + .../services/version/version.go | 69 +++ .../services/virtualhosts/interface.go | 15 + .../services/virtualhosts/virtualhosts.go | 195 +++++++ .../webSessionManagement/interface.go | 22 + .../webSessionManagement.go | 298 ++++++++++ .../services/webSessions/interface.go | 15 + .../services/webSessions/webSessions.go | 195 +++++++ vendor/modules.txt | 61 +- 228 files changed, 12911 insertions(+), 9389 deletions(-) create mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/go.mod create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/accessTokenValidators.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/acme.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminConfig.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminSessionInfo.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/agents.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/applications.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/auth.go delete mode 100644 
vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authTokenManagement.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authnReqLists.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/backup.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/certificates.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/client.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata/clientinfo.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config/config.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engineListeners.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engines.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/globalUnprotectedResources.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/highAvailability.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/hsmProviders.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpConfig.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpsListeners.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/identityMappings.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/keyPairs.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/license.go rename vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/{ => models}/models.go (95%) delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauth.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauthKeyManagement.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oidc.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingfederate.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingone.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/proxies.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/redirects.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rejectionHandlers.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/request/request.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rules.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rulesets.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sharedSecrets.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/siteAuthenticators.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sites.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/thirdPartyServices.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/tokenProvider.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/trustedCertificateGroups.go delete mode 100644 
vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/unknownResources.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/users.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/version.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/virtualhosts.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessionManagement.go delete mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessions.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators/accessTokenValidators.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/acme/acme.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/acme/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminConfig/adminConfig.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminConfig/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminSessionInfo/adminSessionInfo.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminSessionInfo/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/agents/agents.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/agents/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/applications/applications.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/applications/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/auth/auth.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/auth/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement/authTokenManagement.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists/authnReqLists.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/backup/backup.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/backup/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/certificates/certificates.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/certificates/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/config/config.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/config/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/engineListeners/engineListeners.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/engineListeners/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/engines/engines.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/engines/interface.go create mode 100644 
vendor/github.com/iwarapter/pingaccess-sdk-go/services/globalUnprotectedResources/globalUnprotectedResources.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/globalUnprotectedResources/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/highAvailability/highAvailability.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/highAvailability/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders/hsmProviders.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpConfig/httpConfig.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpConfig/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners/httpsListeners.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/identityMappings/identityMappings.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/identityMappings/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/keyPairs/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/keyPairs/keyPairs.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/license/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/license/license.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauth/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauth/oauth.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauthKeyManagement/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauthKeyManagement/oauthKeyManagement.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/oidc/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/oidc/oidc.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingfederate/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingfederate/pingfederate.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingone/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingone/pingone.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/proxies/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/proxies/proxies.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/redirects/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/redirects/redirects.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/rejectionHandlers/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/rejectionHandlers/rejectionHandlers.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/rules/interface.go create mode 100644 
vendor/github.com/iwarapter/pingaccess-sdk-go/services/rules/rules.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/rulesets/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/rulesets/rulesets.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/sharedSecrets/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/sharedSecrets/sharedSecrets.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators/siteAuthenticators.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/sites/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/sites/sites.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices/thirdPartyServices.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/tokenProvider/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/tokenProvider/tokenProvider.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups/trustedCertificateGroups.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/unknownResources/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/unknownResources/unknownResources.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/users/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/users/users.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/version/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/version/version.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts/virtualhosts.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessionManagement/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessionManagement/webSessionManagement.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessions/interface.go create mode 100644 vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessions/webSessions.go diff --git a/go.mod b/go.mod index 10516d41..75fdabce 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,12 @@ module github.com/iwarapter/terraform-provider-pingaccess go 1.14 require ( - github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c // indirect - github.com/google/go-cmp v0.3.1 + github.com/google/go-cmp v0.4.1 + github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0-rc.2 - 
github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200614193934-b24fe6e545e5 + github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200704204151-1493c5058af2 github.com/ory/dockertest/v3 v3.6.0 github.com/pkg/errors v0.9.1 // indirect github.com/sirupsen/logrus v1.5.0 // indirect @@ -17,4 +17,5 @@ require ( golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa // indirect gopkg.in/yaml.v2 v2.2.8 // indirect + gotest.tools v2.2.0+incompatible // indirect ) diff --git a/go.sum b/go.sum index d3d4fed6..18a0d770 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,6 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7O github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= -github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -37,7 +35,6 @@ github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= @@ -45,19 +42,14 @@ github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4r github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c h1:8ahmSVELW1wghbjerVAyuEYD5+Dio66RYvSS0iGfL1M= github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -92,13 +84,18 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -149,8 +146,8 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKe github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200614193934-b24fe6e545e5 h1:Tks2Y325NgWNlODjGUslW3V2IRa6kYwsyg0NnyH0BOc= -github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200614193934-b24fe6e545e5/go.mod h1:5FnKTPlitpx5z7Q/itbtKkgCEOBXXqhGOJqmryBZrZk= +github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200704204151-1493c5058af2 
h1:UjSjmCs+ZUTpTeadFKMU32qh4WFOf/2qYu+VLr3vbiI= +github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200704204151-1493c5058af2/go.mod h1:ll8wM67ZyNn1yP1fgEz7uRi3ELrG9oRza1NX2+kB5Qc= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= @@ -173,8 +170,8 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= @@ -211,12 +208,11 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.0-rc6 h1:7AoN22rYxxkmsJS48wFaziH/n0OvrZVqL/TglgHKbKQ= -github.com/opencontainers/runc v1.0.0-rc6/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/ory/dockertest v3.3.4+incompatible h1:VrpM6Gqg7CrPm3bL4Wm1skO+zFWLbh7/Xb5kGEbJRh8= -github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/ory/dockertest/v3 v3.6.0 h1:I6KNJ6izxGduLACQii2SP/g7GN0JM9Xfaik6aAVaw6Y= github.com/ory/dockertest/v3 v3.6.0/go.mod h1:4ZOpj8qBUmh8fcBSVzkH2bws2s91JdGvHUqan4GHEuQ= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -231,7 +227,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus 
v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -269,7 +264,6 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -298,6 +292,7 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= @@ -317,7 +312,6 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190311152110-c8c8c57fd1e1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -329,6 +323,7 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191018095205-727590c5006e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200121082415-34d275377bf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 
h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -355,6 +350,8 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= diff --git a/pingaccess/config.go b/pingaccess/config.go index 1191f5b0..29ee5f55 100644 --- a/pingaccess/config.go +++ b/pingaccess/config.go @@ -6,27 +6,176 @@ import ( "net/url" "os" + "github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators" + "github.com/iwarapter/pingaccess-sdk-go/services/acme" + "github.com/iwarapter/pingaccess-sdk-go/services/adminConfig" + "github.com/iwarapter/pingaccess-sdk-go/services/adminSessionInfo" + "github.com/iwarapter/pingaccess-sdk-go/services/agents" + "github.com/iwarapter/pingaccess-sdk-go/services/applications" + "github.com/iwarapter/pingaccess-sdk-go/services/auth" + "github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement" + "github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists" + "github.com/iwarapter/pingaccess-sdk-go/services/backup" + "github.com/iwarapter/pingaccess-sdk-go/services/certificates" + "github.com/iwarapter/pingaccess-sdk-go/services/config" + "github.com/iwarapter/pingaccess-sdk-go/services/engineListeners" + "github.com/iwarapter/pingaccess-sdk-go/services/engines" + "github.com/iwarapter/pingaccess-sdk-go/services/globalUnprotectedResources" + "github.com/iwarapter/pingaccess-sdk-go/services/highAvailability" + "github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders" + "github.com/iwarapter/pingaccess-sdk-go/services/httpConfig" + "github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners" + "github.com/iwarapter/pingaccess-sdk-go/services/identityMappings" + "github.com/iwarapter/pingaccess-sdk-go/services/keyPairs" + "github.com/iwarapter/pingaccess-sdk-go/services/license" + "github.com/iwarapter/pingaccess-sdk-go/services/oauth" + "github.com/iwarapter/pingaccess-sdk-go/services/oauthKeyManagement" + "github.com/iwarapter/pingaccess-sdk-go/services/oidc" + "github.com/iwarapter/pingaccess-sdk-go/services/pingfederate" + "github.com/iwarapter/pingaccess-sdk-go/services/pingone" + "github.com/iwarapter/pingaccess-sdk-go/services/proxies" + "github.com/iwarapter/pingaccess-sdk-go/services/redirects" + "github.com/iwarapter/pingaccess-sdk-go/services/rejectionHandlers" + "github.com/iwarapter/pingaccess-sdk-go/services/rules" + "github.com/iwarapter/pingaccess-sdk-go/services/rulesets" + "github.com/iwarapter/pingaccess-sdk-go/services/sharedSecrets" + 
"github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators" + "github.com/iwarapter/pingaccess-sdk-go/services/sites" + "github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices" + "github.com/iwarapter/pingaccess-sdk-go/services/tokenProvider" + "github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups" + "github.com/iwarapter/pingaccess-sdk-go/services/unknownResources" + "github.com/iwarapter/pingaccess-sdk-go/services/users" + "github.com/iwarapter/pingaccess-sdk-go/services/version" + "github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts" + "github.com/iwarapter/pingaccess-sdk-go/services/webSessionManagement" + "github.com/iwarapter/pingaccess-sdk-go/services/webSessions" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + paCfg "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" ) -type config struct { +type cfg struct { Username string Password string Context string BaseURL string } +type paClient struct { + AccessTokenValidators accessTokenValidators.AccessTokenValidatorsAPI + Acme acme.AcmeAPI + AdminConfig adminConfig.AdminConfigAPI + AdminSessionInfo adminSessionInfo.AdminSessionInfoAPI + Agents agents.AgentsAPI + Applications applications.ApplicationsAPI + Auth auth.AuthAPI + AuthTokenManagement authTokenManagement.AuthTokenManagementAPI + AuthnReqLists authnReqLists.AuthnReqListsAPI + Backup backup.BackupAPI + Certificates certificates.CertificatesAPI + Config config.ConfigAPI + EngineListeners engineListeners.EngineListenersAPI + Engines engines.EnginesAPI + GlobalUnprotectedResources globalUnprotectedResources.GlobalUnprotectedResourcesAPI + HighAvailability highAvailability.HighAvailabilityAPI + HsmProviders hsmProviders.HsmProvidersAPI + HttpConfig httpConfig.HttpConfigAPI + HttpsListeners httpsListeners.HttpsListenersAPI + IdentityMappings identityMappings.IdentityMappingsAPI + KeyPairs keyPairs.KeyPairsAPI + License license.LicenseAPI + Oauth oauth.OauthAPI + OauthKeyManagement oauthKeyManagement.OauthKeyManagementAPI + Oidc oidc.OidcAPI + Pingfederate pingfederate.PingfederateAPI + Pingone pingone.PingoneAPI + Proxies proxies.ProxiesAPI + Redirects redirects.RedirectsAPI + RejectionHandlers rejectionHandlers.RejectionHandlersAPI + Rules rules.RulesAPI + Rulesets rulesets.RulesetsAPI + SharedSecrets sharedSecrets.SharedSecretsAPI + SiteAuthenticators siteAuthenticators.SiteAuthenticatorsAPI + Sites sites.SitesAPI + ThirdPartyServices thirdPartyServices.ThirdPartyServicesAPI + TokenProvider tokenProvider.TokenProviderAPI + TrustedCertificateGroups trustedCertificateGroups.TrustedCertificateGroupsAPI + UnknownResources unknownResources.UnknownResourcesAPI + Users users.UsersAPI + Version version.VersionAPI + Virtualhosts virtualhosts.VirtualhostsAPI + WebSessionManagement webSessionManagement.WebSessionManagementAPI + WebSessions webSessions.WebSessionsAPI + + apiVersion string +} + // Client configures and returns a fully initialized PAClient -func (c *config) Client() (interface{}, diag.Diagnostics) { +func (c *cfg) Client() (interface{}, diag.Diagnostics) { /* #nosec */ http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - url, _ := url.Parse(c.BaseURL) - client := pingaccess.NewClient(c.Username, c.Password, url, c.Context, nil) + u, _ := url.Parse(c.BaseURL) + + cfg := paCfg.NewConfig().WithEndpoint(u.String() + 
c.Context).WithUsername(c.Username).WithPassword(c.Password) if os.Getenv("TF_LOG") == "DEBUG" || os.Getenv("TF_LOG") == "TRACE" { - client.LogDebug = true + cfg.WithDebug(true) } + + client := paClient{ + AccessTokenValidators: accessTokenValidators.New(cfg), + Acme: acme.New(cfg), + AdminConfig: adminConfig.New(cfg), + AdminSessionInfo: adminSessionInfo.New(cfg), + Agents: agents.New(cfg), + Applications: applications.New(cfg), + Auth: auth.New(cfg), + AuthTokenManagement: authTokenManagement.New(cfg), + AuthnReqLists: authnReqLists.New(cfg), + Backup: backup.New(cfg), + Certificates: certificates.New(cfg), + Config: config.New(cfg), + EngineListeners: engineListeners.New(cfg), + Engines: engines.New(cfg), + GlobalUnprotectedResources: globalUnprotectedResources.New(cfg), + HighAvailability: highAvailability.New(cfg), + HsmProviders: hsmProviders.New(cfg), + HttpConfig: httpConfig.New(cfg), + HttpsListeners: httpsListeners.New(cfg), + IdentityMappings: identityMappings.New(cfg), + KeyPairs: keyPairs.New(cfg), + License: license.New(cfg), + Oauth: oauth.New(cfg), + OauthKeyManagement: oauthKeyManagement.New(cfg), + Oidc: oidc.New(cfg), + Pingfederate: pingfederate.New(cfg), + Pingone: pingone.New(cfg), + Proxies: proxies.New(cfg), + Redirects: redirects.New(cfg), + RejectionHandlers: rejectionHandlers.New(cfg), + Rules: rules.New(cfg), + Rulesets: rulesets.New(cfg), + SharedSecrets: sharedSecrets.New(cfg), + SiteAuthenticators: siteAuthenticators.New(cfg), + Sites: sites.New(cfg), + ThirdPartyServices: thirdPartyServices.New(cfg), + TokenProvider: tokenProvider.New(cfg), + TrustedCertificateGroups: trustedCertificateGroups.New(cfg), + UnknownResources: unknownResources.New(cfg), + Users: users.New(cfg), + Version: version.New(cfg), + Virtualhosts: virtualhosts.New(cfg), + WebSessionManagement: webSessionManagement.New(cfg), + WebSessions: webSessions.New(cfg), + } + + v, _, err := client.Version.VersionCommand() + if err != nil { + return nil, diag.Errorf("unable to retrieve version %s", err) + } + client.apiVersion = *v.Version + return client, nil } diff --git a/pingaccess/data_source_pingaccess_acme_default.go b/pingaccess/data_source_pingaccess_acme_default.go index 086afd76..6e8feed7 100644 --- a/pingaccess/data_source_pingaccess_acme_default.go +++ b/pingaccess/data_source_pingaccess_acme_default.go @@ -5,7 +5,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func dataSourcePingAccessAcmeDefault() *schema.Resource { @@ -25,7 +24,7 @@ func dataSourcePingAccessAcmeDefaultSchema() map[string]*schema.Schema { } func dataSourcePingAccessAcmeDefaultRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Acme + svc := m.(paClient).Acme result, _, err := svc.GetDefaultAcmeServerCommand() if err != nil { return diag.Errorf("unable to read ACME Default: %s", err) diff --git a/pingaccess/data_source_pingaccess_certificate.go b/pingaccess/data_source_pingaccess_certificate.go index 51abb8ab..481ba888 100644 --- a/pingaccess/data_source_pingaccess_certificate.go +++ b/pingaccess/data_source_pingaccess_certificate.go @@ -4,9 +4,10 @@ import ( "context" "strconv" + "github.com/iwarapter/pingaccess-sdk-go/services/certificates" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa 
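Taken together, the config.go hunk above replaces the single pingaccess.Client with one package per admin-API service, all built from a shared builder-style config and sanity-checked with a version call. A minimal end-to-end sketch of that pattern, using only constructors and builder methods that appear in this patch (paCfg.NewConfig, WithEndpoint/WithUsername/WithPassword, version.New, VersionCommand), with the endpoint and credentials borrowed from the test setup:

package main

import (
	"log"

	paCfg "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
	"github.com/iwarapter/pingaccess-sdk-go/services/version"
)

func main() {
	// One shared config; every service package exposes New(cfg).
	cfg := paCfg.NewConfig().
		WithEndpoint("https://localhost:9000/pa-admin-api/v3").
		WithUsername("administrator").
		WithPassword("2FederateM0re")

	// version is one of the ~40 service packages wired into paClient above.
	svc := version.New(cfg)
	v, _, err := svc.VersionCommand()
	if err != nil {
		log.Fatalf("unable to retrieve version: %s", err)
	}
	log.Printf("connected to PingAccess %s", *v.Version)
}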
"github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func dataSourcePingAccessCertificate() *schema.Resource { @@ -20,8 +21,8 @@ func dataSourcePingAccessCertificate() *schema.Resource { } func dataSourcePingAccessCertificateRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Certificates - input := &pa.GetTrustedCertsInput{ + svc := m.(paClient).Certificates + input := &certificates.GetTrustedCertsInput{ Alias: d.Get("alias").(string), } result, _, err := svc.GetTrustedCerts(input) diff --git a/pingaccess/data_source_pingaccess_keypair.go b/pingaccess/data_source_pingaccess_keypair.go index a383452d..df6caee2 100644 --- a/pingaccess/data_source_pingaccess_keypair.go +++ b/pingaccess/data_source_pingaccess_keypair.go @@ -4,9 +4,10 @@ import ( "context" "strconv" + "github.com/iwarapter/pingaccess-sdk-go/services/keyPairs" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func dataSourcePingAccessKeyPair() *schema.Resource { @@ -30,8 +31,8 @@ func dataSourcePingAccessKeyPair() *schema.Resource { } func dataSourcePingAccessKeyPairRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).KeyPairs - input := &pa.GetKeyPairsCommandInput{ + svc := m.(paClient).KeyPairs + input := &keyPairs.GetKeyPairsCommandInput{ Alias: d.Get("alias").(string), } result, _, err := svc.GetKeyPairsCommand(input) diff --git a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go index 9194ccef..97ea3582 100644 --- a/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go +++ b/pingaccess/data_source_pingaccess_pingfederate_runtime_metadata.go @@ -5,7 +5,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func dataSourcePingAccessPingFederateRuntimeMetadata() *schema.Resource { @@ -89,63 +88,63 @@ func dataSourcePingAccessPingFederateRuntimeMetadataSchema() map[string]*schema. } func dataSourcePingAccessPingFederateRuntimeMetadataRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Pingfederate + svc := m.(paClient).Pingfederate result, resp, err := svc.GetPingFederateMetadataCommand() if err != nil { return diag.Errorf("unable to read PingFederate Runtime Metadata: %s\n%v", err, resp) } var diags diag.Diagnostics d.SetId("pingfederate_runtime_metadata") - setResourceDataStringWithDiagnostic(d, "authorization_endpoint", result.Authorization_endpoint, &diags) - setResourceDataStringWithDiagnostic(d, "backchannel_authentication_endpoint", result.Backchannel_authentication_endpoint, &diags) - if err := d.Set("claim_types_supported", result.Claim_types_supported); err != nil { + setResourceDataStringWithDiagnostic(d, "authorization_endpoint", result.AuthorizationEndpoint, &diags) + setResourceDataStringWithDiagnostic(d, "backchannel_authentication_endpoint", result.BackchannelAuthenticationEndpoint, &diags) + if err := d.Set("claim_types_supported", result.ClaimTypesSupported); err != nil { diags = append(diags, diag.FromErr(err)...) 
} - if err := d.Set("claims_parameter_supported", result.Claims_parameter_supported); err != nil { + if err := d.Set("claims_parameter_supported", result.ClaimsParameterSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } - if err := d.Set("claims_supported", result.Claims_supported); err != nil { + if err := d.Set("claims_supported", result.ClaimsSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } - if err := d.Set("code_challenge_methods_supported", result.Code_challenge_methods_supported); err != nil { + if err := d.Set("code_challenge_methods_supported", result.CodeChallengeMethodsSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } - setResourceDataStringWithDiagnostic(d, "end_session_endpoint", result.End_session_endpoint, &diags) - if err := d.Set("grant_types_supported", result.Grant_types_supported); err != nil { + setResourceDataStringWithDiagnostic(d, "end_session_endpoint", result.EndSessionEndpoint, &diags) + if err := d.Set("grant_types_supported", result.GrantTypesSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } - if err := d.Set("id_token_signing_alg_values_supported", result.Id_token_signing_alg_values_supported); err != nil { + if err := d.Set("id_token_signing_alg_values_supported", result.IdTokenSigningAlgValuesSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } - setResourceDataStringWithDiagnostic(d, "introspection_endpoint", result.Introspection_endpoint, &diags) + setResourceDataStringWithDiagnostic(d, "introspection_endpoint", result.IntrospectionEndpoint, &diags) setResourceDataStringWithDiagnostic(d, "issuer", result.Issuer, &diags) - setResourceDataStringWithDiagnostic(d, "jwks_uri", result.Jwks_uri, &diags) - setResourceDataStringWithDiagnostic(d, "ping_end_session_endpoint", result.Ping_end_session_endpoint, &diags) - setResourceDataStringWithDiagnostic(d, "ping_revoked_sris_endpoint", result.Ping_revoked_sris_endpoint, &diags) - if err := d.Set("request_object_signing_alg_values_supported", result.Request_object_signing_alg_values_supported); err != nil { + setResourceDataStringWithDiagnostic(d, "jwks_uri", result.JwksUri, &diags) + setResourceDataStringWithDiagnostic(d, "ping_end_session_endpoint", result.PingEndSessionEndpoint, &diags) + setResourceDataStringWithDiagnostic(d, "ping_revoked_sris_endpoint", result.PingRevokedSrisEndpoint, &diags) + if err := d.Set("request_object_signing_alg_values_supported", result.RequestObjectSigningAlgValuesSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } - setResourceDataBoolWithDiagnostic(d, "request_parameter_supported", result.Request_parameter_supported, &diags) - setResourceDataBoolWithDiagnostic(d, "request_uri_parameter_supported", result.Request_uri_parameter_supported, &diags) - if err := d.Set("response_modes_supported", result.Response_modes_supported); err != nil { + setResourceDataBoolWithDiagnostic(d, "request_parameter_supported", result.RequestParameterSupported, &diags) + setResourceDataBoolWithDiagnostic(d, "request_uri_parameter_supported", result.RequestUriParameterSupported, &diags) + if err := d.Set("response_modes_supported", result.ResponseModesSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } - if err := d.Set("response_types_supported", result.Response_types_supported); err != nil { + if err := d.Set("response_types_supported", result.ResponseTypesSupported); err != nil { diags = append(diags, diag.FromErr(err)...) 
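The metadata read above routes every scalar attribute through setResourceDataStringWithDiagnostic and setResourceDataBoolWithDiagnostic instead of bare d.Set calls. Their bodies live in pingaccess/structures.go and are not part of this hunk; inferred from the call sites, the string variant plausibly looks like the sketch below (an assumption, not code from this patch):

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Assumed shape, inferred from call sites in this patch: only set the
// attribute when the API returned a value, and fold any error into the
// accumulated diagnostics rather than returning early.
func setResourceDataStringWithDiagnostic(d *schema.ResourceData, name string, data *string, diags *diag.Diagnostics) {
	if data != nil {
		if err := d.Set(name, *data); err != nil {
			*diags = append(*diags, diag.FromErr(err)...)
		}
	}
}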
} - setResourceDataStringWithDiagnostic(d, "revocation_endpoint", result.Revocation_endpoint, &diags) - if err := d.Set("scopes_supported", result.Scopes_supported); err != nil { + setResourceDataStringWithDiagnostic(d, "revocation_endpoint", result.RevocationEndpoint, &diags) + if err := d.Set("scopes_supported", result.ScopesSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } - if err := d.Set("subject_types_supported", result.Subject_types_supported); err != nil { + if err := d.Set("subject_types_supported", result.SubjectTypesSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } - setResourceDataStringWithDiagnostic(d, "token_endpoint", result.Token_endpoint, &diags) - if err := d.Set("token_endpoint_auth_methods_supported", result.Token_endpoint_auth_methods_supported); err != nil { + setResourceDataStringWithDiagnostic(d, "token_endpoint", result.TokenEndpoint, &diags) + if err := d.Set("token_endpoint_auth_methods_supported", result.TokenEndpointAuthMethodsSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } - setResourceDataStringWithDiagnostic(d, "userinfo_endpoint", result.Userinfo_endpoint, &diags) - if err := d.Set("userinfo_signing_alg_values_supported", result.Userinfo_signing_alg_values_supported); err != nil { + setResourceDataStringWithDiagnostic(d, "userinfo_endpoint", result.UserinfoEndpoint, &diags) + if err := d.Set("userinfo_signing_alg_values_supported", result.UserinfoSigningAlgValuesSupported); err != nil { diags = append(diags, diag.FromErr(err)...) } return diags diff --git a/pingaccess/data_source_pingaccess_trusted_certificate_groups.go b/pingaccess/data_source_pingaccess_trusted_certificate_groups.go index e2a14135..1618093d 100644 --- a/pingaccess/data_source_pingaccess_trusted_certificate_groups.go +++ b/pingaccess/data_source_pingaccess_trusted_certificate_groups.go @@ -3,9 +3,10 @@ package pingaccess import ( "context" + "github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func dataSourcePingAccessTrustedCertificateGroups() *schema.Resource { @@ -48,8 +49,8 @@ func dataSourcePingAccessTrustedCertificateGroupsSchema() map[string]*schema.Sch } func dataSourcePingAccessTrustedCertificateGroupsRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).TrustedCertificateGroups - input := &pa.GetTrustedCertificateGroupsCommandInput{ + svc := m.(paClient).TrustedCertificateGroups + input := &trustedCertificateGroups.GetTrustedCertificateGroupsCommandInput{ Name: d.Get("name").(string), } result, _, err := svc.GetTrustedCertificateGroupsCommand(input) diff --git a/pingaccess/provider.go b/pingaccess/provider.go index 3b0e5953..c8ff73b5 100644 --- a/pingaccess/provider.go +++ b/pingaccess/provider.go @@ -88,7 +88,7 @@ func init() { func providerConfigure(_ context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { - config := &config{ + config := &cfg{ Username: d.Get("username").(string), Password: d.Get("password").(string), BaseURL: d.Get("base_url").(string), diff --git a/pingaccess/provider_test.go b/pingaccess/provider_test.go index 334936cf..dab724cd 100644 --- a/pingaccess/provider_test.go +++ b/pingaccess/provider_test.go @@ -4,6 +4,7 @@ import ( "context" "crypto/tls" "fmt" + 
"github.com/iwarapter/pingaccess-sdk-go/services/version" "log" "math/rand" "net" @@ -20,7 +21,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + paCfg "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" "github.com/ory/dockertest/v3" ) @@ -99,13 +100,14 @@ func TestMain(m *testing.M) { pool.MaxWait = time.Minute * 2 // exponential backoff-retry, because the application in the container might not be ready to accept connections yet - u, _ := url.Parse(fmt.Sprintf("https://localhost:%s", paCont.GetPort("9000/tcp"))) + u, _ := url.Parse(fmt.Sprintf("https://localhost:%s/pa-admin-api/v3", paCont.GetPort("9000/tcp"))) log.Printf("Setting PingAccess admin API: %s", u.String()) http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - client := pingaccess.NewClient("administrator", "2FederateM0re", u, "/pa-admin-api/v3", nil) + cfg := paCfg.NewConfig().WithUsername("administrator").WithPassword("2FederateM0re").WithEndpoint(u.String()) + client := version.New(cfg) if err = pool.Retry(func() error { log.Println("Attempting to connect to PingAccess admin API....") - _, _, err = client.Version.VersionCommand() + _, _, err = client.VersionCommand() return err }); err != nil { log.Fatalf("Could not connect to pingaccess: %s", err) @@ -116,7 +118,7 @@ func TestMain(m *testing.M) { os.Setenv("PINGFEDERATE_TEST_IP", strings.Replace(server.URL, "[::]", host, -1)) log.Println("Connected to PingAccess admin API....") - version, _, err := client.Version.VersionCommand() + version, _, err := client.VersionCommand() if err != nil { log.Fatalf("Failed to retrieve version from server: %v", err) } diff --git a/pingaccess/resource_pingaccess_access_token_validator.go b/pingaccess/resource_pingaccess_access_token_validator.go index 525d9374..3229aad8 100644 --- a/pingaccess/resource_pingaccess_access_token_validator.go +++ b/pingaccess/resource_pingaccess_access_token_validator.go @@ -5,10 +5,12 @@ import ( "encoding/json" "fmt" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessAccessTokenValidator() *schema.Resource { @@ -22,7 +24,7 @@ func resourcePingAccessAccessTokenValidator() *schema.Resource { }, Schema: resourcePingAccessAccessTokenValidatorSchema(), CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error { - svc := m.(*pingaccess.Client).AccessTokenValidators + svc := m.(paClient).AccessTokenValidators desc, _, err := svc.GetAccessTokenValidatorDescriptorsCommand() if err != nil { return fmt.Errorf("unable to retrieve AccessTokenValidator descriptors %s", err) @@ -55,8 +57,8 @@ func resourcePingAccessAccessTokenValidatorSchema() map[string]*schema.Schema { } func resourcePingAccessAccessTokenValidatorCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).AccessTokenValidators - input := pingaccess.AddAccessTokenValidatorCommandInput{ + svc := m.(paClient).AccessTokenValidators + input := accessTokenValidators.AddAccessTokenValidatorCommandInput{ Body: *resourcePingAccessAccessTokenValidatorReadData(d), } @@ 
-70,9 +72,9 @@ func resourcePingAccessAccessTokenValidatorCreate(_ context.Context, d *schema.R } func resourcePingAccessAccessTokenValidatorRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).AccessTokenValidators + svc := m.(paClient).AccessTokenValidators - input := &pingaccess.GetAccessTokenValidatorCommandInput{ + input := &accessTokenValidators.GetAccessTokenValidatorCommandInput{ Id: d.Id(), } @@ -85,8 +87,8 @@ func resourcePingAccessAccessTokenValidatorRead(_ context.Context, d *schema.Res } func resourcePingAccessAccessTokenValidatorUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).AccessTokenValidators - input := pingaccess.UpdateAccessTokenValidatorCommandInput{ + svc := m.(paClient).AccessTokenValidators + input := accessTokenValidators.UpdateAccessTokenValidatorCommandInput{ Body: *resourcePingAccessAccessTokenValidatorReadData(d), Id: d.Id(), } @@ -101,15 +103,15 @@ func resourcePingAccessAccessTokenValidatorUpdate(_ context.Context, d *schema.R } func resourcePingAccessAccessTokenValidatorDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).AccessTokenValidators - _, err := svc.DeleteAccessTokenValidatorCommand(&pingaccess.DeleteAccessTokenValidatorCommandInput{Id: d.Id()}) + svc := m.(paClient).AccessTokenValidators + _, err := svc.DeleteAccessTokenValidatorCommand(&accessTokenValidators.DeleteAccessTokenValidatorCommandInput{Id: d.Id()}) if err != nil { return diag.Errorf("unable to delete AccessTokenValidator: %s", err) } return nil } -func resourcePingAccessAccessTokenValidatorReadResult(d *schema.ResourceData, input *pingaccess.AccessTokenValidatorView, svc pingaccess.AccessTokenValidatorsAPI) diag.Diagnostics { +func resourcePingAccessAccessTokenValidatorReadResult(d *schema.ResourceData, input *models.AccessTokenValidatorView, svc accessTokenValidators.AccessTokenValidatorsAPI) diag.Diagnostics { var diags diag.Diagnostics b, _ := json.Marshal(input.Configuration) config := string(b) @@ -128,11 +130,11 @@ func resourcePingAccessAccessTokenValidatorReadResult(d *schema.ResourceData, in return diags } -func resourcePingAccessAccessTokenValidatorReadData(d *schema.ResourceData) *pingaccess.AccessTokenValidatorView { +func resourcePingAccessAccessTokenValidatorReadData(d *schema.ResourceData) *models.AccessTokenValidatorView { config := d.Get("configuration").(string) var dat map[string]interface{} _ = json.Unmarshal([]byte(config), &dat) - atv := &pingaccess.AccessTokenValidatorView{ + atv := &models.AccessTokenValidatorView{ Name: String(d.Get("name").(string)), ClassName: String(d.Get("class_name").(string)), Configuration: dat, diff --git a/pingaccess/resource_pingaccess_access_token_validator_test.go b/pingaccess/resource_pingaccess_access_token_validator_test.go index 465dfefa..f484bbd8 100644 --- a/pingaccess/resource_pingaccess_access_token_validator_test.go +++ b/pingaccess/resource_pingaccess_access_token_validator_test.go @@ -5,9 +5,10 @@ import ( "regexp" "testing" + "github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessAccessTokenValidator(t *testing.T) { @@ -95,8 +96,8 @@ func testAccCheckPingAccessAccessTokenValidatorExists(n string) 
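Every models view built in these resource files takes pointer fields, which is why each literal is wrapped in String(...), Int(...) or Bool(...). The helpers themselves are defined in pingaccess/structures.go, outside this patch; judging by the call sites (for example models.AccessTokenValidatorView{Name: String(d.Get("name").(string))}), they are presumably the usual one-line pointer constructors:

// Presumed definitions, matching their use throughout this patch.
func String(v string) *string { return &v }
func Int(v int) *int { return &v }
func Bool(v bool) *bool { return &v }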
resource.TestChe return fmt.Errorf("No access_token_validator ID is set") } - conn := testAccProvider.Meta().(*pingaccess.Client).AccessTokenValidators - result, _, err := conn.GetAccessTokenValidatorCommand(&pingaccess.GetAccessTokenValidatorCommandInput{ + conn := testAccProvider.Meta().(paClient).AccessTokenValidators + result, _, err := conn.GetAccessTokenValidatorCommand(&accessTokenValidators.GetAccessTokenValidatorCommandInput{ Id: rs.Primary.ID, }) diff --git a/pingaccess/resource_pingaccess_acme_server.go b/pingaccess/resource_pingaccess_acme_server.go index b1ed922a..2491f149 100644 --- a/pingaccess/resource_pingaccess_acme_server.go +++ b/pingaccess/resource_pingaccess_acme_server.go @@ -3,9 +3,11 @@ package pingaccess import ( "context" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/acme" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessAcmeServer() *schema.Resource { @@ -38,8 +40,8 @@ func resourcePingAccessAcmeServerSchema() map[string]*schema.Schema { } func resourcePingAccessAcmeServerCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).Acme - input := pingaccess.AddAcmeServerCommandInput{ + svc := m.(paClient).Acme + input := acme.AddAcmeServerCommandInput{ Body: *resourcePingAccessAcmeServerReadData(d), } @@ -52,8 +54,8 @@ func resourcePingAccessAcmeServerCreate(_ context.Context, d *schema.ResourceDat } func resourcePingAccessAcmeServerRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).Acme - input := &pingaccess.GetAcmeServerCommandInput{ + svc := m.(paClient).Acme + input := &acme.GetAcmeServerCommandInput{ AcmeServerId: d.Id(), } result, _, err := svc.GetAcmeServerCommand(input) @@ -64,8 +66,8 @@ func resourcePingAccessAcmeServerRead(_ context.Context, d *schema.ResourceData, } func resourcePingAccessAcmeServerDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).Acme - input := &pingaccess.DeleteAcmeServerCommandInput{ + svc := m.(paClient).Acme + input := &acme.DeleteAcmeServerCommandInput{ AcmeServerId: d.Id(), } @@ -76,7 +78,7 @@ func resourcePingAccessAcmeServerDelete(_ context.Context, d *schema.ResourceDat return nil } -func resourcePingAccessAcmeServerReadResult(d *schema.ResourceData, input *pingaccess.AcmeServerView) diag.Diagnostics { +func resourcePingAccessAcmeServerReadResult(d *schema.ResourceData, input *models.AcmeServerView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) setResourceDataStringWithDiagnostic(d, "url", input.Url, &diags) @@ -88,8 +90,8 @@ func resourcePingAccessAcmeServerReadResult(d *schema.ResourceData, input *pinga return diags } -func resourcePingAccessAcmeServerReadData(d *schema.ResourceData) *pingaccess.AcmeServerView { - return &pingaccess.AcmeServerView{ +func resourcePingAccessAcmeServerReadData(d *schema.ResourceData) *models.AcmeServerView { + return &models.AcmeServerView{ Name: String(d.Get("name").(string)), Url: String(d.Get("url").(string)), } diff --git a/pingaccess/resource_pingaccess_acme_server_test.go b/pingaccess/resource_pingaccess_acme_server_test.go index fc44ff47..229c0e69 100644 --- 
a/pingaccess/resource_pingaccess_acme_server_test.go +++ b/pingaccess/resource_pingaccess_acme_server_test.go @@ -4,11 +4,13 @@ import ( "fmt" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/acme" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessAcmeServer(t *testing.T) { @@ -57,8 +59,8 @@ func testAccCheckPingAccessAcmeServerExists(n string) resource.TestCheckFunc { return fmt.Errorf("No AcmeServer ID is set") } - conn := testAccProvider.Meta().(*pa.Client).Acme - result, _, err := conn.GetAcmeServerCommand(&pa.GetAcmeServerCommandInput{ + conn := testAccProvider.Meta().(paClient).Acme + result, _, err := conn.GetAcmeServerCommand(&acme.GetAcmeServerCommandInput{ AcmeServerId: rs.Primary.ID, }) @@ -75,10 +77,10 @@ func testAccCheckPingAccessAcmeServerExists(n string) resource.TestCheckFunc { func Test_resourcePingAccessAcmeServerReadData(t *testing.T) { cases := []struct { - AcmeServer pa.AcmeServerView + AcmeServer models.AcmeServerView }{ { - AcmeServer: pa.AcmeServerView{ + AcmeServer: models.AcmeServerView{ Name: String("engine1"), Url: String("foo"), }, diff --git a/pingaccess/resource_pingaccess_application.go b/pingaccess/resource_pingaccess_application.go index 8761b590..d2e05d43 100644 --- a/pingaccess/resource_pingaccess_application.go +++ b/pingaccess/resource_pingaccess_application.go @@ -4,10 +4,12 @@ import ( "context" "strconv" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/applications" + "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessApplication() *schema.Resource { @@ -133,8 +135,8 @@ func resourcePingAccessApplicationSchema() map[string]*schema.Schema { } func resourcePingAccessApplicationCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Applications - input := &pa.AddApplicationCommandInput{ + svc := m.(paClient).Applications + input := &applications.AddApplicationCommandInput{ Body: *resourcePingAccessApplicationReadData(d), } result, _, err := svc.AddApplicationCommand(input) @@ -147,8 +149,8 @@ func resourcePingAccessApplicationCreate(_ context.Context, d *schema.ResourceDa } func resourcePingAccessApplicationRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Applications - input := &pa.GetApplicationCommandInput{ + svc := m.(paClient).Applications + input := &applications.GetApplicationCommandInput{ Id: d.Id(), } result, _, err := svc.GetApplicationCommand(input) @@ -159,8 +161,8 @@ func resourcePingAccessApplicationRead(_ context.Context, d *schema.ResourceData } func resourcePingAccessApplicationUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Applications - input := pa.UpdateApplicationCommandInput{ + svc := m.(paClient).Applications + input := applications.UpdateApplicationCommandInput{ Body: *resourcePingAccessApplicationReadData(d), Id: d.Id(), } @@ -172,9 +174,9 @@ func 
resourcePingAccessApplicationUpdate(_ context.Context, d *schema.ResourceDa } func resourcePingAccessApplicationDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Applications + svc := m.(paClient).Applications - input := &pa.DeleteApplicationCommandInput{ + input := &applications.DeleteApplicationCommandInput{ Id: d.Id(), } @@ -185,7 +187,7 @@ func resourcePingAccessApplicationDelete(_ context.Context, d *schema.ResourceDa return nil } -func resourcePingAccessApplicationReadResult(d *schema.ResourceData, rv *pa.ApplicationView) diag.Diagnostics { +func resourcePingAccessApplicationReadResult(d *schema.ResourceData, rv *models.ApplicationView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataIntWithDiagnostic(d, "access_validator_id", rv.AccessValidatorId, &diags) setResourceDataIntWithDiagnostic(d, "agent_id", rv.AgentId, &diags) @@ -229,7 +231,7 @@ func resourcePingAccessApplicationReadResult(d *schema.ResourceData, rv *pa.Appl return diags } -func resourcePingAccessApplicationReadData(d *schema.ResourceData) *pa.ApplicationView { +func resourcePingAccessApplicationReadData(d *schema.ResourceData) *models.ApplicationView { siteID, _ := strconv.Atoi(d.Get("site_id").(string)) virtualHostIds := expandStringList(d.Get("virtual_host_ids").(*schema.Set).List()) var vhIds []*int @@ -238,7 +240,7 @@ func resourcePingAccessApplicationReadData(d *schema.ResourceData) *pa.Applicati vhIds = append(vhIds, &text) } - application := &pa.ApplicationView{ + application := &models.ApplicationView{ AgentId: Int(d.Get("agent_id").(int)), Name: String(d.Get("name").(string)), ApplicationType: String(d.Get("application_type").(string)), diff --git a/pingaccess/resource_pingaccess_application_resource.go b/pingaccess/resource_pingaccess_application_resource.go index 50fddc48..43581071 100644 --- a/pingaccess/resource_pingaccess_application_resource.go +++ b/pingaccess/resource_pingaccess_application_resource.go @@ -6,9 +6,11 @@ import ( "strconv" "strings" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/applications" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessApplicationResource() *schema.Resource { @@ -104,12 +106,12 @@ func resourcePingAccessApplicationResourceSchema() map[string]*schema.Schema { func resourcePingAccessApplicationResourceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { var diags diag.Diagnostics - svc := m.(*pa.Client).Applications + svc := m.(paClient).Applications applicationID := d.Get("application_id").(string) if d.Get("root_resource").(bool) { //Root Resources are created automatically, if one exists we will import it instead of failing to create. - input := pa.GetApplicationResourcesCommandInput{ + input := applications.GetApplicationResourcesCommandInput{ Id: applicationID, Name: "Root Resource", } @@ -123,7 +125,7 @@ func resourcePingAccessApplicationResourceCreate(ctx context.Context, d *schema. 
return resourcePingAccessApplicationResourceUpdate(ctx, d, m) } - input := pa.AddApplicationResourceCommandInput{ + input := applications.AddApplicationResourceCommandInput{ Id: applicationID, Body: *resourcePingAccessApplicationResourceReadData(d), } @@ -138,8 +140,8 @@ func resourcePingAccessApplicationResourceCreate(ctx context.Context, d *schema. } func resourcePingAccessApplicationResourceRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Applications - input := &pa.GetApplicationResourceCommandInput{ + svc := m.(paClient).Applications + input := &applications.GetApplicationResourceCommandInput{ ApplicationId: d.Get("application_id").(string), ResourceId: d.Id(), } @@ -153,8 +155,8 @@ func resourcePingAccessApplicationResourceRead(_ context.Context, d *schema.Reso } func resourcePingAccessApplicationResourceUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Applications - input := pa.UpdateApplicationResourceCommandInput{ + svc := m.(paClient).Applications + input := applications.UpdateApplicationResourceCommandInput{ ApplicationId: d.Get("application_id").(string), ResourceId: d.Id(), Body: *resourcePingAccessApplicationResourceReadData(d), @@ -168,13 +170,13 @@ func resourcePingAccessApplicationResourceUpdate(_ context.Context, d *schema.Re } func resourcePingAccessApplicationResourceDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Applications + svc := m.(paClient).Applications if d.Get("root_resource").(bool) { return nil } - input := &pa.DeleteApplicationResourceCommandInput{ + input := &applications.DeleteApplicationResourceCommandInput{ ResourceId: d.Id(), ApplicationId: d.Get("application_id").(string), } @@ -196,7 +198,7 @@ func resourcePingAccessApplicationResourceImport(_ context.Context, d *schema.Re return []*schema.ResourceData{d}, nil } -func resourcePingAccessApplicationResourceReadResult(d *schema.ResourceData, rv *pa.ResourceView) diag.Diagnostics { +func resourcePingAccessApplicationResourceReadResult(d *schema.ResourceData, rv *models.ResourceView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataBoolWithDiagnostic(d, "anonymous", rv.Anonymous, &diags) setResourceDataStringWithDiagnostic(d, "application_id", String(strconv.Itoa(*rv.ApplicationId)), &diags) @@ -225,10 +227,10 @@ func resourcePingAccessApplicationResourceReadResult(d *schema.ResourceData, rv return diags } -func resourcePingAccessApplicationResourceReadData(d *schema.ResourceData) *pa.ResourceView { +func resourcePingAccessApplicationResourceReadData(d *schema.ResourceData) *models.ResourceView { methods := expandStringList(d.Get("methods").(*schema.Set).List()) - resource := &pa.ResourceView{ + resource := &models.ResourceView{ Name: String(d.Get("name").(string)), Methods: &methods, } @@ -261,7 +263,7 @@ func resourcePingAccessApplicationResourceReadData(d *schema.ResourceData) *pa.R pathPatterns := v.(*schema.Set).List() for _, raw := range pathPatterns { l := raw.(map[string]interface{}) - p := &pa.PathPatternView{ + p := &models.PathPatternView{ Pattern: String(l["pattern"].(string)), Type: String(l["type"].(string)), } diff --git a/pingaccess/resource_pingaccess_application_resource_test.go b/pingaccess/resource_pingaccess_application_resource_test.go index 47f12023..bc86b7ad 100644 --- a/pingaccess/resource_pingaccess_application_resource_test.go +++ b/pingaccess/resource_pingaccess_application_resource_test.go @@ 
-1,40 +1,21 @@ package pingaccess import ( - "crypto/tls" "encoding/json" "fmt" "log" - "net/http" - "net/url" "strconv" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/applications" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) -func init() { - resource.AddTestSweepers("pingaccess_application_resource", &resource.Sweeper{ - Name: "pingaccess_application_resource", - F: testSweepApplicationResources, - }) -} - -func testSweepApplicationResources(r string) error { - url, _ := url.Parse("https://localhost:9000") - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - conn := pa.NewClient("Administrator", "2Access2", url, "/pa-admin-api/v3", nil).Applications - result, _, _ := conn.GetResourcesCommand(&pa.GetResourcesCommandInput{Filter: "acc_test_"}) - for _, v := range result.Items { - conn.DeleteApplicationResourceCommand(&pa.DeleteApplicationResourceCommandInput{ApplicationId: strconv.Itoa(*v.ApplicationId), ResourceId: v.Id.String()}) - } - return nil -} - func TestAccPingAccessApplicationResource(t *testing.T) { policy1 := `web { type = "Rule" @@ -233,8 +214,8 @@ func testAccCheckPingAccessApplicationResourceExists(n string) resource.TestChec return fmt.Errorf("No application resource ID is set") } - conn := testAccProvider.Meta().(*pa.Client).Applications - result, _, err := conn.GetApplicationResourceCommand(&pa.GetApplicationResourceCommandInput{ + conn := testAccProvider.Meta().(paClient).Applications + result, _, err := conn.GetApplicationResourceCommand(&applications.GetApplicationResourceCommandInput{ ApplicationId: rs.Primary.Attributes["application_id"], ResourceId: rs.Primary.ID, }) @@ -259,8 +240,8 @@ func testAccCheckPingAccessApplicationResourceAttributes(n, name, context string return fmt.Errorf("Not found: %s", n) } - conn := testAccProvider.Meta().(*pa.Client).Applications - result, _, err := conn.GetApplicationResourceCommand(&pa.GetApplicationResourceCommandInput{ + conn := testAccProvider.Meta().(paClient).Applications + result, _, err := conn.GetApplicationResourceCommand(&applications.GetApplicationResourceCommandInput{ ApplicationId: rs.Primary.Attributes["application_id"], ResourceId: rs.Primary.ID, }) @@ -287,10 +268,10 @@ func testAccCheckPingAccessApplicationResourceAttributes(n, name, context string func Test_resourcePingAccessApplicationResourceReadData(t *testing.T) { cases := []struct { - Resource pa.ResourceView + Resource models.ResourceView }{ { - Resource: pa.ResourceView{ + Resource: models.ResourceView{ Anonymous: Bool(false), ApplicationId: Int(0), AuditLevel: String("false"), @@ -305,7 +286,7 @@ func Test_resourcePingAccessApplicationResourceReadData(t *testing.T) { // }, // }, PathPrefixes: &[]*string{String("false")}, - Policy: map[string]*[]*pa.PolicyItem{ + Policy: map[string]*[]*models.PolicyItem{ "Web": { { Id: json.Number("1"), @@ -323,7 +304,7 @@ func Test_resourcePingAccessApplicationResourceReadData(t *testing.T) { }, }, { - Resource: pa.ResourceView{ + Resource: models.ResourceView{ ApplicationId: Int(0), Methods: &[]*string{String("GET")}, Name: String("false"), diff --git a/pingaccess/resource_pingaccess_application_test.go 
b/pingaccess/resource_pingaccess_application_test.go index bd044de3..cebadc96 100644 --- a/pingaccess/resource_pingaccess_application_test.go +++ b/pingaccess/resource_pingaccess_application_test.go @@ -6,16 +6,16 @@ import ( "reflect" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/applications" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessApplication(t *testing.T) { - var out pa.ApplicationView - resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -24,13 +24,13 @@ func TestAccPingAccessApplication(t *testing.T) { { Config: testAccPingAccessApplicationConfig("acc_test_bar", "/bar", "API"), Check: resource.ComposeTestCheckFunc( - testAccCheckPingAccessApplicationExists("pingaccess_application.acc_test", 3, &out), + testAccCheckPingAccessApplicationExists("pingaccess_application.acc_test"), ), }, { Config: testAccPingAccessApplicationConfig("acc_test_bar", "/bart", "Web"), Check: resource.ComposeTestCheckFunc( - testAccCheckPingAccessApplicationExists("pingaccess_application.acc_test", 6, &out), + testAccCheckPingAccessApplicationExists("pingaccess_application.acc_test"), ), }, }, @@ -169,7 +169,7 @@ func testAccPingAccessApplicationConfig(name, context, appType string) string { `, name, context, appType, appType, os.Getenv("PINGFEDERATE_TEST_IP")) } -func testAccCheckPingAccessApplicationExists(n string, c int64, out *pa.ApplicationView) resource.TestCheckFunc { +func testAccCheckPingAccessApplicationExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -183,8 +183,8 @@ func testAccCheckPingAccessApplicationExists(n string, c int64, out *pa.Applicat return fmt.Errorf("No application ID is set") } - conn := testAccProvider.Meta().(*pa.Client).Applications - result, _, err := conn.GetApplicationCommand(&pa.GetApplicationCommandInput{ + conn := testAccProvider.Meta().(paClient).Applications + result, _, err := conn.GetApplicationCommand(&applications.GetApplicationCommandInput{ Id: rs.Primary.ID, }) @@ -222,10 +222,10 @@ func Test_flattenIdentityMappingIds(t *testing.T) { func Test_resourcePingAccessApplicationReadData(t *testing.T) { cases := []struct { - Application pa.ApplicationView + Application models.ApplicationView }{ { - Application: pa.ApplicationView{ + Application: models.ApplicationView{ Name: String("engine1"), ApplicationType: String("API"), AgentId: Int(0), @@ -235,7 +235,7 @@ func Test_resourcePingAccessApplicationReadData(t *testing.T) { SiteId: Int(0), SpaSupportEnabled: Bool(true), VirtualHostIds: &[]*int{Int(1)}, - Policy: map[string]*[]*pa.PolicyItem{ + Policy: map[string]*[]*models.PolicyItem{ "API": { { Id: "1", diff --git a/pingaccess/resource_pingaccess_auth_token_management.go b/pingaccess/resource_pingaccess_auth_token_management.go index d3c98378..bd636aac 100644 --- a/pingaccess/resource_pingaccess_auth_token_management.go +++ b/pingaccess/resource_pingaccess_auth_token_management.go @@ -3,7 +3,8 @@ package pingaccess import ( "context" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + 
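The acceptance-test hunks in this patch all repeat the same exists-check shape, differing only in the service and command names. Condensed into one generic sketch inside the pingaccess package (the Acme service and resource name are placeholders; each concrete variant appears verbatim in the hunks):

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"

	"github.com/iwarapter/pingaccess-sdk-go/services/acme"
)

func testAccCheckExampleExists(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}
		// Meta() now holds paClient by value, not *pingaccess.Client.
		conn := testAccProvider.Meta().(paClient).Acme
		_, _, err := conn.GetAcmeServerCommand(&acme.GetAcmeServerCommandInput{
			AcmeServerId: rs.Primary.ID,
		})
		return err
	}
}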
"github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -58,7 +59,7 @@ func resourcePingAccessAuthTokenManagementCreate(ctx context.Context, d *schema. } func resourcePingAccessAuthTokenManagementRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).AuthTokenManagement + svc := m.(paClient).AuthTokenManagement result, _, err := svc.GetAuthTokenManagementCommand() if err != nil { return diag.Errorf("unable to read AuthTokenManagement: %s", err) @@ -68,8 +69,8 @@ func resourcePingAccessAuthTokenManagementRead(_ context.Context, d *schema.Reso } func resourcePingAccessAuthTokenManagementUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).AuthTokenManagement - input := pa.UpdateAuthTokenManagementCommandInput{ + svc := m.(paClient).AuthTokenManagement + input := authTokenManagement.UpdateAuthTokenManagementCommandInput{ Body: *resourcePingAccessAuthTokenManagementReadData(d), } result, _, err := svc.UpdateAuthTokenManagementCommand(&input) @@ -82,7 +83,7 @@ func resourcePingAccessAuthTokenManagementUpdate(_ context.Context, d *schema.Re } func resourcePingAccessAuthTokenManagementDelete(_ context.Context, _ *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).AuthTokenManagement + svc := m.(paClient).AuthTokenManagement _, err := svc.DeleteAuthTokenManagementCommand() if err != nil { return diag.Errorf("unable to delete AuthTokenManagement: %s", err) @@ -91,7 +92,7 @@ func resourcePingAccessAuthTokenManagementDelete(_ context.Context, _ *schema.Re return nil } -func resourcePingAccessAuthTokenManagementReadResult(d *schema.ResourceData, input *pa.AuthTokenManagementView) diag.Diagnostics { +func resourcePingAccessAuthTokenManagementReadResult(d *schema.ResourceData, input *models.AuthTokenManagementView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataStringWithDiagnostic(d, "issuer", input.Issuer, &diags) setResourceDataBoolWithDiagnostic(d, "key_roll_enabled", input.KeyRollEnabled, &diags) @@ -100,8 +101,8 @@ func resourcePingAccessAuthTokenManagementReadResult(d *schema.ResourceData, inp return diags } -func resourcePingAccessAuthTokenManagementReadData(d *schema.ResourceData) *pa.AuthTokenManagementView { - atm := &pa.AuthTokenManagementView{ +func resourcePingAccessAuthTokenManagementReadData(d *schema.ResourceData) *models.AuthTokenManagementView { + atm := &models.AuthTokenManagementView{ Issuer: String(d.Get("issuer").(string)), KeyRollEnabled: Bool(d.Get("key_roll_enabled").(bool)), KeyRollPeriodInHours: Int(d.Get("key_roll_period_in_hours").(int)), diff --git a/pingaccess/resource_pingaccess_auth_token_management_test.go b/pingaccess/resource_pingaccess_auth_token_management_test.go index 7189fdf0..120a5b2a 100644 --- a/pingaccess/resource_pingaccess_auth_token_management_test.go +++ b/pingaccess/resource_pingaccess_auth_token_management_test.go @@ -4,11 +4,12 @@ import ( "fmt" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessAuthTokenManagement(t 
*testing.T) { @@ -58,7 +59,7 @@ func testAccCheckPingAccessAuthTokenManagementExists(n string) resource.TestChec return fmt.Errorf("No auth token management ID is set") } - conn := testAccProvider.Meta().(*pa.Client).AuthTokenManagement + conn := testAccProvider.Meta().(paClient).AuthTokenManagement result, _, err := conn.GetAuthTokenManagementCommand() if err != nil { @@ -75,10 +76,10 @@ func testAccCheckPingAccessAuthTokenManagementExists(n string) resource.TestChec func Test_resourcePingAccessAuthTokenManagementReadData(t *testing.T) { cases := []struct { - AuthTokenManagementView pa.AuthTokenManagementView + AuthTokenManagementView models.AuthTokenManagementView }{ { - AuthTokenManagementView: pa.AuthTokenManagementView{ + AuthTokenManagementView: models.AuthTokenManagementView{ Issuer: String("PingAccessAuthTokenDemo"), KeyRollEnabled: Bool(false), KeyRollPeriodInHours: Int(23), diff --git a/pingaccess/resource_pingaccess_authn_req_list.go b/pingaccess/resource_pingaccess_authn_req_list.go index 07ad820c..669d6cae 100644 --- a/pingaccess/resource_pingaccess_authn_req_list.go +++ b/pingaccess/resource_pingaccess_authn_req_list.go @@ -3,9 +3,11 @@ package pingaccess import ( "context" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessAuthnReqList() *schema.Resource { @@ -33,8 +35,8 @@ func resourcePingAccessAuthnReqListSchema() map[string]*schema.Schema { } func resourcePingAccessAuthnReqListCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).AuthnReqLists - input := pingaccess.AddAuthnReqListCommandInput{ + svc := m.(paClient).AuthnReqLists + input := authnReqLists.AddAuthnReqListCommandInput{ Body: *resourcePingAccessAuthnReqListReadData(d), } @@ -48,8 +50,8 @@ func resourcePingAccessAuthnReqListCreate(_ context.Context, d *schema.ResourceD } func resourcePingAccessAuthnReqListRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).AuthnReqLists - input := &pingaccess.GetAuthnReqListCommandInput{ + svc := m.(paClient).AuthnReqLists + input := &authnReqLists.GetAuthnReqListCommandInput{ Id: d.Id(), } result, _, err := svc.GetAuthnReqListCommand(input) @@ -60,8 +62,8 @@ func resourcePingAccessAuthnReqListRead(_ context.Context, d *schema.ResourceDat } func resourcePingAccessAuthnReqListUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).AuthnReqLists - input := pingaccess.UpdateAuthnReqListCommandInput{ + svc := m.(paClient).AuthnReqLists + input := authnReqLists.UpdateAuthnReqListCommandInput{ Body: *resourcePingAccessAuthnReqListReadData(d), Id: d.Id(), } @@ -74,8 +76,8 @@ func resourcePingAccessAuthnReqListUpdate(_ context.Context, d *schema.ResourceD } func resourcePingAccessAuthnReqListDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).AuthnReqLists - input := &pingaccess.DeleteAuthnReqListCommandInput{ + svc := m.(paClient).AuthnReqLists + input := &authnReqLists.DeleteAuthnReqListCommandInput{ Id: d.Id(), } @@ -86,7 +88,7 @@ func resourcePingAccessAuthnReqListDelete(_ context.Context, d *schema.ResourceD return nil } -func 
diff --git a/pingaccess/resource_pingaccess_authn_req_list_test.go b/pingaccess/resource_pingaccess_authn_req_list_test.go
index cdfbbbd3..202c89c1 100644
--- a/pingaccess/resource_pingaccess_authn_req_list_test.go
+++ b/pingaccess/resource_pingaccess_authn_req_list_test.go
@@ -4,11 +4,13 @@ import (
     "fmt"
     "testing"
 
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+    "github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists"
+
     "github.com/google/go-cmp/cmp"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
     "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func TestAccPingAccessAuthnReqList(t *testing.T) {
@@ -59,8 +61,8 @@ func testAccCheckPingAccessAuthnReqListExists(n string) resource.TestCheckFunc {
             return fmt.Errorf("No AuthnReqList ID is set")
         }
 
-        conn := testAccProvider.Meta().(*pa.Client).AuthnReqLists
-        result, _, err := conn.GetAuthnReqListCommand(&pa.GetAuthnReqListCommandInput{
+        conn := testAccProvider.Meta().(paClient).AuthnReqLists
+        result, _, err := conn.GetAuthnReqListCommand(&authnReqLists.GetAuthnReqListCommandInput{
             Id: rs.Primary.ID,
         })
 
@@ -78,10 +80,10 @@ func testAccCheckPingAccessAuthnReqListExists(n string) resource.TestCheckFunc {
 
 func Test_resourcePingAccessAuthnReqListReadData(t *testing.T) {
     cases := []struct {
-        AuthnReqList pa.AuthnReqListView
+        AuthnReqList models.AuthnReqListView
     }{
         {
-            AuthnReqList: pa.AuthnReqListView{
+            AuthnReqList: models.AuthnReqListView{
                 Name: String("engine1"),
                 AuthnReqs: &[]*string{
                     String("foo"),
diff --git a/pingaccess/resource_pingaccess_certificate.go b/pingaccess/resource_pingaccess_certificate.go
index 4c1d44fd..5cf86722 100644
--- a/pingaccess/resource_pingaccess_certificate.go
+++ b/pingaccess/resource_pingaccess_certificate.go
@@ -4,9 +4,11 @@ import (
     "context"
     "strconv"
 
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+    "github.com/iwarapter/pingaccess-sdk-go/services/certificates"
+
     "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func resourcePingAccessCertificate() *schema.Resource {
@@ -77,14 +79,14 @@ func resourcePingAccessCertificateSchema() map[string]*schema.Schema {
 }
 
 func resourcePingAccessCertificateCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    input := pa.ImportTrustedCertInput{
-        Body: pa.X509FileImportDocView{
+    input := certificates.ImportTrustedCertInput{
+        Body: models.X509FileImportDocView{
             Alias:    String(d.Get("alias").(string)),
             FileData: String(d.Get("file_data").(string)),
         },
     }
 
-    svc := m.(*pa.Client).Certificates
+    svc := m.(paClient).Certificates
 
     result, _, err := svc.ImportTrustedCert(&input)
     if err != nil {
@@ -96,8 +98,8 @@ func resourcePingAccessCertificateCreate(_ context.Context, d *schema.ResourceDa
 }
 
 func resourcePingAccessCertificateRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Certificates
-    input := &pa.GetTrustedCertInput{
+    svc := m.(paClient).Certificates
+    input := &certificates.GetTrustedCertInput{
         Id: d.Id(),
     }
     result, _, err := svc.GetTrustedCert(input)
@@ -108,15 +110,15 @@ func resourcePingAccessCertificateRead(_ context.Context, d *schema.ResourceData
 }
 
 func resourcePingAccessCertificateUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    input := pa.UpdateTrustedCertInput{
-        Body: pa.X509FileImportDocView{
+    input := certificates.UpdateTrustedCertInput{
+        Body: models.X509FileImportDocView{
             Alias:    String(d.Get("alias").(string)),
             FileData: String(d.Get("file_data").(string)),
         },
         Id: d.Id(),
     }
 
-    svc := m.(*pa.Client).Certificates
+    svc := m.(paClient).Certificates
 
     result, _, err := svc.UpdateTrustedCert(&input)
     if err != nil {
@@ -128,15 +130,15 @@ func resourcePingAccessCertificateUpdate(_ context.Context, d *schema.ResourceDa
 }
 
 func resourcePingAccessCertificateDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Certificates
-    _, err := svc.DeleteTrustedCertCommand(&pa.DeleteTrustedCertCommandInput{Id: d.Id()})
+    svc := m.(paClient).Certificates
+    _, err := svc.DeleteTrustedCertCommand(&certificates.DeleteTrustedCertCommandInput{Id: d.Id()})
     if err != nil {
         return diag.Errorf("unable to delete Certificate: %s", err)
     }
     return nil
 }
 
-func resourcePingAccessCertificateReadResult(d *schema.ResourceData, rv *pa.TrustedCertView) diag.Diagnostics {
+func resourcePingAccessCertificateReadResult(d *schema.ResourceData, rv *models.TrustedCertView) diag.Diagnostics {
     var diags diag.Diagnostics
     setResourceDataStringWithDiagnostic(d, "alias", rv.Alias, &diags)
     setResourceDataIntWithDiagnostic(d, "expires", rv.Expires, &diags)
diff --git a/pingaccess/resource_pingaccess_certificate_test.go b/pingaccess/resource_pingaccess_certificate_test.go
index 4c4975a8..6888648a 100644
--- a/pingaccess/resource_pingaccess_certificate_test.go
+++ b/pingaccess/resource_pingaccess_certificate_test.go
@@ -4,9 +4,10 @@ import (
     "fmt"
     "testing"
 
+    "github.com/iwarapter/pingaccess-sdk-go/services/certificates"
+
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func TestAccPingAccessCertificate(t *testing.T) {
@@ -63,8 +64,8 @@ func testAccCheckPingAccessCertificateExists(n string) resource.TestCheckFunc {
             return fmt.Errorf("No Certificate ID is set")
         }
 
-        conn := testAccProvider.Meta().(*pa.Client).Certificates
-        result, _, err := conn.GetTrustedCert(&pa.GetTrustedCertInput{
+        conn := testAccProvider.Meta().(paClient).Certificates
+        result, _, err := conn.GetTrustedCert(&certificates.GetTrustedCertInput{
             Id: rs.Primary.ID,
         })
 
@@ -87,8 +88,8 @@ func testAccCheckPingAccessCertificateAttributes(n string) resource.TestCheckFun
             return fmt.Errorf("No Certificate ID is set")
         }
-        conn := testAccProvider.Meta().(*pa.Client).Certificates
-        result, _, err := conn.GetTrustedCert(&pa.GetTrustedCertInput{
+        conn := testAccProvider.Meta().(paClient).Certificates
+        result, _, err := conn.GetTrustedCert(&certificates.GetTrustedCertInput{
             Id: rs.Primary.ID,
         })
diff --git a/pingaccess/resource_pingaccess_engine_listener.go b/pingaccess/resource_pingaccess_engine_listener.go
index 93e89ced..13f8bce9 100644
--- a/pingaccess/resource_pingaccess_engine_listener.go
+++ b/pingaccess/resource_pingaccess_engine_listener.go
@@ -3,9 +3,11 @@ package pingaccess
 import (
     "context"
 
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+    "github.com/iwarapter/pingaccess-sdk-go/services/engineListeners"
+
     "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func resourcePingAccessEngineListener() *schema.Resource {
@@ -46,8 +48,8 @@ func resourcePingAccessEngineListenerSchema() map[string]*schema.Schema {
 }
 
 func resourcePingAccessEngineListenerCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).EngineListeners
-    input := pa.AddEngineListenerCommandInput{
+    svc := m.(paClient).EngineListeners
+    input := engineListeners.AddEngineListenerCommandInput{
         Body: *resourcePingAccessEngineListenerReadData(d),
     }
 
@@ -61,8 +63,8 @@ func resourcePingAccessEngineListenerCreate(_ context.Context, d *schema.Resourc
 }
 
 func resourcePingAccessEngineListenerRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).EngineListeners
-    input := &pa.GetEngineListenerCommandInput{
+    svc := m.(paClient).EngineListeners
+    input := &engineListeners.GetEngineListenerCommandInput{
         Id: d.Id(),
     }
     result, _, err := svc.GetEngineListenerCommand(input)
@@ -73,8 +75,8 @@ func resourcePingAccessEngineListenerRead(_ context.Context, d *schema.ResourceD
 }
 
 func resourcePingAccessEngineListenerUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).EngineListeners
-    input := pa.UpdateEngineListenerCommandInput{
+    svc := m.(paClient).EngineListeners
+    input := engineListeners.UpdateEngineListenerCommandInput{
         Body: *resourcePingAccessEngineListenerReadData(d),
         Id:   d.Id(),
     }
@@ -87,8 +89,8 @@ func resourcePingAccessEngineListenerUpdate(_ context.Context, d *schema.Resourc
 }
 
 func resourcePingAccessEngineListenerDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).EngineListeners
-    input := &pa.DeleteEngineListenerCommandInput{
+    svc := m.(paClient).EngineListeners
+    input := &engineListeners.DeleteEngineListenerCommandInput{
         Id: d.Id(),
     }
 
@@ -100,7 +102,7 @@ func resourcePingAccessEngineListenerDelete(_ context.Context, d *schema.Resourc
     return nil
 }
 
-func resourcePingAccessEngineListenerReadResult(d *schema.ResourceData, input *pa.EngineListenerView) diag.Diagnostics {
+func resourcePingAccessEngineListenerReadResult(d *schema.ResourceData, input *models.EngineListenerView) diag.Diagnostics {
     var diags diag.Diagnostics
     setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags)
     setResourceDataIntWithDiagnostic(d, "port", input.Port, &diags)
@@ -109,8 +111,8 @@ func resourcePingAccessEngineListenerReadResult(d *schema.ResourceData, input *p
     return diags
 }
 
-func resourcePingAccessEngineListenerReadData(d *schema.ResourceData) *pa.EngineListenerView {
&pa.EngineListenerView{ +func resourcePingAccessEngineListenerReadData(d *schema.ResourceData) *models.EngineListenerView { + engine := &models.EngineListenerView{ Name: String(d.Get("name").(string)), Port: Int(d.Get("port").(int)), } diff --git a/pingaccess/resource_pingaccess_engine_listener_test.go b/pingaccess/resource_pingaccess_engine_listener_test.go index 9c27508f..c6ef29a8 100644 --- a/pingaccess/resource_pingaccess_engine_listener_test.go +++ b/pingaccess/resource_pingaccess_engine_listener_test.go @@ -4,11 +4,13 @@ import ( "fmt" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/engineListeners" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessEngineListener(t *testing.T) { @@ -57,8 +59,8 @@ func testAccCheckPingAccessEngineListenerExists(n string) resource.TestCheckFunc return fmt.Errorf("No EngineListener ID is set") } - conn := testAccProvider.Meta().(*pa.Client).EngineListeners - result, _, err := conn.GetEngineListenerCommand(&pa.GetEngineListenerCommandInput{ + conn := testAccProvider.Meta().(paClient).EngineListeners + result, _, err := conn.GetEngineListenerCommand(&engineListeners.GetEngineListenerCommandInput{ Id: rs.Primary.ID, }) @@ -76,10 +78,10 @@ func testAccCheckPingAccessEngineListenerExists(n string) resource.TestCheckFunc func Test_resourcePingAccessEngineListenerReadData(t *testing.T) { cases := []struct { - EngineListener pa.EngineListenerView + EngineListener models.EngineListenerView }{ { - EngineListener: pa.EngineListenerView{ + EngineListener: models.EngineListenerView{ Name: String("engine1"), Port: Int(9999), Secure: Bool(true), @@ -87,7 +89,7 @@ func Test_resourcePingAccessEngineListenerReadData(t *testing.T) { }, }, { - EngineListener: pa.EngineListenerView{ + EngineListener: models.EngineListenerView{ Name: String("engine2"), Port: Int(9999), Secure: Bool(false), diff --git a/pingaccess/resource_pingaccess_hsm_provider.go b/pingaccess/resource_pingaccess_hsm_provider.go index 8e1b3d06..1305f1fa 100644 --- a/pingaccess/resource_pingaccess_hsm_provider.go +++ b/pingaccess/resource_pingaccess_hsm_provider.go @@ -5,9 +5,11 @@ import ( "encoding/json" "fmt" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessHsmProvider() *schema.Resource { @@ -21,7 +23,7 @@ func resourcePingAccessHsmProvider() *schema.Resource { }, Schema: resourcePingAccessHsmProviderSchema(), CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error { - svc := m.(*pingaccess.Client).HsmProviders + svc := m.(paClient).HsmProviders desc, _, err := svc.GetHsmProviderDescriptorsCommand() if err != nil { return fmt.Errorf("unable to retrieve HsmProvider descriptors %s", err) @@ -54,8 +56,8 @@ func resourcePingAccessHsmProviderSchema() map[string]*schema.Schema { } func resourcePingAccessHsmProviderCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).HsmProviders - 
diff --git a/pingaccess/resource_pingaccess_hsm_provider.go b/pingaccess/resource_pingaccess_hsm_provider.go
index 8e1b3d06..1305f1fa 100644
--- a/pingaccess/resource_pingaccess_hsm_provider.go
+++ b/pingaccess/resource_pingaccess_hsm_provider.go
@@ -5,9 +5,11 @@ import (
     "encoding/json"
     "fmt"
 
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+    "github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders"
+
     "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func resourcePingAccessHsmProvider() *schema.Resource {
@@ -21,7 +23,7 @@ func resourcePingAccessHsmProvider() *schema.Resource {
         },
         Schema: resourcePingAccessHsmProviderSchema(),
         CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error {
-            svc := m.(*pingaccess.Client).HsmProviders
+            svc := m.(paClient).HsmProviders
             desc, _, err := svc.GetHsmProviderDescriptorsCommand()
             if err != nil {
                 return fmt.Errorf("unable to retrieve HsmProvider descriptors %s", err)
@@ -54,8 +56,8 @@ func resourcePingAccessHsmProviderSchema() map[string]*schema.Schema {
 }
 
 func resourcePingAccessHsmProviderCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pingaccess.Client).HsmProviders
-    input := pingaccess.AddHsmProviderCommandInput{
+    svc := m.(paClient).HsmProviders
+    input := hsmProviders.AddHsmProviderCommandInput{
         Body: *resourcePingAccessHsmProviderReadData(d),
     }
 
@@ -69,8 +71,8 @@ func resourcePingAccessHsmProviderCreate(_ context.Context, d *schema.ResourceDa
 }
 
 func resourcePingAccessHsmProviderRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pingaccess.Client).HsmProviders
-    input := &pingaccess.GetHsmProviderCommandInput{
+    svc := m.(paClient).HsmProviders
+    input := &hsmProviders.GetHsmProviderCommandInput{
         Id: d.Id(),
     }
     result, _, err := svc.GetHsmProviderCommand(input)
@@ -81,8 +83,8 @@ func resourcePingAccessHsmProviderRead(_ context.Context, d *schema.ResourceData
 }
 
 func resourcePingAccessHsmProviderUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pingaccess.Client).HsmProviders
-    input := pingaccess.UpdateHsmProviderCommandInput{
+    svc := m.(paClient).HsmProviders
+    input := hsmProviders.UpdateHsmProviderCommandInput{
         Body: *resourcePingAccessHsmProviderReadData(d),
         Id:   d.Id(),
     }
@@ -95,15 +97,15 @@ func resourcePingAccessHsmProviderUpdate(_ context.Context, d *schema.ResourceDa
 }
 
 func resourcePingAccessHsmProviderDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pingaccess.Client).HsmProviders
-    _, err := svc.DeleteHsmProviderCommand(&pingaccess.DeleteHsmProviderCommandInput{Id: d.Id()})
+    svc := m.(paClient).HsmProviders
+    _, err := svc.DeleteHsmProviderCommand(&hsmProviders.DeleteHsmProviderCommandInput{Id: d.Id()})
     if err != nil {
         return diag.Errorf("unable to delete HsmProvider: %s", err)
     }
     return nil
 }
 
-func resourcePingAccessHsmProviderReadResult(d *schema.ResourceData, input *pingaccess.HsmProviderView, svc pingaccess.HsmProvidersAPI) diag.Diagnostics {
+func resourcePingAccessHsmProviderReadResult(d *schema.ResourceData, input *models.HsmProviderView, svc hsmProviders.HsmProvidersAPI) diag.Diagnostics {
     var diags diag.Diagnostics
     b, _ := json.Marshal(input.Configuration)
     config := string(b)
@@ -123,11 +125,11 @@ func resourcePingAccessHsmProviderReadResult(d *schema.ResourceData, input *ping
     return diags
 }
 
-func resourcePingAccessHsmProviderReadData(d *schema.ResourceData) *pingaccess.HsmProviderView {
+func resourcePingAccessHsmProviderReadData(d *schema.ResourceData) *models.HsmProviderView {
     config := d.Get("configuration").(string)
     var dat map[string]interface{}
     _ = json.Unmarshal([]byte(config), &dat)
-    hsmProvider := &pingaccess.HsmProviderView{
+    hsmProvider := &models.HsmProviderView{
         Name:          String(d.Get("name").(string)),
         ClassName:     String(d.Get("class_name").(string)),
         Configuration: dat,
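The CustomizeDiff above fetches the HSM provider descriptors at plan time; the validation that follows is cut off by the hunk. A hypothetical helper illustrating the kind of check it performs against the descriptor list (`models.DescriptorsView` and its `Items`/`ClassName` fields appear verbatim later in this patch; the helper name and exact logic are assumptions):

```go
func validateClassName(className string, desc *models.DescriptorsView) error {
	for _, item := range desc.Items {
		if item.ClassName != nil && *item.ClassName == className {
			return nil // the server knows this plugin class
		}
	}
	return fmt.Errorf("unknown class_name %q", className)
}
```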
diff --git a/pingaccess/resource_pingaccess_hsm_provider_test.go b/pingaccess/resource_pingaccess_hsm_provider_test.go
index 9544a8c9..fc3571cf 100644
--- a/pingaccess/resource_pingaccess_hsm_provider_test.go
+++ b/pingaccess/resource_pingaccess_hsm_provider_test.go
@@ -5,9 +5,10 @@ import (
     "regexp"
     "testing"
 
+    "github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders"
+
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func TestAccPingAccessHsmProvider(t *testing.T) {
@@ -76,8 +77,8 @@ func testAccCheckPingAccessHsmProviderExists(n string) resource.TestCheckFunc {
             return fmt.Errorf("No HsmProvider ID is set")
         }
 
-        conn := testAccProvider.Meta().(*pa.Client).HsmProviders
-        result, _, err := conn.GetHsmProviderCommand(&pa.GetHsmProviderCommandInput{
+        conn := testAccProvider.Meta().(paClient).HsmProviders
+        result, _, err := conn.GetHsmProviderCommand(&hsmProviders.GetHsmProviderCommandInput{
             Id: rs.Primary.ID,
         })
 
@@ -100,8 +101,8 @@ func testAccCheckPingAccessHsmProviderAttributes(n, partition string) resource.T
             return fmt.Errorf("No HsmProvider ID is set")
         }
 
-        conn := testAccProvider.Meta().(*pa.Client).HsmProviders
-        result, _, err := conn.GetHsmProviderCommand(&pa.GetHsmProviderCommandInput{
+        conn := testAccProvider.Meta().(paClient).HsmProviders
+        result, _, err := conn.GetHsmProviderCommand(&hsmProviders.GetHsmProviderCommandInput{
             Id: rs.Primary.ID,
         })
&diags) if err := d.Set("header_name_list", rv.HeaderNameList); err != nil { @@ -76,9 +78,9 @@ func resourcePingAccessHTTPConfigRequestHostSourceReadResult(d *schema.ResourceD return diags } -func resourcePingAccessHTTPConfigRequestHostSourceReadData(d *schema.ResourceData) (body *pa.HostMultiValueSourceView) { +func resourcePingAccessHTTPConfigRequestHostSourceReadData(d *schema.ResourceData) (body *models.HostMultiValueSourceView) { headerNameList := expandStringList(d.Get("header_name_list").([]interface{})) - body = &pa.HostMultiValueSourceView{ + body = &models.HostMultiValueSourceView{ HeaderNameList: &headerNameList, ListValueLocation: String(d.Get("list_value_location").(string)), } diff --git a/pingaccess/resource_pingaccess_http_config_request_host_source_test.go b/pingaccess/resource_pingaccess_http_config_request_host_source_test.go index 70fb988c..a14a2641 100644 --- a/pingaccess/resource_pingaccess_http_config_request_host_source_test.go +++ b/pingaccess/resource_pingaccess_http_config_request_host_source_test.go @@ -4,11 +4,12 @@ import ( "fmt" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessHTTPConfigRequestHostSource(t *testing.T) { @@ -59,7 +60,7 @@ func testAccCheckPingAccessHTTPConfigRequestHostSourceExists(n string) resource. return fmt.Errorf("No http config request host source response ID is set") } - conn := testAccProvider.Meta().(*pa.Client).HttpConfig + conn := testAccProvider.Meta().(paClient).HttpConfig result, _, err := conn.GetHostSourceCommand() if err != nil { @@ -76,10 +77,10 @@ func testAccCheckPingAccessHTTPConfigRequestHostSourceExists(n string) resource. 
diff --git a/pingaccess/resource_pingaccess_http_config_request_host_source_test.go b/pingaccess/resource_pingaccess_http_config_request_host_source_test.go
index 70fb988c..a14a2641 100644
--- a/pingaccess/resource_pingaccess_http_config_request_host_source_test.go
+++ b/pingaccess/resource_pingaccess_http_config_request_host_source_test.go
@@ -4,11 +4,12 @@ import (
     "fmt"
     "testing"
 
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+
     "github.com/google/go-cmp/cmp"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
     "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func TestAccPingAccessHTTPConfigRequestHostSource(t *testing.T) {
@@ -59,7 +60,7 @@ func testAccCheckPingAccessHTTPConfigRequestHostSourceExists(n string) resource.
             return fmt.Errorf("No http config request host source response ID is set")
         }
 
-        conn := testAccProvider.Meta().(*pa.Client).HttpConfig
+        conn := testAccProvider.Meta().(paClient).HttpConfig
         result, _, err := conn.GetHostSourceCommand()
 
         if err != nil {
@@ -76,10 +77,10 @@ func testAccCheckPingAccessHTTPConfigRequestHostSourceExists(n string) resource.
 
 func Test_resourcePingAccessHTTPConfigRequestHostSourceReadData(t *testing.T) {
     cases := []struct {
-        Resource pa.HostMultiValueSourceView
+        Resource models.HostMultiValueSourceView
     }{
         {
-            Resource: pa.HostMultiValueSourceView{
+            Resource: models.HostMultiValueSourceView{
                 HeaderNameList:    &[]*string{String("woot")},
                 ListValueLocation: String("FIRST"),
             },
diff --git a/pingaccess/resource_pingaccess_https_listener.go b/pingaccess/resource_pingaccess_https_listener.go
index be180ea8..c195df29 100644
--- a/pingaccess/resource_pingaccess_https_listener.go
+++ b/pingaccess/resource_pingaccess_https_listener.go
@@ -3,9 +3,11 @@ package pingaccess
 import (
     "context"
 
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+    "github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners"
+
     "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func resourcePingAccessHTTPSListener() *schema.Resource {
@@ -42,8 +44,8 @@ func resourcePingAccessHTTPSListenerSchema() map[string]*schema.Schema {
 }
 
 func resourcePingAccessHTTPSListenerCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).HttpsListeners
-    input := pa.GetHttpsListenersCommandInput{}
+    svc := m.(paClient).HttpsListeners
+    input := httpsListeners.GetHttpsListenersCommandInput{}
 
     result, _, err := svc.GetHttpsListenersCommand(&input)
     if err != nil {
@@ -61,8 +63,8 @@ func resourcePingAccessHTTPSListenerCreate(_ context.Context, d *schema.Resource
 }
 
 func resourcePingAccessHTTPSListenerRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).HttpsListeners
-    input := &pa.GetHttpsListenerCommandInput{
+    svc := m.(paClient).HttpsListeners
+    input := &httpsListeners.GetHttpsListenerCommandInput{
         Id: d.Id(),
     }
     result, _, err := svc.GetHttpsListenerCommand(input)
@@ -73,8 +75,8 @@ func resourcePingAccessHTTPSListenerRead(_ context.Context, d *schema.ResourceDa
 }
 
 func resourcePingAccessHTTPSListenerUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).HttpsListeners
-    input := pa.UpdateHttpsListenerInput{
+    svc := m.(paClient).HttpsListeners
+    input := httpsListeners.UpdateHttpsListenerInput{
         Body: *resourcePingAccessHTTPSListenerReadData(d),
         Id:   d.Id(),
     }
@@ -90,7 +92,7 @@ func resourcePingAccessHTTPSListenerDelete(_ context.Context, _ *schema.Resource
     return nil
 }
 
-func resourcePingAccessHTTPSListenerReadResult(d *schema.ResourceData, input *pa.HttpsListenerView) diag.Diagnostics {
+func resourcePingAccessHTTPSListenerReadResult(d *schema.ResourceData, input *models.HttpsListenerView) diag.Diagnostics {
     var diags diag.Diagnostics
     setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags)
     setResourceDataIntWithDiagnostic(d, "key_pair_id", input.KeyPairId, &diags)
@@ -98,8 +100,8 @@ func resourcePingAccessHTTPSListenerReadResult(d *schema.ResourceData, input *pa
     return diags
 }
 
-func resourcePingAccessHTTPSListenerReadData(d *schema.ResourceData) *pa.HttpsListenerView {
-    engine := &pa.HttpsListenerView{
+func resourcePingAccessHTTPSListenerReadData(d *schema.ResourceData) *models.HttpsListenerView {
+    engine := &models.HttpsListenerView{
         Name:                      String(d.Get("name").(string)),
         KeyPairId:                 Int(d.Get("key_pair_id").(int)),
         UseServerCipherSuiteOrder: Bool(d.Get("use_server_cipher_suite_order").(bool)),
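The HTTPS listener Create above never creates anything: PingAccess ships a fixed set of listeners, so the resource lists them, adopts the one matching `name`, and then behaves like an update (Delete is accordingly a no-op). The adoption loop is elided by the hunk; a sketch of what it plausibly does, assuming the usual `Items` slice on the list result — illustrative, not the provider's verbatim code:

```go
func sketchHTTPSListenerAdopt(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	svc := m.(paClient).HttpsListeners
	result, _, err := svc.GetHttpsListenersCommand(&httpsListeners.GetHttpsListenersCommandInput{})
	if err != nil {
		return diag.Errorf("unable to list HttpsListeners: %s", err)
	}
	name := d.Get("name").(string)
	for _, listener := range result.Items {
		if *listener.Name == name {
			d.SetId(strconv.Itoa(*listener.Id)) // adopt the existing listener
			return resourcePingAccessHTTPSListenerUpdate(ctx, d, m)
		}
	}
	return diag.Errorf("unable to find HttpsListener named %q", name)
}
```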
diff --git a/pingaccess/resource_pingaccess_https_listener_test.go b/pingaccess/resource_pingaccess_https_listener_test.go
index 29914e3d..14bc9d0e 100644
--- a/pingaccess/resource_pingaccess_https_listener_test.go
+++ b/pingaccess/resource_pingaccess_https_listener_test.go
@@ -4,11 +4,13 @@ import (
     "fmt"
     "testing"
 
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+    "github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners"
+
     "github.com/google/go-cmp/cmp"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
     "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func TestAccPingAccessHTTPSListener(t *testing.T) {
@@ -57,8 +59,8 @@ func testAccCheckPingAccessHTTPSListenerExists(n string) resource.TestCheckFunc
             return fmt.Errorf("No listener ID is set")
         }
 
-        conn := testAccProvider.Meta().(*pa.Client).HttpsListeners
-        result, _, err := conn.GetHttpsListenerCommand(&pa.GetHttpsListenerCommandInput{
+        conn := testAccProvider.Meta().(paClient).HttpsListeners
+        result, _, err := conn.GetHttpsListenerCommand(&httpsListeners.GetHttpsListenerCommandInput{
             Id: rs.Primary.ID,
         })
 
@@ -76,10 +78,10 @@ func testAccCheckPingAccessHTTPSListenerExists(n string) resource.TestCheckFunc
 
 func Test_resourcePingAccessHTTPSListenerReadData(t *testing.T) {
     cases := []struct {
-        listener pa.HttpsListenerView
+        listener models.HttpsListenerView
     }{
         {
-            listener: pa.HttpsListenerView{
+            listener: models.HttpsListenerView{
                 Name:                      String("ADMIN"),
                 KeyPairId:                 Int(1),
                 UseServerCipherSuiteOrder: Bool(true),
diff --git a/pingaccess/resource_pingaccess_identity_mapping.go b/pingaccess/resource_pingaccess_identity_mapping.go
index 9375cb1e..99f65ea7 100644
--- a/pingaccess/resource_pingaccess_identity_mapping.go
+++ b/pingaccess/resource_pingaccess_identity_mapping.go
@@ -6,10 +6,12 @@ import (
     "fmt"
     "log"
 
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+    "github.com/iwarapter/pingaccess-sdk-go/services/identityMappings"
+
     "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func resourcePingAccessIdentityMapping() *schema.Resource {
@@ -24,7 +26,7 @@ func resourcePingAccessIdentityMapping() *schema.Resource {
         Schema: resourcePingAccessIdentityMappingSchema(),
 
         CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error {
-            svc := m.(*pingaccess.Client).IdentityMappings
+            svc := m.(paClient).IdentityMappings
             desc, _, err := svc.GetIdentityMappingDescriptorsCommand()
             if err != nil {
                 return fmt.Errorf("unable to retrieve IdentityMapping descriptors %s", err)
@@ -57,8 +59,8 @@ func resourcePingAccessIdentityMappingSchema() map[string]*schema.Schema {
 }
 
 func resourcePingAccessIdentityMappingCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pingaccess.Client).IdentityMappings
-    input := pingaccess.AddIdentityMappingCommandInput{
+    svc := m.(paClient).IdentityMappings
+    input := identityMappings.AddIdentityMappingCommandInput{
         Body: *resourcePingAccessIdentityMappingReadData(d),
     }
 
@@ -72,8 +74,8 @@ func resourcePingAccessIdentityMappingCreate(_ context.Context, d *schema.Resour
 }
 
 func resourcePingAccessIdentityMappingRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pingaccess.Client).IdentityMappings
-    input := &pingaccess.GetIdentityMappingCommandInput{
+    svc := m.(paClient).IdentityMappings
+    input := &identityMappings.GetIdentityMappingCommandInput{
         Id: d.Id(),
     }
     result, _, err := svc.GetIdentityMappingCommand(input)
@@ -85,8 +87,8 @@ func resourcePingAccessIdentityMappingRead(_ context.Context, d *schema.Resource
 }
 
 func resourcePingAccessIdentityMappingUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pingaccess.Client).IdentityMappings
-    input := pingaccess.UpdateIdentityMappingCommandInput{
+    svc := m.(paClient).IdentityMappings
+    input := identityMappings.UpdateIdentityMappingCommandInput{
         Body: *resourcePingAccessIdentityMappingReadData(d),
         Id:   d.Id(),
     }
@@ -101,16 +103,16 @@ func resourcePingAccessIdentityMappingUpdate(_ context.Context, d *schema.Resour
 }
 
 func resourcePingAccessIdentityMappingDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pingaccess.Client).IdentityMappings
+    svc := m.(paClient).IdentityMappings
     log.Printf("[INFO] ResourceID: %s", d.Id())
-    _, err := svc.DeleteIdentityMappingCommand(&pingaccess.DeleteIdentityMappingCommandInput{Id: d.Id()})
+    _, err := svc.DeleteIdentityMappingCommand(&identityMappings.DeleteIdentityMappingCommandInput{Id: d.Id()})
     if err != nil {
         return diag.Errorf("unable to delete IdentityMapping: %s", err)
     }
     return nil
 }
 
-func resourcePingAccessIdentityMappingReadResult(d *schema.ResourceData, input *pingaccess.IdentityMappingView, svc pingaccess.IdentityMappingsAPI) diag.Diagnostics {
+func resourcePingAccessIdentityMappingReadResult(d *schema.ResourceData, input *models.IdentityMappingView, svc identityMappings.IdentityMappingsAPI) diag.Diagnostics {
     var diags diag.Diagnostics
     b, _ := json.Marshal(input.Configuration)
     config := string(b)
@@ -129,11 +131,11 @@ func resourcePingAccessIdentityMappingReadResult(d *schema.ResourceData, input *
     return diags
 }
 
-func resourcePingAccessIdentityMappingReadData(d *schema.ResourceData) *pingaccess.IdentityMappingView {
+func resourcePingAccessIdentityMappingReadData(d *schema.ResourceData) *models.IdentityMappingView {
     config := d.Get("configuration").(string)
     var dat map[string]interface{}
     _ = json.Unmarshal([]byte(config), &dat)
-    idMapping := &pingaccess.IdentityMappingView{
+    idMapping := &models.IdentityMappingView{
         Name:          String(d.Get("name").(string)),
         ClassName:     String(d.Get("class_name").(string)),
         Configuration: dat,
fmt.Errorf("No IdentityMapping ID is set") } - conn := testAccProvider.Meta().(*pa.Client).IdentityMappings - result, _, err := conn.GetIdentityMappingCommand(&pa.GetIdentityMappingCommandInput{ + conn := testAccProvider.Meta().(paClient).IdentityMappings + result, _, err := conn.GetIdentityMappingCommand(&identityMappings.GetIdentityMappingCommandInput{ Id: rs.Primary.ID, }) @@ -182,8 +182,8 @@ func testAccCheckPingAccessIdentityMappingAttributes(n string) resource.TestChec return fmt.Errorf("No IdentityMapping ID is set") } - conn := testAccProvider.Meta().(*pa.Client).IdentityMappings - result, _, err := conn.GetIdentityMappingCommand(&pa.GetIdentityMappingCommandInput{ + conn := testAccProvider.Meta().(paClient).IdentityMappings + result, _, err := conn.GetIdentityMappingCommand(&identityMappings.GetIdentityMappingCommandInput{ Id: rs.Primary.ID, }) @@ -208,12 +208,16 @@ func testAccCheckPingAccessIdentityMappingAttributes(n string) resource.TestChec } } -func Test_resourcePingAccessIdentityMappingReadData(t *testing.T) { - descs := pa.DescriptorsView{ - Items: []*pa.DescriptorView{ +type idmappingsMock struct { + identityMappings.IdentityMappingsAPI +} + +func (i idmappingsMock) GetIdentityMappingDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) { + return &models.DescriptorsView{ + Items: []*models.DescriptorView{ { ClassName: String("something"), - ConfigurationFields: []*pa.ConfigurationField{ + ConfigurationFields: []*models.ConfigurationField{ { Name: String("password"), Type: String("CONCEALED"), @@ -222,30 +226,19 @@ func Test_resourcePingAccessIdentityMappingReadData(t *testing.T) { Label: nil, Type: nil, }, - }} - - server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - // Test request parameters - equals(t, req.URL.String(), "/identityMappings/descriptors") - // Send response to be tested - b, _ := json.Marshal(descs) - rw.Write(b) - })) - // Close the server when test finishes - defer server.Close() - - // Use Client & URL from our local test server - url, _ := url.Parse(server.URL) - c := pa.NewClient("", "", url, "", server.Client()) + }}, nil, nil +} + +func Test_resourcePingAccessIdentityMappingReadData(t *testing.T) { cases := []struct { Name string - IdentityMapping pa.IdentityMappingView + IdentityMapping models.IdentityMappingView ExpectedDiags diag.Diagnostics }{ { Name: "Stuff breaks", - IdentityMapping: pa.IdentityMappingView{ + IdentityMapping: models.IdentityMappingView{ Name: String("foo"), ClassName: String("foo"), }, @@ -257,7 +250,7 @@ func Test_resourcePingAccessIdentityMappingReadData(t *testing.T) { resourceSchema := resourcePingAccessIdentityMappingSchema() resourceLocalData := schema.TestResourceDataRaw(t, resourceSchema, map[string]interface{}{}) - diags := resourcePingAccessIdentityMappingReadResult(resourceLocalData, &tc.IdentityMapping, c.IdentityMappings) + diags := resourcePingAccessIdentityMappingReadResult(resourceLocalData, &tc.IdentityMapping, idmappingsMock{}) checkDiagnostics(t, tc.Name, diags, tc.ExpectedDiags) diff --git a/pingaccess/resource_pingaccess_keypair.go b/pingaccess/resource_pingaccess_keypair.go index 2d8eb0f8..aa362747 100644 --- a/pingaccess/resource_pingaccess_keypair.go +++ b/pingaccess/resource_pingaccess_keypair.go @@ -4,9 +4,11 @@ import ( "context" "strconv" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/keyPairs" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessKeyPair() *schema.Resource { @@ -165,10 +167,10 @@ func resourcePingAccessKeyPairSchema() map[string]*schema.Schema { } func resourcePingAccessKeyPairCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).KeyPairs + svc := m.(paClient).KeyPairs if _, ok := d.GetOk("file_data"); ok { - input := pa.ImportKeyPairCommandInput{ - Body: pa.PKCS12FileImportDocView{ + input := keyPairs.ImportKeyPairCommandInput{ + Body: models.PKCS12FileImportDocView{ Alias: String(d.Get("alias").(string)), FileData: String(d.Get("file_data").(string)), Password: String(d.Get("password").(string)), @@ -185,8 +187,8 @@ func resourcePingAccessKeyPairCreate(_ context.Context, d *schema.ResourceData, return resourcePingAccessKeyPairReadResult(d, result) } - input := pa.GenerateKeyPairCommandInput{ - Body: pa.NewKeyPairConfigView{ + input := keyPairs.GenerateKeyPairCommandInput{ + Body: models.NewKeyPairConfigView{ Alias: String(d.Get("alias").(string)), City: String(d.Get("city").(string)), CommonName: String(d.Get("common_name").(string)), @@ -211,8 +213,8 @@ func resourcePingAccessKeyPairCreate(_ context.Context, d *schema.ResourceData, } func resourcePingAccessKeyPairRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).KeyPairs - input := &pa.GetKeyPairCommandInput{ + svc := m.(paClient).KeyPairs + input := &keyPairs.GetKeyPairCommandInput{ Id: d.Id(), } result, _, err := svc.GetKeyPairCommand(input) @@ -223,8 +225,8 @@ func resourcePingAccessKeyPairRead(_ context.Context, d *schema.ResourceData, m } func resourcePingAccessKeyPairUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - input := pa.UpdateKeyPairCommandInput{ - Body: pa.PKCS12FileImportDocView{ + input := keyPairs.UpdateKeyPairCommandInput{ + Body: models.PKCS12FileImportDocView{ Alias: String(d.Get("alias").(string)), FileData: String(d.Get("file_data").(string)), Password: String(d.Get("password").(string)), @@ -237,7 +239,7 @@ func resourcePingAccessKeyPairUpdate(_ context.Context, d *schema.ResourceData, input.Body.HsmProviderId = Int(hsmID) } - svc := m.(*pa.Client).KeyPairs + svc := m.(paClient).KeyPairs result, _, err := svc.UpdateKeyPairCommand(&input) if err != nil { @@ -249,15 +251,15 @@ func resourcePingAccessKeyPairUpdate(_ context.Context, d *schema.ResourceData, } func resourcePingAccessKeyPairDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).KeyPairs - _, err := svc.DeleteKeyPairCommand(&pa.DeleteKeyPairCommandInput{Id: d.Id()}) + svc := m.(paClient).KeyPairs + _, err := svc.DeleteKeyPairCommand(&keyPairs.DeleteKeyPairCommandInput{Id: d.Id()}) if err != nil { return diag.Errorf("unable to delete KeyPair: %s", err) } return nil } -func resourcePingAccessKeyPairReadResult(d *schema.ResourceData, rv *pa.KeyPairView) diag.Diagnostics { +func resourcePingAccessKeyPairReadResult(d *schema.ResourceData, rv *models.KeyPairView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataStringWithDiagnostic(d, "alias", rv.Alias, &diags) if rv.ChainCertificates != nil { diff --git a/pingaccess/resource_pingaccess_keypair_test.go b/pingaccess/resource_pingaccess_keypair_test.go index 5635297e..ee80ff77 100644 --- 
diff --git a/pingaccess/resource_pingaccess_keypair_test.go b/pingaccess/resource_pingaccess_keypair_test.go
index 5635297e..ee80ff77 100644
--- a/pingaccess/resource_pingaccess_keypair_test.go
+++ b/pingaccess/resource_pingaccess_keypair_test.go
@@ -4,9 +4,10 @@ import (
     "fmt"
     "testing"
 
+    "github.com/iwarapter/pingaccess-sdk-go/services/keyPairs"
+
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func TestAccPingAccessKeyPair(t *testing.T) {
@@ -96,8 +97,8 @@ func testAccCheckPingAccessKeyPairExists(n string) resource.TestCheckFunc {
             return fmt.Errorf("No KeyPair ID is set")
         }
 
-        conn := testAccProvider.Meta().(*pa.Client).KeyPairs
-        result, _, err := conn.GetKeyPairCommand(&pa.GetKeyPairCommandInput{
+        conn := testAccProvider.Meta().(paClient).KeyPairs
+        result, _, err := conn.GetKeyPairCommand(&keyPairs.GetKeyPairCommandInput{
             Id: rs.Primary.ID,
         })
 
@@ -120,8 +121,8 @@ func testAccCheckPingAccessKeyPairAttributes(n string) resource.TestCheckFunc {
             return fmt.Errorf("No KeyPair ID is set")
         }
 
-        conn := testAccProvider.Meta().(*pa.Client).KeyPairs
-        result, _, err := conn.GetKeyPairCommand(&pa.GetKeyPairCommandInput{
+        conn := testAccProvider.Meta().(paClient).KeyPairs
+        result, _, err := conn.GetKeyPairCommand(&keyPairs.GetKeyPairCommandInput{
             Id: rs.Primary.ID,
         })
diff --git a/pingaccess/resource_pingaccess_oauth_server.go b/pingaccess/resource_pingaccess_oauth_server.go
index b52462b8..0cc115d8 100644
--- a/pingaccess/resource_pingaccess_oauth_server.go
+++ b/pingaccess/resource_pingaccess_oauth_server.go
@@ -3,7 +3,8 @@ package pingaccess
 import (
     "context"
 
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+    "github.com/iwarapter/pingaccess-sdk-go/services/oauth"
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -85,7 +86,7 @@ func resourcePingAccessOAuthServerCreate(ctx context.Context, d *schema.Resource
 }
 
 func resourcePingAccessOAuthServerRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Oauth
+    svc := m.(paClient).Oauth
     result, _, err := svc.GetAuthorizationServerCommand()
     if err != nil {
         return diag.Errorf("unable to read OAuthServerSettings: %s", err)
@@ -95,8 +96,8 @@ func resourcePingAccessOAuthServerRead(_ context.Context, d *schema.ResourceData
 }
 
 func resourcePingAccessOAuthServerUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Oauth
-    input := pa.UpdateAuthorizationServerCommandInput{
+    svc := m.(paClient).Oauth
+    input := oauth.UpdateAuthorizationServerCommandInput{
         Body: *resourcePingAccessOAuthServerReadData(d),
     }
     result, _, err := svc.UpdateAuthorizationServerCommand(&input)
@@ -109,7 +110,7 @@ func resourcePingAccessOAuthServerUpdate(_ context.Context, d *schema.ResourceDa
 }
 
 func resourcePingAccessOAuthServerDelete(_ context.Context, _ *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Oauth
+    svc := m.(paClient).Oauth
     _, err := svc.DeleteAuthorizationServerCommand()
     if err != nil {
         return diag.Errorf("unable to reset OAuthServerSettings: %s", err)
@@ -117,7 +118,7 @@ func resourcePingAccessOAuthServerDelete(_ context.Context, _ *schema.ResourceDa
     return nil
 }
 
-func resourcePingAccessOAuthServerReadResult(d *schema.ResourceData, input *pa.AuthorizationServerView) diag.Diagnostics {
+func resourcePingAccessOAuthServerReadResult(d *schema.ResourceData, input *models.AuthorizationServerView) diag.Diagnostics {
     var diags diag.Diagnostics
     setResourceDataStringWithDiagnostic(d, "audit_level", input.AuditLevel, &diags)
     setResourceDataBoolWithDiagnostic(d, "cache_tokens", input.CacheTokens, &diags)
@@ -148,9 +149,9 @@ func resourcePingAccessOAuthServerReadResult(d *schema.ResourceData, input *pa.A
     return diags
 }
 
-func resourcePingAccessOAuthServerReadData(d *schema.ResourceData) *pa.AuthorizationServerView {
+func resourcePingAccessOAuthServerReadData(d *schema.ResourceData) *models.AuthorizationServerView {
     targets := expandStringList(d.Get("targets").(*schema.Set).List())
-    oauth := &pa.AuthorizationServerView{
+    oauth := &models.AuthorizationServerView{
         IntrospectionEndpoint: String(d.Get("introspection_endpoint").(string)),
         SubjectAttributeName:  String(d.Get("subject_attribute_name").(string)),
         Targets:               &targets,
diff --git a/pingaccess/resource_pingaccess_oauth_server_test.go b/pingaccess/resource_pingaccess_oauth_server_test.go
index 03b32c99..159f4bfc 100644
--- a/pingaccess/resource_pingaccess_oauth_server_test.go
+++ b/pingaccess/resource_pingaccess_oauth_server_test.go
@@ -5,11 +5,12 @@ import (
     "regexp"
     "testing"
 
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+
     "github.com/google/go-cmp/cmp"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
     "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func TestAccPingAccessOAuthServer(t *testing.T) {
@@ -73,7 +74,7 @@ func testAccCheckPingAccessOAuthServerExists(n string) resource.TestCheckFunc {
             return fmt.Errorf("No third party service ID is set")
         }
 
-        conn := testAccProvider.Meta().(*pa.Client).Oauth
+        conn := testAccProvider.Meta().(paClient).Oauth
         result, _, err := conn.GetAuthorizationServerCommand()
 
         if err != nil {
@@ -90,17 +91,17 @@ func testAccCheckPingAccessOAuthServerExists(n string) resource.TestCheckFunc {
 
 func Test_resourcePingAccessOAuthServerReadData(t *testing.T) {
     cases := []struct {
-        OAuthServer pa.AuthorizationServerView
+        OAuthServer models.AuthorizationServerView
     }{
         {
-            OAuthServer: pa.AuthorizationServerView{
+            OAuthServer: models.AuthorizationServerView{
                 IntrospectionEndpoint:     String("/introspection"),
                 SubjectAttributeName:      String("alt"),
                 Targets:                   &[]*string{String("localhost")},
                 TrustedCertificateGroupId: Int(0),
-                ClientCredentials: &pa.OAuthClientCredentialsView{
+                ClientCredentials: &models.OAuthClientCredentialsView{
                     ClientId: String("client"),
-                    ClientSecret: &pa.HiddenFieldView{
+                    ClientSecret: &models.HiddenFieldView{
                         Value: String("Secrets"),
                     },
                 },
@@ -109,14 +110,14 @@ func Test_resourcePingAccessOAuthServerReadData(t *testing.T) {
             },
         },
         {
-            OAuthServer: pa.AuthorizationServerView{
+            OAuthServer: models.AuthorizationServerView{
                 IntrospectionEndpoint:     String("/introspection"),
                 SubjectAttributeName:      String("alt"),
                 Targets:                   &[]*string{String("localhost")},
                 TrustedCertificateGroupId: Int(0),
-                ClientCredentials: &pa.OAuthClientCredentialsView{
+                ClientCredentials: &models.OAuthClientCredentialsView{
                     ClientId:     String("client"),
-                    ClientSecret: &pa.HiddenFieldView{},
+                    ClientSecret: &models.HiddenFieldView{},
                 },
                 AuditLevel: String("none"),
                 Secure:     Bool(true),
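Note the second case above expects an empty `ClientSecret`: PingAccess conceals hidden-field values on read, so a `HiddenFieldView` written with a `Value` never round-trips it back. Illustrative only — the behaviour is inferred from the test expectations, not from API docs:

```go
func ExampleHiddenFieldAsymmetry() {
	sent := models.HiddenFieldView{Value: String("Secrets")} // what the provider writes
	got := models.HiddenFieldView{}                          // what a subsequent read yields
	fmt.Println(cmp.Equal(sent, got))
	// Output: false
}
```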
diff --git a/pingaccess/resource_pingaccess_pingfederate_admin.go b/pingaccess/resource_pingaccess_pingfederate_admin.go
index 1c18f095..56e69944 100644
--- a/pingaccess/resource_pingaccess_pingfederate_admin.go
+++ b/pingaccess/resource_pingaccess_pingfederate_admin.go
@@ -3,7 +3,8 @@ package pingaccess
 import (
     "context"
 
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+    "github.com/iwarapter/pingaccess-sdk-go/services/pingfederate"
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -70,7 +71,7 @@ func resourcePingAccessPingFederateAdminCreate(ctx context.Context, d *schema.Re
 }
 
 func resourcePingAccessPingFederateAdminRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Pingfederate
+    svc := m.(paClient).Pingfederate
     result, _, err := svc.GetPingFederateAdminCommand()
     if err != nil {
         return diag.Errorf("unable to read PingFederateAdmin: %s", err)
@@ -80,8 +81,8 @@ func resourcePingAccessPingFederateAdminRead(_ context.Context, d *schema.Resour
 }
 
 func resourcePingAccessPingFederateAdminUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Pingfederate
-    input := pa.UpdatePingFederateAdminCommandInput{
+    svc := m.(paClient).Pingfederate
+    input := pingfederate.UpdatePingFederateAdminCommandInput{
         Body: *resourcePingAccessPingFederateAdminReadData(d),
     }
     result, _, err := svc.UpdatePingFederateAdminCommand(&input)
@@ -94,7 +95,7 @@ func resourcePingAccessPingFederateAdminUpdate(_ context.Context, d *schema.Reso
 }
 
 func resourcePingAccessPingFederateAdminDelete(_ context.Context, _ *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Pingfederate
+    svc := m.(paClient).Pingfederate
     _, err := svc.DeletePingFederateCommand()
     if err != nil {
         return diag.Errorf("unable to reset PingFederateAdmin: %s", err)
@@ -102,7 +103,7 @@ func resourcePingAccessPingFederateAdminDelete(_ context.Context, _ *schema.Reso
     return nil
 }
 
-func resourcePingAccessPingFederateAdminReadResult(d *schema.ResourceData, input *pa.PingFederateAdminView) diag.Diagnostics {
+func resourcePingAccessPingFederateAdminReadResult(d *schema.ResourceData, input *models.PingFederateAdminView) diag.Diagnostics {
     var diags diag.Diagnostics
     setResourceDataStringWithDiagnostic(d, "admin_username", input.AdminUsername, &diags)
     setResourceDataStringWithDiagnostic(d, "audit_level", input.AuditLevel, &diags)
@@ -127,8 +128,8 @@ func resourcePingAccessPingFederateAdminReadResult(d *schema.ResourceData, input
     return nil
 }
 
-func resourcePingAccessPingFederateAdminReadData(d *schema.ResourceData) *pa.PingFederateAdminView {
-    pfAdmin := &pa.PingFederateAdminView{
+func resourcePingAccessPingFederateAdminReadData(d *schema.ResourceData) *models.PingFederateAdminView {
+    pfAdmin := &models.PingFederateAdminView{
         AdminUsername: String(d.Get("admin_username").(string)),
         Host:          String(d.Get("host").(string)),
         Port:          Int(d.Get("port").(int)),
"github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessPingFederateAdmin(t *testing.T) { @@ -69,7 +70,7 @@ func testAccCheckPingAccessPingFederateAdminExists(n string) resource.TestCheckF return fmt.Errorf("No third party service ID is set") } - conn := testAccProvider.Meta().(*pa.Client).Pingfederate + conn := testAccProvider.Meta().(paClient).Pingfederate result, _, err := conn.GetPingFederateAdminCommand() if err != nil { @@ -86,11 +87,11 @@ func testAccCheckPingAccessPingFederateAdminExists(n string) resource.TestCheckF func Test_resourcePingAccessPingFederateAdminReadData(t *testing.T) { cases := []struct { - PingFederateAdmin pa.PingFederateAdminView + PingFederateAdmin models.PingFederateAdminView }{ { - PingFederateAdmin: pa.PingFederateAdminView{ - AdminPassword: &pa.HiddenFieldView{ + PingFederateAdmin: models.PingFederateAdminView{ + AdminPassword: &models.HiddenFieldView{ Value: String("secret"), }, AdminUsername: String("admin"), @@ -101,8 +102,8 @@ func Test_resourcePingAccessPingFederateAdminReadData(t *testing.T) { }, }, { - PingFederateAdmin: pa.PingFederateAdminView{ - AdminPassword: &pa.HiddenFieldView{ + PingFederateAdmin: models.PingFederateAdminView{ + AdminPassword: &models.HiddenFieldView{ Value: String("secret"), }, AdminUsername: String("admin"), diff --git a/pingaccess/resource_pingaccess_pingfederate_oauth.go b/pingaccess/resource_pingaccess_pingfederate_oauth.go index 8b1e5f89..7697ec56 100644 --- a/pingaccess/resource_pingaccess_pingfederate_oauth.go +++ b/pingaccess/resource_pingaccess_pingfederate_oauth.go @@ -3,7 +3,8 @@ package pingaccess import ( "context" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/pingfederate" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -70,7 +71,7 @@ func resourcePingAccessPingFederateOAuthCreate(ctx context.Context, d *schema.Re } func resourcePingAccessPingFederateOAuthRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Pingfederate + svc := m.(paClient).Pingfederate result, _, err := svc.GetPingFederateAccessTokensCommand() if err != nil { return diag.Errorf("unable to read PingFederateOAuth: %s", err) @@ -80,8 +81,8 @@ func resourcePingAccessPingFederateOAuthRead(_ context.Context, d *schema.Resour } func resourcePingAccessPingFederateOAuthUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Pingfederate - input := pa.UpdatePingFederateAccessTokensCommandInput{ + svc := m.(paClient).Pingfederate + input := pingfederate.UpdatePingFederateAccessTokensCommandInput{ Body: *resourcePingAccessPingFederateOAuthReadData(d), } result, _, err := svc.UpdatePingFederateAccessTokensCommand(&input) @@ -94,7 +95,7 @@ func resourcePingAccessPingFederateOAuthUpdate(_ context.Context, d *schema.Reso } func resourcePingAccessPingFederateOAuthDelete(_ context.Context, _ *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Pingfederate + svc := m.(paClient).Pingfederate _, err := svc.DeletePingFederateAccessTokensCommand() if err != nil { return diag.Errorf("unable to reset PingFederateOAuth: %s", err) @@ -102,7 +103,7 @@ func resourcePingAccessPingFederateOAuthDelete(_ context.Context, _ *schema.Reso return nil } -func resourcePingAccessPingFederateOAuthReadResult(d 
diff --git a/pingaccess/resource_pingaccess_pingfederate_oauth.go b/pingaccess/resource_pingaccess_pingfederate_oauth.go
index 8b1e5f89..7697ec56 100644
--- a/pingaccess/resource_pingaccess_pingfederate_oauth.go
+++ b/pingaccess/resource_pingaccess_pingfederate_oauth.go
@@ -3,7 +3,8 @@ package pingaccess
 import (
     "context"
 
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+    "github.com/iwarapter/pingaccess-sdk-go/services/pingfederate"
 
     "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -70,7 +71,7 @@ func resourcePingAccessPingFederateOAuthCreate(ctx context.Context, d *schema.Re
 }
 
 func resourcePingAccessPingFederateOAuthRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Pingfederate
+    svc := m.(paClient).Pingfederate
     result, _, err := svc.GetPingFederateAccessTokensCommand()
     if err != nil {
         return diag.Errorf("unable to read PingFederateOAuth: %s", err)
@@ -80,8 +81,8 @@ func resourcePingAccessPingFederateOAuthRead(_ context.Context, d *schema.Resour
 }
 
 func resourcePingAccessPingFederateOAuthUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Pingfederate
-    input := pa.UpdatePingFederateAccessTokensCommandInput{
+    svc := m.(paClient).Pingfederate
+    input := pingfederate.UpdatePingFederateAccessTokensCommandInput{
         Body: *resourcePingAccessPingFederateOAuthReadData(d),
     }
     result, _, err := svc.UpdatePingFederateAccessTokensCommand(&input)
@@ -94,7 +95,7 @@ func resourcePingAccessPingFederateOAuthUpdate(_ context.Context, d *schema.Reso
 }
 
 func resourcePingAccessPingFederateOAuthDelete(_ context.Context, _ *schema.ResourceData, m interface{}) diag.Diagnostics {
-    svc := m.(*pa.Client).Pingfederate
+    svc := m.(paClient).Pingfederate
     _, err := svc.DeletePingFederateAccessTokensCommand()
     if err != nil {
         return diag.Errorf("unable to reset PingFederateOAuth: %s", err)
@@ -102,7 +103,7 @@ func resourcePingAccessPingFederateOAuthDelete(_ context.Context, _ *schema.Reso
     return nil
 }
 
-func resourcePingAccessPingFederateOAuthReadResult(d *schema.ResourceData, input *pa.PingFederateAccessTokenView) diag.Diagnostics {
+func resourcePingAccessPingFederateOAuthReadResult(d *schema.ResourceData, input *models.PingFederateAccessTokenView) diag.Diagnostics {
     var diags diag.Diagnostics
     setResourceDataIntWithDiagnostic(d, "access_validator_id", input.AccessValidatorId, &diags)
     setResourceDataBoolWithDiagnostic(d, "cache_tokens", input.CacheTokens, &diags)
@@ -127,8 +128,8 @@ func resourcePingAccessPingFederateOAuthReadResult(d *schema.ResourceData, input
     return diags
 }
 
-func resourcePingAccessPingFederateOAuthReadData(d *schema.ResourceData) *pa.PingFederateAccessTokenView {
-    oauth := &pa.PingFederateAccessTokenView{
+func resourcePingAccessPingFederateOAuthReadData(d *schema.ResourceData) *models.PingFederateAccessTokenView {
+    oauth := &models.PingFederateAccessTokenView{
         ClientId:             String(d.Get("client_id").(string)),
         SubjectAttributeName: String(d.Get("subject_attribute_name").(string)),
     }
diff --git a/pingaccess/resource_pingaccess_pingfederate_oauth_test.go b/pingaccess/resource_pingaccess_pingfederate_oauth_test.go
index 7e2974cb..655289fc 100644
--- a/pingaccess/resource_pingaccess_pingfederate_oauth_test.go
+++ b/pingaccess/resource_pingaccess_pingfederate_oauth_test.go
@@ -4,12 +4,13 @@ import (
     "fmt"
     "testing"
 
+    "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
     "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
 
     "github.com/google/go-cmp/cmp"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-    pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess"
 )
 
 func TestAccPingAccessPingFederateOAuth(t *testing.T) {
@@ -57,7 +58,7 @@ func testAccCheckPingAccessPingFederateOAuthExists(n string) resource.TestCheckF
             return fmt.Errorf("No third party service ID is set")
         }
 
-        conn := testAccProvider.Meta().(*pa.Client).Pingfederate
+        conn := testAccProvider.Meta().(paClient).Pingfederate
         result, _, err := conn.GetPingFederateAccessTokensCommand()
 
         if err != nil {
@@ -74,10 +75,10 @@ func testAccCheckPingAccessPingFederateOAuthExists(n string) resource.TestCheckF
 
 func Test_resourcePingAccessPingFederateOAuthReadData(t *testing.T) {
     cases := []struct {
-        PingFederateAccessTokenView pa.PingFederateAccessTokenView
+        PingFederateAccessTokenView models.PingFederateAccessTokenView
     }{
         {
-            PingFederateAccessTokenView: pa.PingFederateAccessTokenView{
+            PingFederateAccessTokenView: models.PingFederateAccessTokenView{
                 ClientId:             String("client_1"),
                 SubjectAttributeName: String("san"),
                 AccessValidatorId:    Int(1),
@@ -86,7 +87,7 @@ func Test_resourcePingAccessPingFederateOAuthReadData(t *testing.T) {
             },
         },
         {
-            PingFederateAccessTokenView: pa.PingFederateAccessTokenView{
+            PingFederateAccessTokenView: models.PingFederateAccessTokenView{
                 ClientId:             String("client_1"),
                 SubjectAttributeName: String("san"),
                 AccessValidatorId:    Int(1),
@@ -95,7 +96,7 @@ func Test_resourcePingAccessPingFederateOAuthReadData(t *testing.T) {
                 SendAudience:           Bool(true),
                 TokenTimeToLiveSeconds: Int(30),
                 UseTokenIntrospection:  Bool(true),
-                ClientSecret: &pa.HiddenFieldView{
+                ClientSecret: &models.HiddenFieldView{
                     Value: String("password"),
                 },
             },
"context" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/pingfederate" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -138,7 +140,7 @@ func resourcePingAccessPingFederateRuntimeCreate(ctx context.Context, d *schema. } func resourcePingAccessPingFederateRuntimeRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Pingfederate + svc := m.(paClient).Pingfederate if _, ok := d.GetOk("issuer"); ok { result, _, err := svc.GetPingFederateRuntimeCommand() if err != nil { @@ -157,10 +159,10 @@ func resourcePingAccessPingFederateRuntimeRead(_ context.Context, d *schema.Reso } func resourcePingAccessPingFederateRuntimeUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Pingfederate + svc := m.(paClient).Pingfederate if _, ok := d.GetOk("issuer"); ok { - input := pa.UpdatePingFederateRuntimeCommandInput{ + input := pingfederate.UpdatePingFederateRuntimeCommandInput{ Body: *resourcePingAccessPingFederateRuntimeReadData(d), } result, _, err := svc.UpdatePingFederateRuntimeCommand(&input) @@ -172,7 +174,7 @@ func resourcePingAccessPingFederateRuntimeUpdate(_ context.Context, d *schema.Re return resourcePingAccessPingFederateRuntimeReadResult(d, result) } - input := pa.UpdatePingFederateCommandInput{ + input := pingfederate.UpdatePingFederateCommandInput{ Body: *resourcePingAccessPingFederateDeprecatedRuntimeReadData(d), } result, _, err := svc.UpdatePingFederateCommand(&input) @@ -185,7 +187,7 @@ func resourcePingAccessPingFederateRuntimeUpdate(_ context.Context, d *schema.Re } func resourcePingAccessPingFederateRuntimeDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Pingfederate + svc := m.(paClient).Pingfederate if _, ok := d.GetOk("issuer"); ok { _, err := svc.DeletePingFederateRuntimeCommand() if err != nil { @@ -201,7 +203,7 @@ func resourcePingAccessPingFederateRuntimeDelete(_ context.Context, d *schema.Re return nil } -func resourcePingAccessPingFederateRuntimeReadResult(d *schema.ResourceData, input *pa.PingFederateMetadataRuntimeView) diag.Diagnostics { +func resourcePingAccessPingFederateRuntimeReadResult(d *schema.ResourceData, input *models.PingFederateMetadataRuntimeView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataStringWithDiagnostic(d, "description", input.Description, &diags) setResourceDataStringWithDiagnostic(d, "issuer", input.Issuer, &diags) @@ -213,7 +215,7 @@ func resourcePingAccessPingFederateRuntimeReadResult(d *schema.ResourceData, inp return diags } -func resourcePingAccessPingFederateDeprecatedRuntimeReadResult(d *schema.ResourceData, input *pa.PingFederateRuntimeView) diag.Diagnostics { +func resourcePingAccessPingFederateDeprecatedRuntimeReadResult(d *schema.ResourceData, input *models.PingFederateRuntimeView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataStringWithDiagnostic(d, "audit_level", input.AuditLevel, &diags) setResourceDataStringWithDiagnostic(d, "back_channel_base_path", input.BackChannelBasePath, &diags) @@ -235,8 +237,8 @@ func resourcePingAccessPingFederateDeprecatedRuntimeReadResult(d *schema.Resourc return diags } -func resourcePingAccessPingFederateRuntimeReadData(d *schema.ResourceData) *pa.PingFederateMetadataRuntimeView { - pfRuntime := 
&pa.PingFederateMetadataRuntimeView{ +func resourcePingAccessPingFederateRuntimeReadData(d *schema.ResourceData) *models.PingFederateMetadataRuntimeView { + pfRuntime := &models.PingFederateMetadataRuntimeView{ Issuer: String(d.Get("issuer").(string)), TrustedCertificateGroupId: Int(d.Get("trusted_certificate_group_id").(int)), UseProxy: Bool(d.Get("use_proxy").(bool)), @@ -253,8 +255,8 @@ func resourcePingAccessPingFederateRuntimeReadData(d *schema.ResourceData) *pa.P return pfRuntime } -func resourcePingAccessPingFederateDeprecatedRuntimeReadData(d *schema.ResourceData) *pa.PingFederateRuntimeView { - pfRuntime := &pa.PingFederateRuntimeView{ +func resourcePingAccessPingFederateDeprecatedRuntimeReadData(d *schema.ResourceData) *models.PingFederateRuntimeView { + pfRuntime := &models.PingFederateRuntimeView{ Host: String(d.Get("host").(string)), Port: Int(d.Get("port").(int)), TrustedCertificateGroupId: Int(d.Get("trusted_certificate_group_id").(int)), diff --git a/pingaccess/resource_pingaccess_pingfederate_runtime_test.go b/pingaccess/resource_pingaccess_pingfederate_runtime_test.go index 670c99d8..2fe25139 100644 --- a/pingaccess/resource_pingaccess_pingfederate_runtime_test.go +++ b/pingaccess/resource_pingaccess_pingfederate_runtime_test.go @@ -6,12 +6,13 @@ import ( "os" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessPingFederateRuntime(t *testing.T) { @@ -118,7 +119,7 @@ func testAccCheckPingAccessPingFederateRuntimeExists(n string) resource.TestChec return fmt.Errorf("not found: %s", n) } - conn := testAccProvider.Meta().(*pa.Client).Pingfederate + conn := testAccProvider.Meta().(paClient).Pingfederate result, _, err := conn.GetPingFederateRuntimeCommand() if err != nil { @@ -140,7 +141,7 @@ func testAccCheckPingAccessPingFederateDeprecatedRuntimeExists(n string) resourc return fmt.Errorf("not found: %s", n) } - conn := testAccProvider.Meta().(*pa.Client).Pingfederate + conn := testAccProvider.Meta().(paClient).Pingfederate result, resp, err := conn.GetPingFederateCommand() if err != nil { @@ -161,10 +162,10 @@ func testAccCheckPingAccessPingFederateDeprecatedRuntimeExists(n string) resourc func Test_resourcePingAccessPingFederateRuntimeReadData(t *testing.T) { cases := []struct { - PingFederateRuntime pa.PingFederateMetadataRuntimeView + PingFederateRuntime models.PingFederateMetadataRuntimeView }{ { - PingFederateRuntime: pa.PingFederateMetadataRuntimeView{ + PingFederateRuntime: models.PingFederateMetadataRuntimeView{ Issuer: String("localhost"), SkipHostnameVerification: Bool(true), UseProxy: Bool(false), @@ -173,7 +174,7 @@ func Test_resourcePingAccessPingFederateRuntimeReadData(t *testing.T) { }, }, { - PingFederateRuntime: pa.PingFederateMetadataRuntimeView{ + PingFederateRuntime: models.PingFederateMetadataRuntimeView{ Issuer: String("localhost"), Description: String("foo"), SkipHostnameVerification: Bool(true), @@ -200,10 +201,10 @@ func Test_resourcePingAccessPingFederateRuntimeReadData(t *testing.T) { func Test_resourcePingAccessPingFederateDeprecatedRuntimeReadData(t *testing.T) { cases := []struct { - PingFederateRuntime pa.PingFederateRuntimeView + PingFederateRuntime models.PingFederateRuntimeView }{ { - 
PingFederateRuntime: pa.PingFederateRuntimeView{ + PingFederateRuntime: models.PingFederateRuntimeView{ Host: String("localhost"), Port: Int(9031), SkipHostnameVerification: Bool(true), @@ -214,7 +215,7 @@ func Test_resourcePingAccessPingFederateDeprecatedRuntimeReadData(t *testing.T) }, }, { - PingFederateRuntime: pa.PingFederateRuntimeView{ + PingFederateRuntime: models.PingFederateRuntimeView{ Host: String("localhost"), Port: Int(9031), AuditLevel: String("ON"), diff --git a/pingaccess/resource_pingaccess_rule.go b/pingaccess/resource_pingaccess_rule.go index cf540f6f..5785d9ff 100644 --- a/pingaccess/resource_pingaccess_rule.go +++ b/pingaccess/resource_pingaccess_rule.go @@ -5,9 +5,11 @@ import ( "encoding/json" "fmt" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/rules" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessRule() *schema.Resource { @@ -22,7 +24,7 @@ func resourcePingAccessRule() *schema.Resource { Schema: resourcePingAccessRuleSchema(), CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error { - svc := m.(*pingaccess.Client).Rules + svc := m.(paClient).Rules desc, _, err := svc.GetRuleDescriptorsCommand() if err != nil { return fmt.Errorf("unable to retrieve Rule descriptors %s", err) @@ -56,8 +58,8 @@ func resourcePingAccessRuleSchema() map[string]*schema.Schema { } func resourcePingAccessRuleCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).Rules - input := pingaccess.AddRuleCommandInput{ + svc := m.(paClient).Rules + input := rules.AddRuleCommandInput{ Body: *resourcePingAccessRuleReadData(d), } @@ -71,8 +73,8 @@ func resourcePingAccessRuleCreate(_ context.Context, d *schema.ResourceData, m i } func resourcePingAccessRuleRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).Rules - input := &pingaccess.GetRuleCommandInput{ + svc := m.(paClient).Rules + input := &rules.GetRuleCommandInput{ Id: d.Id(), } @@ -85,8 +87,8 @@ func resourcePingAccessRuleRead(_ context.Context, d *schema.ResourceData, m int } func resourcePingAccessRuleUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).Rules - input := pingaccess.UpdateRuleCommandInput{ + svc := m.(paClient).Rules + input := rules.UpdateRuleCommandInput{ Body: *resourcePingAccessRuleReadData(d), Id: d.Id(), } @@ -101,15 +103,15 @@ func resourcePingAccessRuleUpdate(_ context.Context, d *schema.ResourceData, m i } func resourcePingAccessRuleDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).Rules - _, err := svc.DeleteRuleCommand(&pingaccess.DeleteRuleCommandInput{Id: d.Id()}) + svc := m.(paClient).Rules + _, err := svc.DeleteRuleCommand(&rules.DeleteRuleCommandInput{Id: d.Id()}) if err != nil { return diag.Errorf("unable to delete Rule: %s", err) } return nil } -func resourcePingAccessRuleReadResult(d *schema.ResourceData, input *pingaccess.RuleView, svc pingaccess.RulesAPI) diag.Diagnostics { +func resourcePingAccessRuleReadResult(d *schema.ResourceData, input *models.RuleView, svc rules.RulesAPI) diag.Diagnostics { var diags diag.Diagnostics b, _ := json.Marshal(input.Configuration) config := string(b) @@ -131,12 
+133,12 @@ func resourcePingAccessRuleReadResult(d *schema.ResourceData, input *pingaccess. return diags } -func resourcePingAccessRuleReadData(d *schema.ResourceData) *pingaccess.RuleView { +func resourcePingAccessRuleReadData(d *schema.ResourceData) *models.RuleView { config := d.Get("configuration").(string) var dat map[string]interface{} _ = json.Unmarshal([]byte(config), &dat) supDests := expandStringList(d.Get("supported_destinations").(*schema.Set).List()) - rule := &pingaccess.RuleView{ + rule := &models.RuleView{ Name: String(d.Get("name").(string)), ClassName: String(d.Get("class_name").(string)), Configuration: dat, diff --git a/pingaccess/resource_pingaccess_rule_test.go b/pingaccess/resource_pingaccess_rule_test.go index e7ba45f5..85dc567a 100644 --- a/pingaccess/resource_pingaccess_rule_test.go +++ b/pingaccess/resource_pingaccess_rule_test.go @@ -4,9 +4,10 @@ import ( "fmt" "testing" + "github.com/iwarapter/pingaccess-sdk-go/services/rules" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessRule(t *testing.T) { @@ -84,8 +85,8 @@ func testAccCheckPingAccessRuleExists(n string) resource.TestCheckFunc { return fmt.Errorf("No rule ID is set") } - conn := testAccProvider.Meta().(*pingaccess.Client).Rules - result, _, err := conn.GetRuleCommand(&pingaccess.GetRuleCommandInput{ + conn := testAccProvider.Meta().(paClient).Rules + result, _, err := conn.GetRuleCommand(&rules.GetRuleCommandInput{ Id: rs.Primary.ID, }) diff --git a/pingaccess/resource_pingaccess_rulesets.go b/pingaccess/resource_pingaccess_rulesets.go index 91fce526..7596132e 100644 --- a/pingaccess/resource_pingaccess_rulesets.go +++ b/pingaccess/resource_pingaccess_rulesets.go @@ -4,9 +4,11 @@ import ( "context" "strconv" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/rulesets" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessRuleSet() *schema.Resource { @@ -63,8 +65,8 @@ func resourcePingAccessRuleSetCreate(ctx context.Context, d *schema.ResourceData polIds = append(polIds, &text) } - input := pingaccess.AddRuleSetCommandInput{ - Body: pingaccess.RuleSetView{ + input := rulesets.AddRuleSetCommandInput{ + Body: models.RuleSetView{ Name: String(name), ElementType: String(elementType), Policy: &polIds, @@ -72,7 +74,7 @@ func resourcePingAccessRuleSetCreate(ctx context.Context, d *schema.ResourceData }, } - svc := m.(*pingaccess.Client).Rulesets + svc := m.(paClient).Rulesets result, _, err := svc.AddRuleSetCommand(&input) if err != nil { @@ -84,8 +86,8 @@ func resourcePingAccessRuleSetCreate(ctx context.Context, d *schema.ResourceData } func resourcePingAccessRuleSetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).Rulesets - result, _, err := svc.GetRuleSetCommand(&pingaccess.GetRuleSetCommandInput{ + svc := m.(paClient).Rulesets + result, _, err := svc.GetRuleSetCommand(&rulesets.GetRuleSetCommandInput{ Id: d.Id(), }) if err != nil { @@ -107,8 +109,8 @@ func resourcePingAccessRuleSetUpdate(ctx context.Context, d *schema.ResourceData polIds = append(polIds, &text) } - input := pingaccess.UpdateRuleSetCommandInput{ - Body: pingaccess.RuleSetView{ + input := 
rulesets.UpdateRuleSetCommandInput{ + Body: models.RuleSetView{ Name: String(name), ElementType: String(elementType), Policy: &polIds, @@ -117,7 +119,7 @@ func resourcePingAccessRuleSetUpdate(ctx context.Context, d *schema.ResourceData Id: d.Id(), } - svc := m.(*pingaccess.Client).Rulesets + svc := m.(paClient).Rulesets result, _, err := svc.UpdateRuleSetCommand(&input) if err != nil { @@ -128,16 +130,16 @@ func resourcePingAccessRuleSetUpdate(ctx context.Context, d *schema.ResourceData } func resourcePingAccessRuleSetDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).Rulesets + svc := m.(paClient).Rulesets - _, err := svc.DeleteRuleSetCommand(&pingaccess.DeleteRuleSetCommandInput{Id: d.Id()}) + _, err := svc.DeleteRuleSetCommand(&rulesets.DeleteRuleSetCommandInput{Id: d.Id()}) if err != nil { return diag.Errorf("unable to delete RuleSet: %s", err) } return nil } -func resourcePingAccessRuleSetReadResult(d *schema.ResourceData, input *pingaccess.RuleSetView) diag.Diagnostics { +func resourcePingAccessRuleSetReadResult(d *schema.ResourceData, input *models.RuleSetView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) setResourceDataStringWithDiagnostic(d, "success_criteria", input.SuccessCriteria, &diags) diff --git a/pingaccess/resource_pingaccess_rulesets_test.go b/pingaccess/resource_pingaccess_rulesets_test.go index f17b74d2..0743fcea 100644 --- a/pingaccess/resource_pingaccess_rulesets_test.go +++ b/pingaccess/resource_pingaccess_rulesets_test.go @@ -4,9 +4,10 @@ import ( "fmt" "testing" + "github.com/iwarapter/pingaccess-sdk-go/services/rulesets" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessRuleSet(t *testing.T) { @@ -84,8 +85,8 @@ func testAccCheckPingAccessRuleSetAttributes(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] - conn := testAccProvider.Meta().(*pa.Client).Rulesets - result, _, err := conn.GetRuleSetCommand(&pa.GetRuleSetCommandInput{ + conn := testAccProvider.Meta().(paClient).Rulesets + result, _, err := conn.GetRuleSetCommand(&rulesets.GetRuleSetCommandInput{ Id: rs.Primary.ID, }) @@ -112,8 +113,8 @@ func testAccCheckPingAccessRuleSetExists(n string) resource.TestCheckFunc { return fmt.Errorf("No RuleSet ID is set") } - conn := testAccProvider.Meta().(*pa.Client).Rulesets - result, _, err := conn.GetRuleSetCommand(&pa.GetRuleSetCommandInput{ + conn := testAccProvider.Meta().(paClient).Rulesets + result, _, err := conn.GetRuleSetCommand(&rulesets.GetRuleSetCommandInput{ Id: rs.Primary.ID, }) diff --git a/pingaccess/resource_pingaccess_site.go b/pingaccess/resource_pingaccess_site.go index 268f58f5..4abe7355 100644 --- a/pingaccess/resource_pingaccess_site.go +++ b/pingaccess/resource_pingaccess_site.go @@ -3,9 +3,11 @@ package pingaccess import ( "context" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/sites" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessSite() *schema.Resource { @@ -105,8 +107,8 @@ func resourcePingAccessSiteSchema() map[string]*schema.Schema { } func 
resourcePingAccessSiteCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Sites - input := pa.AddSiteCommandInput{ + svc := m.(paClient).Sites + input := sites.AddSiteCommandInput{ Body: *resourcePingAccessSiteReadData(d), } @@ -120,8 +122,8 @@ func resourcePingAccessSiteCreate(_ context.Context, d *schema.ResourceData, m i } func resourcePingAccessSiteRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Sites - input := &pa.GetSiteCommandInput{ + svc := m.(paClient).Sites + input := &sites.GetSiteCommandInput{ Id: d.Id(), } result, _, err := svc.GetSiteCommand(input) @@ -132,8 +134,8 @@ func resourcePingAccessSiteRead(_ context.Context, d *schema.ResourceData, m int } func resourcePingAccessSiteUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Sites - input := pa.UpdateSiteCommandInput{ + svc := m.(paClient).Sites + input := sites.UpdateSiteCommandInput{ Body: *resourcePingAccessSiteReadData(d), Id: d.Id(), } @@ -145,8 +147,8 @@ func resourcePingAccessSiteUpdate(_ context.Context, d *schema.ResourceData, m i } func resourcePingAccessSiteDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Sites - input := &pa.DeleteSiteCommandInput{ + svc := m.(paClient).Sites + input := &sites.DeleteSiteCommandInput{ Id: d.Id(), } @@ -157,7 +159,7 @@ func resourcePingAccessSiteDelete(_ context.Context, d *schema.ResourceData, m i return nil } -func resourcePingAccessSiteReadResult(d *schema.ResourceData, input *pa.SiteView) diag.Diagnostics { +func resourcePingAccessSiteReadResult(d *schema.ResourceData, input *models.SiteView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataIntWithDiagnostic(d, "availability_profile_id", input.AvailabilityProfileId, &diags) setResourceDataStringWithDiagnostic(d, "expected_hostname", input.ExpectedHostname, &diags) @@ -181,9 +183,9 @@ func resourcePingAccessSiteReadResult(d *schema.ResourceData, input *pa.SiteView return diags } -func resourcePingAccessSiteReadData(d *schema.ResourceData) *pa.SiteView { +func resourcePingAccessSiteReadData(d *schema.ResourceData) *models.SiteView { targets := expandStringList(d.Get("targets").(*schema.Set).List()) - site := &pa.SiteView{ + site := &models.SiteView{ Name: String(d.Get("name").(string)), Targets: &targets, } diff --git a/pingaccess/resource_pingaccess_site_authenticator.go b/pingaccess/resource_pingaccess_site_authenticator.go index 89e3edf4..116e1525 100644 --- a/pingaccess/resource_pingaccess_site_authenticator.go +++ b/pingaccess/resource_pingaccess_site_authenticator.go @@ -5,9 +5,11 @@ import ( "encoding/json" "fmt" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessSiteAuthenticator() *schema.Resource { @@ -21,7 +23,7 @@ func resourcePingAccessSiteAuthenticator() *schema.Resource { }, Schema: resourcePingAccessSiteAuthenticatorSchema(), CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error { - svc := m.(*pingaccess.Client).SiteAuthenticators + svc := m.(paClient).SiteAuthenticators desc, _, err := svc.GetSiteAuthenticatorDescriptorsCommand() if err != nil { 
return fmt.Errorf("unable to retrieve SiteAuthenticator descriptors %s", err) @@ -54,8 +56,8 @@ func resourcePingAccessSiteAuthenticatorSchema() map[string]*schema.Schema { } func resourcePingAccessSiteAuthenticatorCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).SiteAuthenticators - input := pingaccess.AddSiteAuthenticatorCommandInput{ + svc := m.(paClient).SiteAuthenticators + input := siteAuthenticators.AddSiteAuthenticatorCommandInput{ Body: *resourcePingAccessSiteAuthenticatorReadData(d), } @@ -69,8 +71,8 @@ func resourcePingAccessSiteAuthenticatorCreate(_ context.Context, d *schema.Reso } func resourcePingAccessSiteAuthenticatorRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).SiteAuthenticators - input := &pingaccess.GetSiteAuthenticatorCommandInput{ + svc := m.(paClient).SiteAuthenticators + input := &siteAuthenticators.GetSiteAuthenticatorCommandInput{ Id: d.Id(), } result, _, err := svc.GetSiteAuthenticatorCommand(input) @@ -81,8 +83,8 @@ func resourcePingAccessSiteAuthenticatorRead(_ context.Context, d *schema.Resour } func resourcePingAccessSiteAuthenticatorUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).SiteAuthenticators - input := pingaccess.UpdateSiteAuthenticatorCommandInput{ + svc := m.(paClient).SiteAuthenticators + input := siteAuthenticators.UpdateSiteAuthenticatorCommandInput{ Body: *resourcePingAccessSiteAuthenticatorReadData(d), Id: d.Id(), } @@ -94,8 +96,8 @@ func resourcePingAccessSiteAuthenticatorUpdate(_ context.Context, d *schema.Reso } func resourcePingAccessSiteAuthenticatorDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pingaccess.Client).SiteAuthenticators - input := &pingaccess.DeleteSiteAuthenticatorCommandInput{ + svc := m.(paClient).SiteAuthenticators + input := &siteAuthenticators.DeleteSiteAuthenticatorCommandInput{ Id: d.Id(), } @@ -106,7 +108,7 @@ func resourcePingAccessSiteAuthenticatorDelete(_ context.Context, d *schema.Reso return nil } -func resourcePingAccessSiteAuthenticatorReadResult(d *schema.ResourceData, input *pingaccess.SiteAuthenticatorView, svc pingaccess.SiteAuthenticatorsAPI) diag.Diagnostics { +func resourcePingAccessSiteAuthenticatorReadResult(d *schema.ResourceData, input *models.SiteAuthenticatorView, svc siteAuthenticators.SiteAuthenticatorsAPI) diag.Diagnostics { var diags diag.Diagnostics b, _ := json.Marshal(input.Configuration) config := string(b) @@ -125,11 +127,11 @@ func resourcePingAccessSiteAuthenticatorReadResult(d *schema.ResourceData, input return diags } -func resourcePingAccessSiteAuthenticatorReadData(d *schema.ResourceData) *pingaccess.SiteAuthenticatorView { +func resourcePingAccessSiteAuthenticatorReadData(d *schema.ResourceData) *models.SiteAuthenticatorView { config := d.Get("configuration").(string) var dat map[string]interface{} _ = json.Unmarshal([]byte(config), &dat) - siteAuthenticator := &pingaccess.SiteAuthenticatorView{ + siteAuthenticator := &models.SiteAuthenticatorView{ Name: String(d.Get("name").(string)), ClassName: String(d.Get("class_name").(string)), Configuration: dat, diff --git a/pingaccess/resource_pingaccess_site_authenticator_test.go b/pingaccess/resource_pingaccess_site_authenticator_test.go index 8233fb34..e9d756d2 100644 --- a/pingaccess/resource_pingaccess_site_authenticator_test.go +++ 
b/pingaccess/resource_pingaccess_site_authenticator_test.go @@ -1,20 +1,19 @@ package pingaccess import ( - "encoding/json" "fmt" "net/http" - "net/http/httptest" - "net/url" "regexp" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessSiteAuthenticator(t *testing.T) { @@ -115,8 +114,8 @@ func testAccCheckPingAccessSiteAuthenticatorExists(n string) resource.TestCheckF return fmt.Errorf("No site_authenticator ID is set") } - conn := testAccProvider.Meta().(*pa.Client).SiteAuthenticators - result, _, err := conn.GetSiteAuthenticatorCommand(&pa.GetSiteAuthenticatorCommandInput{ + conn := testAccProvider.Meta().(paClient).SiteAuthenticators + result, _, err := conn.GetSiteAuthenticatorCommand(&siteAuthenticators.GetSiteAuthenticatorCommandInput{ Id: rs.Primary.ID, }) @@ -132,13 +131,16 @@ func testAccCheckPingAccessSiteAuthenticatorExists(n string) resource.TestCheckF } } -func Test_resourcePingAccessSiteAuthenticatorReadData(t *testing.T) { +type siteAuthsMock struct { + siteAuthenticators.SiteAuthenticatorsAPI +} - descs := pa.DescriptorsView{ - Items: []*pa.DescriptorView{ +func (s siteAuthsMock) GetSiteAuthenticatorDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) { + return &models.DescriptorsView{ + Items: []*models.DescriptorView{ { ClassName: String("something"), - ConfigurationFields: []*pa.ConfigurationField{ + ConfigurationFields: []*models.ConfigurationField{ { Name: String("password"), Type: String("CONCEALED"), @@ -147,27 +149,15 @@ func Test_resourcePingAccessSiteAuthenticatorReadData(t *testing.T) { Label: nil, Type: nil, }, - }} - - server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - // Test request parameters - equals(t, req.URL.String(), "/siteAuthenticators/descriptors") - // Send response to be tested - b, _ := json.Marshal(descs) - rw.Write(b) - })) - // Close the server when test finishes - defer server.Close() - - // Use Client & URL from our local test server - url, _ := url.Parse(server.URL) - c := pa.NewClient("", "", url, "", server.Client()) + }}, nil, err +} +func Test_resourcePingAccessSiteAuthenticatorReadData(t *testing.T) { cases := []struct { - SiteAuthenticator pa.SiteAuthenticatorView + SiteAuthenticator models.SiteAuthenticatorView }{ { - SiteAuthenticator: pa.SiteAuthenticatorView{ + SiteAuthenticator: models.SiteAuthenticatorView{ Name: String("demo"), ClassName: String("something"), Configuration: map[string]interface{}{ @@ -183,7 +173,7 @@ func Test_resourcePingAccessSiteAuthenticatorReadData(t *testing.T) { t.Run(fmt.Sprintf("tc:%v", i), func(t *testing.T) { resourceSchema := resourcePingAccessSiteAuthenticatorSchema() resourceLocalData := schema.TestResourceDataRaw(t, resourceSchema, map[string]interface{}{}) - resourcePingAccessSiteAuthenticatorReadResult(resourceLocalData, &tc.SiteAuthenticator, c.SiteAuthenticators) + resourcePingAccessSiteAuthenticatorReadResult(resourceLocalData, &tc.SiteAuthenticator, siteAuthsMock{}) if got := *resourcePingAccessSiteAuthenticatorReadData(resourceLocalData); !cmp.Equal(got, tc.SiteAuthenticator) { 
t.Errorf("resourcePingAccessSiteAuthenticatorReadData() = %v", cmp.Diff(got, tc.SiteAuthenticator)) diff --git a/pingaccess/resource_pingaccess_site_test.go b/pingaccess/resource_pingaccess_site_test.go index 72ab709c..3d9b976e 100644 --- a/pingaccess/resource_pingaccess_site_test.go +++ b/pingaccess/resource_pingaccess_site_test.go @@ -5,12 +5,14 @@ import ( "strings" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/sites" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessSite(t *testing.T) { @@ -89,8 +91,8 @@ func testAccCheckPingAccessSiteExists(n string) resource.TestCheckFunc { return fmt.Errorf("no site ID is set") } - conn := testAccProvider.Meta().(*pa.Client).Sites - result, _, err := conn.GetSiteCommand(&pa.GetSiteCommandInput{ + conn := testAccProvider.Meta().(paClient).Sites + result, _, err := conn.GetSiteCommand(&sites.GetSiteCommandInput{ Id: rs.Primary.ID, }) @@ -108,10 +110,10 @@ func testAccCheckPingAccessSiteExists(n string) resource.TestCheckFunc { func Test_resourcePingAccessSiteReadData(t *testing.T) { cases := []struct { - Site pa.SiteView + Site models.SiteView }{ { - Site: pa.SiteView{ + Site: models.SiteView{ Name: String("demo"), Targets: &[]*string{String("localhost:1234")}, AvailabilityProfileId: Int(1), @@ -130,7 +132,7 @@ func Test_resourcePingAccessSiteReadData(t *testing.T) { }, }, { - Site: pa.SiteView{ + Site: models.SiteView{ Name: String("demo"), Targets: &[]*string{String("localhost:1234")}, AvailabilityProfileId: Int(0), diff --git a/pingaccess/resource_pingaccess_third_party_service.go b/pingaccess/resource_pingaccess_third_party_service.go index ec8e5782..7584554e 100644 --- a/pingaccess/resource_pingaccess_third_party_service.go +++ b/pingaccess/resource_pingaccess_third_party_service.go @@ -3,9 +3,11 @@ package pingaccess import ( "context" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessThirdPartyService() *schema.Resource { @@ -82,8 +84,8 @@ func resourcePingAccessThirdPartyServiceSchema() map[string]*schema.Schema { } func resourcePingAccessThirdPartyServiceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).ThirdPartyServices - input := pa.AddThirdPartyServiceCommandInput{ + svc := m.(paClient).ThirdPartyServices + input := thirdPartyServices.AddThirdPartyServiceCommandInput{ Body: *resourcePingAccessThirdPartyServiceReadData(d), } @@ -98,8 +100,8 @@ func resourcePingAccessThirdPartyServiceCreate(ctx context.Context, d *schema.Re } func resourcePingAccessThirdPartyServiceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).ThirdPartyServices - input := &pa.GetThirdPartyServiceCommandInput{ + svc := m.(paClient).ThirdPartyServices + input := &thirdPartyServices.GetThirdPartyServiceCommandInput{ Id: d.Id(), } result, _, err := svc.GetThirdPartyServiceCommand(input) @@ -110,8 +112,8 @@ 
func resourcePingAccessThirdPartyServiceRead(ctx context.Context, d *schema.Reso } func resourcePingAccessThirdPartyServiceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).ThirdPartyServices - input := pa.UpdateThirdPartyServiceCommandInput{ + svc := m.(paClient).ThirdPartyServices + input := thirdPartyServices.UpdateThirdPartyServiceCommandInput{ Body: *resourcePingAccessThirdPartyServiceReadData(d), Id: d.Id(), } @@ -123,9 +125,9 @@ func resourcePingAccessThirdPartyServiceUpdate(ctx context.Context, d *schema.Re } func resourcePingAccessThirdPartyServiceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).ThirdPartyServices + svc := m.(paClient).ThirdPartyServices - input := &pa.DeleteThirdPartyServiceCommandInput{ + input := &thirdPartyServices.DeleteThirdPartyServiceCommandInput{ Id: d.Id(), } @@ -136,7 +138,7 @@ func resourcePingAccessThirdPartyServiceDelete(ctx context.Context, d *schema.Re return nil } -func resourcePingAccessThirdPartyServiceReadResult(d *schema.ResourceData, input *pa.ThirdPartyServiceView) diag.Diagnostics { +func resourcePingAccessThirdPartyServiceReadResult(d *schema.ResourceData, input *models.ThirdPartyServiceView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataIntWithDiagnostic(d, "availability_profile_id", input.AvailabilityProfileId, &diags) setResourceDataStringWithDiagnostic(d, "expected_hostname", input.ExpectedHostname, &diags) @@ -154,9 +156,9 @@ func resourcePingAccessThirdPartyServiceReadResult(d *schema.ResourceData, input return diags } -func resourcePingAccessThirdPartyServiceReadData(d *schema.ResourceData) *pa.ThirdPartyServiceView { +func resourcePingAccessThirdPartyServiceReadData(d *schema.ResourceData) *models.ThirdPartyServiceView { targets := expandStringList(d.Get("targets").(*schema.Set).List()) - tps := &pa.ThirdPartyServiceView{ + tps := &models.ThirdPartyServiceView{ Name: String(d.Get("name").(string)), Targets: &targets, } diff --git a/pingaccess/resource_pingaccess_third_party_service_test.go b/pingaccess/resource_pingaccess_third_party_service_test.go index 22d35bdf..b3bdd9e0 100644 --- a/pingaccess/resource_pingaccess_third_party_service_test.go +++ b/pingaccess/resource_pingaccess_third_party_service_test.go @@ -4,11 +4,13 @@ import ( "fmt" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessThirdPartyService(t *testing.T) { @@ -80,8 +82,8 @@ func testAccCheckPingAccessThirdPartyServiceExists(n string) resource.TestCheckF return fmt.Errorf("no third party service ID is set") } - conn := testAccProvider.Meta().(*pa.Client).ThirdPartyServices - result, _, err := conn.GetThirdPartyServiceCommand(&pa.GetThirdPartyServiceCommandInput{ + conn := testAccProvider.Meta().(paClient).ThirdPartyServices + result, _, err := conn.GetThirdPartyServiceCommand(&thirdPartyServices.GetThirdPartyServiceCommandInput{ Id: rs.Primary.ID, }) @@ -99,10 +101,10 @@ func testAccCheckPingAccessThirdPartyServiceExists(n string) resource.TestCheckF func Test_resourcePingAccessThirdPartyServiceReadData(t *testing.T) { 
cases := []struct { - ThirdPartyService pa.ThirdPartyServiceView + ThirdPartyService models.ThirdPartyServiceView }{ { - ThirdPartyService: pa.ThirdPartyServiceView{ + ThirdPartyService: models.ThirdPartyServiceView{ Name: String("localhost"), Targets: &[]*string{String("localhost:1234")}, AvailabilityProfileId: Int(1), @@ -117,7 +119,7 @@ func Test_resourcePingAccessThirdPartyServiceReadData(t *testing.T) { }, }, { - ThirdPartyService: pa.ThirdPartyServiceView{ + ThirdPartyService: models.ThirdPartyServiceView{ Name: String("localhost"), Targets: &[]*string{String("localhost:1234")}, AvailabilityProfileId: Int(0), diff --git a/pingaccess/resource_pingaccess_trusted_certificate_groups.go b/pingaccess/resource_pingaccess_trusted_certificate_groups.go index a4199214..9a186c31 100644 --- a/pingaccess/resource_pingaccess_trusted_certificate_groups.go +++ b/pingaccess/resource_pingaccess_trusted_certificate_groups.go @@ -4,10 +4,12 @@ import ( "context" "strconv" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessTrustedCertificateGroups() *schema.Resource { @@ -60,8 +62,8 @@ func resourcePingAccessTrustedCertificateGroupsSchema() map[string]*schema.Schem } func resourcePingAccessTrustedCertificateGroupsCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).TrustedCertificateGroups - input := pa.AddTrustedCertificateGroupCommandInput{ + svc := m.(paClient).TrustedCertificateGroups + input := trustedCertificateGroups.AddTrustedCertificateGroupCommandInput{ Body: *resourcePingAccessTrustedCertificateGroupsReadData(d), } @@ -75,8 +77,8 @@ func resourcePingAccessTrustedCertificateGroupsCreate(_ context.Context, d *sche } func resourcePingAccessTrustedCertificateGroupsRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).TrustedCertificateGroups - input := &pa.GetTrustedCertificateGroupCommandInput{ + svc := m.(paClient).TrustedCertificateGroups + input := &trustedCertificateGroups.GetTrustedCertificateGroupCommandInput{ Id: d.Id(), } result, _, err := svc.GetTrustedCertificateGroupCommand(input) @@ -87,8 +89,8 @@ func resourcePingAccessTrustedCertificateGroupsRead(_ context.Context, d *schema } func resourcePingAccessTrustedCertificateGroupsUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).TrustedCertificateGroups - input := pa.UpdateTrustedCertificateGroupCommandInput{ + svc := m.(paClient).TrustedCertificateGroups + input := trustedCertificateGroups.UpdateTrustedCertificateGroupCommandInput{ Body: *resourcePingAccessTrustedCertificateGroupsReadData(d), Id: d.Id(), } @@ -100,8 +102,8 @@ func resourcePingAccessTrustedCertificateGroupsUpdate(_ context.Context, d *sche } func resourcePingAccessTrustedCertificateGroupsDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).TrustedCertificateGroups - input := &pa.DeleteTrustedCertificateGroupCommandInput{ + svc := m.(paClient).TrustedCertificateGroups + input := &trustedCertificateGroups.DeleteTrustedCertificateGroupCommandInput{ Id: d.Id(), } @@ -112,7 +114,7 @@ func resourcePingAccessTrustedCertificateGroupsDelete(_ context.Context, d 
*sche return nil } -func resourcePingAccessTrustedCertificateGroupsReadResult(d *schema.ResourceData, input *pa.TrustedCertificateGroupView) diag.Diagnostics { +func resourcePingAccessTrustedCertificateGroupsReadResult(d *schema.ResourceData, input *models.TrustedCertificateGroupView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataBoolWithDiagnostic(d, "ignore_all_certificate_errors", input.IgnoreAllCertificateErrors, &diags) setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) @@ -133,9 +135,9 @@ func resourcePingAccessTrustedCertificateGroupsReadResult(d *schema.ResourceData return diags } -func resourcePingAccessTrustedCertificateGroupsReadData(d *schema.ResourceData) *pa.TrustedCertificateGroupView { +func resourcePingAccessTrustedCertificateGroupsReadData(d *schema.ResourceData) *models.TrustedCertificateGroupView { - tcg := &pa.TrustedCertificateGroupView{ + tcg := &models.TrustedCertificateGroupView{ Name: String(d.Get("name").(string)), } diff --git a/pingaccess/resource_pingaccess_trusted_certificate_groups_test.go b/pingaccess/resource_pingaccess_trusted_certificate_groups_test.go index 0744683a..bd1bb742 100644 --- a/pingaccess/resource_pingaccess_trusted_certificate_groups_test.go +++ b/pingaccess/resource_pingaccess_trusted_certificate_groups_test.go @@ -4,11 +4,13 @@ import ( "fmt" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessTrustedCertificateGroups(t *testing.T) { @@ -57,8 +59,8 @@ func testAccCheckPingAccessTrustedCertificateGroupsExists(n string) resource.Tes return fmt.Errorf("No third party service ID is set") } - conn := testAccProvider.Meta().(*pa.Client).TrustedCertificateGroups - result, _, err := conn.GetTrustedCertificateGroupCommand(&pa.GetTrustedCertificateGroupCommandInput{ + conn := testAccProvider.Meta().(paClient).TrustedCertificateGroups + result, _, err := conn.GetTrustedCertificateGroupCommand(&trustedCertificateGroups.GetTrustedCertificateGroupCommandInput{ Id: rs.Primary.ID, }) @@ -76,10 +78,10 @@ func testAccCheckPingAccessTrustedCertificateGroupsExists(n string) resource.Tes func Test_resourcePingAccessTrustedCertificateGroupsReadData(t *testing.T) { cases := []struct { - TrustedCertificateGroups pa.TrustedCertificateGroupView + TrustedCertificateGroups models.TrustedCertificateGroupView }{ { - TrustedCertificateGroups: pa.TrustedCertificateGroupView{ + TrustedCertificateGroups: models.TrustedCertificateGroupView{ Name: String("localhost"), CertIds: &[]*int{Int(1)}, UseJavaTrustStore: Bool(false), diff --git a/pingaccess/resource_pingaccess_virtualhost.go b/pingaccess/resource_pingaccess_virtualhost.go index 3041365d..be10669b 100644 --- a/pingaccess/resource_pingaccess_virtualhost.go +++ b/pingaccess/resource_pingaccess_virtualhost.go @@ -3,10 +3,12 @@ package pingaccess import ( "context" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa 
"github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessVirtualHost() *schema.Resource { @@ -52,8 +54,8 @@ func resourcePingAccessVirtualHostSchema() map[string]*schema.Schema { } func resourcePingAccessVirtualHostCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Virtualhosts - input := pa.AddVirtualHostCommandInput{ + svc := m.(paClient).Virtualhosts + input := virtualhosts.AddVirtualHostCommandInput{ Body: *resourcePingAccessVirtualHostReadData(d), } @@ -67,8 +69,8 @@ func resourcePingAccessVirtualHostCreate(ctx context.Context, d *schema.Resource } func resourcePingAccessVirtualHostRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Virtualhosts - input := &pa.GetVirtualHostCommandInput{ + svc := m.(paClient).Virtualhosts + input := &virtualhosts.GetVirtualHostCommandInput{ Id: d.Id(), } result, _, err := svc.GetVirtualHostCommand(input) @@ -79,8 +81,8 @@ func resourcePingAccessVirtualHostRead(ctx context.Context, d *schema.ResourceDa } func resourcePingAccessVirtualHostUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Virtualhosts - input := pa.UpdateVirtualHostCommandInput{ + svc := m.(paClient).Virtualhosts + input := virtualhosts.UpdateVirtualHostCommandInput{ Body: *resourcePingAccessVirtualHostReadData(d), Id: d.Id(), } @@ -93,8 +95,8 @@ func resourcePingAccessVirtualHostUpdate(ctx context.Context, d *schema.Resource } func resourcePingAccessVirtualHostDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).Virtualhosts - input := &pa.DeleteVirtualHostCommandInput{ + svc := m.(paClient).Virtualhosts + input := &virtualhosts.DeleteVirtualHostCommandInput{ Id: d.Id(), } @@ -105,7 +107,7 @@ func resourcePingAccessVirtualHostDelete(ctx context.Context, d *schema.Resource return nil } -func resourcePingAccessVirtualHostReadResult(d *schema.ResourceData, input *pa.VirtualHostView) diag.Diagnostics { +func resourcePingAccessVirtualHostReadResult(d *schema.ResourceData, input *models.VirtualHostView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataStringWithDiagnostic(d, "host", input.Host, &diags) setResourceDataIntWithDiagnostic(d, "port", input.Port, &diags) @@ -116,8 +118,8 @@ func resourcePingAccessVirtualHostReadResult(d *schema.ResourceData, input *pa.V return diags } -func resourcePingAccessVirtualHostReadData(d *schema.ResourceData) *pa.VirtualHostView { - vh := &pa.VirtualHostView{ +func resourcePingAccessVirtualHostReadData(d *schema.ResourceData) *models.VirtualHostView { + vh := &models.VirtualHostView{ Host: String(d.Get("host").(string)), Port: Int(d.Get("port").(int)), } diff --git a/pingaccess/resource_pingaccess_virtualhost_test.go b/pingaccess/resource_pingaccess_virtualhost_test.go index 2f9803ab..b962f995 100644 --- a/pingaccess/resource_pingaccess_virtualhost_test.go +++ b/pingaccess/resource_pingaccess_virtualhost_test.go @@ -4,11 +4,13 @@ import ( "fmt" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func 
TestAccPingAccessVirtualHost(t *testing.T) { @@ -61,8 +63,8 @@ func testAccCheckPingAccessVirtualHostExists(n string) resource.TestCheckFunc { return fmt.Errorf("No virtualhost ID is set") } - conn := testAccProvider.Meta().(*pa.Client).Virtualhosts - result, _, err := conn.GetVirtualHostCommand(&pa.GetVirtualHostCommandInput{ + conn := testAccProvider.Meta().(paClient).Virtualhosts + result, _, err := conn.GetVirtualHostCommand(&virtualhosts.GetVirtualHostCommandInput{ Id: rs.Primary.ID, }) @@ -80,10 +82,10 @@ func testAccCheckPingAccessVirtualHostExists(n string) resource.TestCheckFunc { func Test_resourcePingAccessVirtualhostReadData(t *testing.T) { cases := []struct { - VirtualHost pa.VirtualHostView + VirtualHost models.VirtualHostView }{ { - VirtualHost: pa.VirtualHostView{ + VirtualHost: models.VirtualHostView{ Host: String("localhost"), Port: Int(9999), AgentResourceCacheTTL: Int(0), @@ -92,7 +94,7 @@ func Test_resourcePingAccessVirtualhostReadData(t *testing.T) { }, }, { - VirtualHost: pa.VirtualHostView{ + VirtualHost: models.VirtualHostView{ Host: String("localhost"), Port: Int(9999), AgentResourceCacheTTL: Int(30), diff --git a/pingaccess/resource_pingaccess_websession.go b/pingaccess/resource_pingaccess_websession.go index eee8f139..a770e24a 100644 --- a/pingaccess/resource_pingaccess_websession.go +++ b/pingaccess/resource_pingaccess_websession.go @@ -3,10 +3,12 @@ package pingaccess import ( "context" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/webSessions" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func resourcePingAccessWebSession() *schema.Resource { @@ -145,8 +147,8 @@ func resourcePingAccessWebSessionSchema() map[string]*schema.Schema { } func resourcePingAccessWebSessionCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).WebSessions - input := pa.AddWebSessionCommandInput{ + svc := m.(paClient).WebSessions + input := webSessions.AddWebSessionCommandInput{ Body: *resourcePingAccessWebSessionReadData(d), } @@ -160,8 +162,8 @@ func resourcePingAccessWebSessionCreate(_ context.Context, d *schema.ResourceDat } func resourcePingAccessWebSessionRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).WebSessions - input := &pa.GetWebSessionCommandInput{ + svc := m.(paClient).WebSessions + input := &webSessions.GetWebSessionCommandInput{ Id: d.Id(), } result, _, err := svc.GetWebSessionCommand(input) @@ -172,8 +174,8 @@ func resourcePingAccessWebSessionRead(_ context.Context, d *schema.ResourceData, } func resourcePingAccessWebSessionUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).WebSessions - input := pa.UpdateWebSessionCommandInput{ + svc := m.(paClient).WebSessions + input := webSessions.UpdateWebSessionCommandInput{ Body: *resourcePingAccessWebSessionReadData(d), Id: d.Id(), } @@ -185,8 +187,8 @@ func resourcePingAccessWebSessionUpdate(_ context.Context, d *schema.ResourceDat } func resourcePingAccessWebSessionDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - svc := m.(*pa.Client).WebSessions - input := &pa.DeleteWebSessionCommandInput{ + svc := m.(paClient).WebSessions + input := &webSessions.DeleteWebSessionCommandInput{ Id: d.Id(), } @@ -197,7 +199,7 
@@ func resourcePingAccessWebSessionDelete(_ context.Context, d *schema.ResourceDat return nil } -func resourcePingAccessWebSessionReadResult(d *schema.ResourceData, input *pa.WebSessionView) diag.Diagnostics { +func resourcePingAccessWebSessionReadResult(d *schema.ResourceData, input *models.WebSessionView) diag.Diagnostics { var diags diag.Diagnostics setResourceDataStringWithDiagnostic(d, "audience", input.Audience, &diags) setResourceDataStringWithDiagnostic(d, "name", input.Name, &diags) @@ -239,8 +241,8 @@ func resourcePingAccessWebSessionReadResult(d *schema.ResourceData, input *pa.We return diags } -func resourcePingAccessWebSessionReadData(d *schema.ResourceData) *pa.WebSessionView { - websession := &pa.WebSessionView{ +func resourcePingAccessWebSessionReadData(d *schema.ResourceData) *models.WebSessionView { + websession := &models.WebSessionView{ Audience: String(d.Get("audience").(string)), Name: String(d.Get("name").(string)), ClientCredentials: expandOAuthClientCredentialsView(d.Get("client_credentials").([]interface{})), diff --git a/pingaccess/resource_pingaccess_websession_test.go b/pingaccess/resource_pingaccess_websession_test.go index 1865ce71..e179abf4 100644 --- a/pingaccess/resource_pingaccess_websession_test.go +++ b/pingaccess/resource_pingaccess_websession_test.go @@ -4,11 +4,13 @@ import ( "fmt" "testing" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/services/webSessions" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func TestAccPingAccessWebSession(t *testing.T) { @@ -68,8 +70,8 @@ func testAccCheckPingAccessWebSessionExists(n string) resource.TestCheckFunc { return fmt.Errorf("No websession ID is set") } - conn := testAccProvider.Meta().(*pa.Client).WebSessions - result, _, err := conn.GetWebSessionCommand(&pa.GetWebSessionCommandInput{ + conn := testAccProvider.Meta().(paClient).WebSessions + result, _, err := conn.GetWebSessionCommand(&webSessions.GetWebSessionCommandInput{ Id: rs.Primary.ID, }) @@ -87,15 +89,15 @@ func testAccCheckPingAccessWebSessionExists(n string) resource.TestCheckFunc { func Test_resourcePingAccessWebSessionReadData(t *testing.T) { cases := []struct { - WebSession pa.WebSessionView + WebSession models.WebSessionView }{ { - WebSession: pa.WebSessionView{ + WebSession: models.WebSessionView{ Audience: String("localhost"), Name: String("localhost"), - ClientCredentials: &pa.OAuthClientCredentialsView{ + ClientCredentials: &models.OAuthClientCredentialsView{ ClientId: String("client"), - ClientSecret: &pa.HiddenFieldView{}, + ClientSecret: &models.HiddenFieldView{}, }, WebStorageType: String("SessionStorage"), CacheUserAttributes: Bool(true), @@ -119,12 +121,12 @@ func Test_resourcePingAccessWebSessionReadData(t *testing.T) { }, }, { - WebSession: pa.WebSessionView{ + WebSession: models.WebSessionView{ Audience: String("localhost"), Name: String("localhost"), - ClientCredentials: &pa.OAuthClientCredentialsView{ + ClientCredentials: &models.OAuthClientCredentialsView{ ClientId: String("client"), - ClientSecret: &pa.HiddenFieldView{}, + ClientSecret: &models.HiddenFieldView{}, }, WebStorageType: String("SessionStorage"), CacheUserAttributes: Bool(false), diff --git a/pingaccess/structures.go b/pingaccess/structures.go index 
e3792675..3c3d86d4 100644 --- a/pingaccess/structures.go +++ b/pingaccess/structures.go @@ -9,12 +9,13 @@ import ( "strconv" "strings" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/tidwall/gjson" "github.com/tidwall/sjson" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func setOfString() *schema.Schema { @@ -148,8 +149,8 @@ func requiredHiddenField() *schema.Schema { return sch } -func expandHiddenFieldView(in []interface{}) *pa.HiddenFieldView { - hf := &pa.HiddenFieldView{} +func expandHiddenFieldView(in []interface{}) *models.HiddenFieldView { + hf := &models.HiddenFieldView{} for _, raw := range in { if raw == nil { return hf @@ -165,7 +166,7 @@ func expandHiddenFieldView(in []interface{}) *pa.HiddenFieldView { return hf } -func flattenHiddenFieldView(in *pa.HiddenFieldView) []map[string]interface{} { +func flattenHiddenFieldView(in *models.HiddenFieldView) []map[string]interface{} { m := make([]map[string]interface{}, 0, 1) s := make(map[string]interface{}) if in.Value != nil { @@ -178,8 +179,8 @@ func flattenHiddenFieldView(in *pa.HiddenFieldView) []map[string]interface{} { return m } -func expandOAuthClientCredentialsView(in []interface{}) *pa.OAuthClientCredentialsView { - hf := &pa.OAuthClientCredentialsView{} +func expandOAuthClientCredentialsView(in []interface{}) *models.OAuthClientCredentialsView { + hf := &models.OAuthClientCredentialsView{} for _, raw := range in { l := raw.(map[string]interface{}) if val, ok := l["client_id"]; ok { @@ -192,7 +193,7 @@ func expandOAuthClientCredentialsView(in []interface{}) *pa.OAuthClientCredentia return hf } -func flattenOAuthClientCredentialsView(in *pa.OAuthClientCredentialsView) []map[string]interface{} { +func flattenOAuthClientCredentialsView(in *models.OAuthClientCredentialsView) []map[string]interface{} { m := make([]map[string]interface{}, 0, 1) s := make(map[string]interface{}) if in.ClientId != nil { @@ -217,10 +218,10 @@ func flattenIdentityMappingIds(in map[string]*int) []interface{} { return []interface{}{m} } -func expandPolicyItem(in []interface{}) []*pa.PolicyItem { - var policies []*pa.PolicyItem +func expandPolicyItem(in []interface{}) []*models.PolicyItem { + var policies []*models.PolicyItem for _, raw := range in { - policy := &pa.PolicyItem{} + policy := &models.PolicyItem{} l := raw.(map[string]interface{}) if val, ok := l["id"]; ok { policy.Id = json.Number(val.(string)) @@ -233,7 +234,7 @@ func expandPolicyItem(in []interface{}) []*pa.PolicyItem { return policies } -func flattenPolicyItem(in []*pa.PolicyItem) []interface{} { +func flattenPolicyItem(in []*models.PolicyItem) []interface{} { var m []interface{} for _, v := range in { s := make(map[string]interface{}) @@ -244,11 +245,11 @@ func flattenPolicyItem(in []*pa.PolicyItem) []interface{} { return m } -func expandPolicy(in []interface{}) map[string]*[]*pa.PolicyItem { - ca := map[string]*[]*pa.PolicyItem{} +func expandPolicy(in []interface{}) map[string]*[]*models.PolicyItem { + ca := map[string]*[]*models.PolicyItem{} - webPolicies := make([]*pa.PolicyItem, 0) - apiPolicies := make([]*pa.PolicyItem, 0) + webPolicies := make([]*models.PolicyItem, 0) + apiPolicies := make([]*models.PolicyItem, 0) for _, raw := range in { if raw != nil { l := raw.(map[string]interface{}) @@ -267,7 +268,7 @@ func expandPolicy(in []interface{}) map[string]*[]*pa.PolicyItem { return ca } -func 
flattenPolicy(in map[string]*[]*pa.PolicyItem) []interface{} { +func flattenPolicy(in map[string]*[]*models.PolicyItem) []interface{} { // m := make([]map[string]interface{}, 0, 1) var m []interface{} s := make(map[string]interface{}) @@ -309,7 +310,7 @@ func expandIntList(configured []interface{}) []*int { return vs } -func flattenChainCertificates(in []*pa.ChainCertificateView) *schema.Set { +func flattenChainCertificates(in []*models.ChainCertificateView) *schema.Set { var m []interface{} for _, v := range in { s := make(map[string]interface{}) @@ -339,7 +340,7 @@ func configFieldHash(v interface{}) int { return hashString(buf.String()) } -func flattenLinkViewList(in []*pa.LinkView) []interface{} { +func flattenLinkViewList(in []*models.LinkView) []interface{} { var m []interface{} for _, v := range in { m = append(m, flattenLinkView(v)) @@ -347,7 +348,7 @@ func flattenLinkViewList(in []*pa.LinkView) []interface{} { return m } -func flattenLinkView(in *pa.LinkView) map[string]interface{} { +func flattenLinkView(in *models.LinkView) map[string]interface{} { s := make(map[string]interface{}) if in.Id != nil { s["id"] = *in.Id @@ -364,7 +365,7 @@ func flattenLinkView(in *pa.LinkView) map[string]interface{} { // fields. // // TODO This has a drawback that we cannot detect drift in CONCEALED fields due to the way the PingAccess API works. -func maskConfigFromRuleDescriptors(desc *pa.RuleDescriptorsView, input *string, originalConfig string, config string) string { +func maskConfigFromRuleDescriptors(desc *models.RuleDescriptorsView, input *string, originalConfig string, config string) string { for _, value := range desc.Items { if *value.ClassName == *input { config = maskConfigFromRuleDescriptor(value, String(""), originalConfig, config) @@ -373,7 +374,7 @@ func maskConfigFromRuleDescriptors(desc *pa.RuleDescriptorsView, input *string, return config } -func maskConfigFromRuleDescriptor(desc *pa.RuleDescriptorView, input *string, originalConfig string, config string) string { +func maskConfigFromRuleDescriptor(desc *models.RuleDescriptorView, input *string, originalConfig string, config string) string { for _, c := range desc.ConfigurationFields { config = maskConfigFromConfigurationField(c, input, originalConfig, config) } @@ -386,7 +387,7 @@ func maskConfigFromRuleDescriptor(desc *pa.RuleDescriptorView, input *string, or // fields. // // TODO This has a drawback that we cannot detect drift in CONCEALED fields due to the way the PingAccess API works. 
-func maskConfigFromDescriptors(desc *pa.DescriptorsView, input *string, originalConfig string, config string) string { +func maskConfigFromDescriptors(desc *models.DescriptorsView, input *string, originalConfig string, config string) string { for _, value := range desc.Items { if *value.ClassName == *input { config = maskConfigFromDescriptor(value, String(""), originalConfig, config) @@ -395,14 +396,14 @@ func maskConfigFromDescriptors(desc *pa.DescriptorsView, input *string, original return config } -func maskConfigFromDescriptor(desc *pa.DescriptorView, input *string, originalConfig string, config string) string { +func maskConfigFromDescriptor(desc *models.DescriptorView, input *string, originalConfig string, config string) string { for _, c := range desc.ConfigurationFields { config = maskConfigFromConfigurationField(c, input, originalConfig, config) } return config } -func maskConfigFromConfigurationField(field *pa.ConfigurationField, input *string, originalConfig string, config string) string { +func maskConfigFromConfigurationField(field *models.ConfigurationField, input *string, originalConfig string, config string) string { if *field.Type == "CONCEALED" { path := fmt.Sprintf("%s.value", *field.Name) v := gjson.Get(originalConfig, path) @@ -426,7 +427,7 @@ func maskConfigFromConfigurationField(field *pa.ConfigurationField, input *strin //Checks the class name specified exists in the DescriptorsView // -func descriptorsHasClassName(className string, desc *pa.DescriptorsView) error { +func descriptorsHasClassName(className string, desc *models.DescriptorsView) error { var classes []string for _, value := range desc.Items { classes = append(classes, *value.ClassName) @@ -439,7 +440,7 @@ func descriptorsHasClassName(className string, desc *pa.DescriptorsView) error { //Checks all the fields in the descriptor to ensure all required fields are set // -func validateConfiguration(className string, d *schema.ResourceDiff, desc *pa.DescriptorsView) error { +func validateConfiguration(className string, d *schema.ResourceDiff, desc *models.DescriptorsView) error { var diags diag.Diagnostics conf := d.Get("configuration").(string) if conf == "" { @@ -471,7 +472,7 @@ func validateConfiguration(className string, d *schema.ResourceDiff, desc *pa.De //Checks the class name specified exists in the RuleDescriptorsView // -func ruleDescriptorsHasClassName(className string, desc *pa.RuleDescriptorsView) error { +func ruleDescriptorsHasClassName(className string, desc *models.RuleDescriptorsView) error { var classes []string for _, value := range desc.Items { classes = append(classes, *value.ClassName) @@ -484,7 +485,7 @@ func ruleDescriptorsHasClassName(className string, desc *pa.RuleDescriptorsView) //Checks all the fields in the Rule descriptor to ensure all required fields are set // -func validateRulesConfiguration(className string, d *schema.ResourceDiff, desc *pa.RuleDescriptorsView) error { +func validateRulesConfiguration(className string, d *schema.ResourceDiff, desc *models.RuleDescriptorsView) error { var diags diag.Diagnostics conf := d.Get("configuration").(string) if conf == "" { diff --git a/pingaccess/structures_test.go b/pingaccess/structures_test.go index 43624e78..244124d9 100644 --- a/pingaccess/structures_test.go +++ b/pingaccess/structures_test.go @@ -2,9 +2,8 @@ package pingaccess import ( "encoding/json" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" "testing" - - pa "github.com/iwarapter/pingaccess-sdk-go/pingaccess" ) func testHiddenFieldView()
map[string]interface{} { @@ -15,7 +14,7 @@ func testHiddenFieldView() map[string]interface{} { } func Test_weCanFlattenHiddenFieldView(t *testing.T) { - initialHiddenFieldView := &pa.HiddenFieldView{ + initialHiddenFieldView := &models.HiddenFieldView{ Value: String("atat"), EncryptedValue: String("atat"), } @@ -49,9 +48,9 @@ func testOAuthClientCredentials() map[string]interface{} { } func Test_weCanFlattenOAuthClientCredentials(t *testing.T) { - initialOAuthClientCredentialsView := &pa.OAuthClientCredentialsView{ + initialOAuthClientCredentialsView := &models.OAuthClientCredentialsView{ ClientId: String("atat"), - ClientSecret: &pa.HiddenFieldView{ + ClientSecret: &models.HiddenFieldView{ Value: String("atat"), EncryptedValue: String("atat"), }, @@ -81,8 +80,8 @@ func testPolicyItem() map[string]interface{} { } func Test_weCanFlattenPolicy(t *testing.T) { - initialPolicyItem := []*pa.PolicyItem{ - &pa.PolicyItem{ + initialPolicyItem := []*models.PolicyItem{ + &models.PolicyItem{ Id: json.Number("1334"), Type: String("Rule"), }, diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 2133562b..7ad400f2 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -6,6 +6,10 @@ // // This package is intended to be a more powerful and safer alternative to // reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. Its propensity towards +// panicking means that it's unsuitable for production environments where a +// spurious panic may be fatal. // // The primary features of cmp are: // @@ -22,8 +26,8 @@ // equality is determined by recursively comparing the primitive kinds on both // values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported // fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared -// using the AllowUnexported option. +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly +// compared using the Exporter option. package cmp import ( @@ -62,8 +66,8 @@ import ( // // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an Ignore option -// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported -// option explicitly permits comparing the unexported field. +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +// explicitly permits comparing the unexported field. // // Slices are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored slice or array elements report equal. @@ -80,6 +84,11 @@ import ( // Pointers and interfaces are equal if they are both nil or both non-nil, // where they have the same underlying concrete type and recursively // calling Equal on the underlying values reports equal. +// +// Before recursing into a pointer, slice element, or map, the current path +// is checked to detect whether the address has already been visited. +// If there is a cycle, then the pointed at values are considered equal +// only if both addresses were previously visited in the same path step.
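The cycle support described in the doc comment above can be exercised directly. A minimal sketch, assuming only this vendored version of go-cmp; the node type is made up for illustration:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type node struct {
	Value int
	Next  *node
}

func main() {
	// Two structurally identical one-element cycles.
	x := &node{Value: 1}
	x.Next = x
	y := &node{Value: 1}
	y.Next = y
	// Each side revisits its pointer at the same path step, so the cycle
	// is reported as equal instead of recursing forever.
	fmt.Println(cmp.Equal(x, y)) // true
}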
func Equal(x, y interface{}, opts ...Option) bool { vx := reflect.ValueOf(x) vy := reflect.ValueOf(y) @@ -137,6 +146,7 @@ type state struct { // Calling statelessCompare must not result in observable changes to these. result diff.Result // The current result of comparison curPath Path // The current path in the value tree + curPtrs pointerPath // The current set of visited pointers reporters []reporter // Optional reporters // recChecker checks for infinite cycles applying the same set of @@ -148,13 +158,14 @@ type state struct { dynChecker dynChecker // These fields, once set by processOption, will not change. - exporters map[reflect.Type]bool // Set of structs with unexported field visibility - opts Options // List of all fundamental and filter options + exporters []exporter // List of exporters for structs with unexported fields + opts Options // List of all fundamental and filter options } func newState(opts []Option) *state { // Always ensure a validator option exists to validate the inputs. s := &state{opts: Options{validator{}}} + s.curPtrs.Init() s.processOption(Options(opts)) return s } @@ -174,13 +185,8 @@ func (s *state) processOption(opt Option) { panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) } s.opts = append(s.opts, opt) - case visibleStructs: - if s.exporters == nil { - s.exporters = make(map[reflect.Type]bool) - } - for t := range opt { - s.exporters[t] = true - } + case exporter: + s.exporters = append(s.exporters, opt) case reporter: s.reporters = append(s.reporters, opt) default: @@ -192,9 +198,9 @@ func (s *state) processOption(opt Option) { // This function is stateless in that it does not alter the current result, // or output to any registered reporters. func (s *state) statelessCompare(step PathStep) diff.Result { - // We do not save and restore the curPath because all of the compareX - // methods should properly push and pop from the path. - // It is an implementation bug if the contents of curPath differs from + // We do not save and restore curPath and curPtrs because all of the + // compareX methods should properly push and pop from them. + // It is an implementation bug if the contents of the paths differ from // when calling this function to when returning from it. oldResult, oldReporters := s.result, s.reporters @@ -216,9 +222,17 @@ func (s *state) compareAny(step PathStep) { } s.recChecker.Check(s.curPath) - // Obtain the current type and values. + // Cycle-detection for slice elements (see NOTE in compareSlice). t := step.Type() vx, vy := step.Values() + if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { + px, py := vx.Addr(), vy.Addr() + if eq, visited := s.curPtrs.Push(px, py); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(px, py) + } // Rule 1: Check whether an option applies on this node in the value tree. 
if s.tryOptions(t, vx, vy) { @@ -354,6 +368,7 @@ func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { var vax, vay reflect.Value // Addressable versions of vx and vy + var mayForce, mayForceInit bool step := StructField{&structField{}} for i := 0; i < t.NumField(); i++ { step.typ = t.Field(i).Type @@ -375,7 +390,13 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { vax = makeAddressable(vx) vay = makeAddressable(vy) } - step.mayForce = s.exporters[t] + if !mayForceInit { + for _, xf := range s.exporters { + mayForce = mayForce || xf(t) + } + mayForceInit = true + } + step.mayForce = mayForce step.pvx = vax step.pvy = vay step.field = t.Field(i) @@ -391,9 +412,21 @@ func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer + // since slices represent a list of pointers, rather than a single pointer. + // The pointer checking logic must be handled on a per-element basis + // in compareAny. + // + // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting + // pointer P, a length N, and a capacity C. Supposing each slice element has + // a memory size of M, then the slice is equivalent to the list of pointers: + // [P+i*M for i in range(N)] + // + // For example, v[:0] and v[:1] are slices with the same starting pointer, + // but they are clearly different values. Using the slice pointer alone + // violates the assumption that equal pointers imply equal values. - step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} withIndexes := func(ix, iy int) SliceIndex { if ix >= 0 { step.vx, step.xkey = vx.Index(ix), ix @@ -470,7 +503,12 @@ func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // Cycle-detection for maps. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) // We combine and sort the two map keys so that we can perform the // comparisons in a deterministic order. @@ -507,7 +545,12 @@ func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // Cycle-detection for pointers.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) vx, vy = vx.Elem(), vy.Elem() s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index abc3a1c3..dd032354 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -8,8 +8,8 @@ package cmp import "reflect" -const supportAllowUnexported = false +const supportExporters = false func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { - panic("retrieveUnexportedField is not implemented") + panic("no support for forcibly accessing unexported fields") } diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 59d4ee91..57020e26 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -11,7 +11,7 @@ import ( "unsafe" ) -const supportAllowUnexported = true +const supportExporters = true // retrieveUnexportedField uses unsafe to forcibly retrieve any field from // a struct such that the value has read-write permissions. @@ -19,5 +19,7 @@ const supportAllowUnexported = true // The parent struct, v, must be addressable, while f must be a StructField // describing the field to retrieve. func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { - return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() + // See https://github.com/google/go-cmp/issues/167 for discussion of the + // following expression. + return reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() } diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 79344816..abbd2a63 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -225,8 +225,20 @@ func (validator) apply(s *state, vx, vy reflect.Value) { // Unable to Interface implies unexported field without visibility access. if !vx.CanInterface() || !vy.CanInterface() { - const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" - panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + var name string + if t := s.curPath.Index(-2).Type(); t.Name() != "" { + // Named type with unexported fields. + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. 
+ var pkgPath string + for i := 0; i < t.NumField() && pkgPath == ""; i++ { + pkgPath = t.Field(i).PkgPath + } + name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) + } + panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) } panic("not reachable") @@ -360,9 +372,8 @@ func (cm comparer) String() string { return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } -// AllowUnexported returns an Option that forcibly allows operations on -// unexported fields in certain structs, which are specified by passing in a -// value of each struct type. +// Exporter returns an Option that specifies whether Equal is allowed to +// introspect into the unexported fields of certain struct types. // // Users of this option must understand that comparing on unexported fields // from external packages is not safe since changes in the internal @@ -386,10 +397,24 @@ func (cm comparer) String() string { // // In other cases, the cmpopts.IgnoreUnexported option can be used to ignore // all unexported fields on specified struct types. -func AllowUnexported(types ...interface{}) Option { - if !supportAllowUnexported { - panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") +func Exporter(f func(reflect.Type) bool) Option { + if !supportExporters { + panic("Exporter is not supported on purego builds") } + return exporter(f) +} + +type exporter func(reflect.Type) bool + +func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// AllowUnexported returns an Options that allows Equal to forcibly introspect +// unexported fields of the specified struct types. +// +// See Exporter for the proper use of this option. +func AllowUnexported(types ...interface{}) Option { m := make(map[reflect.Type]bool) for _, typ := range types { t := reflect.TypeOf(typ) @@ -398,13 +423,7 @@ func AllowUnexported(types ...interface{}) Option { } m[t] = true } - return visibleStructs(m) -} - -type visibleStructs map[reflect.Type]bool - -func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { - panic("not implemented") + return exporter(func(t reflect.Type) bool { return m[t] }) } // Result represents the comparison result for a single node and @@ -436,6 +455,11 @@ func (r Result) ByFunc() bool { return r.flags&reportByFunc != 0 } +// ByCycle reports whether a reference cycle was detected. +func (r Result) ByCycle() bool { + return r.flags&reportByCycle != 0 +} + type resultFlags uint const ( @@ -446,6 +470,7 @@ const ( reportByIgnore reportByMethod reportByFunc + reportByCycle ) // Reporter is an Option that can be passed to Equal. When Equal traverses diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 96fffd29..509d6b85 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -10,6 +10,8 @@ import ( "strings" "unicode" "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" ) // Path is a list of PathSteps describing the sequence of operations to get @@ -41,7 +43,7 @@ type PathStep interface { // In some cases, one or both may be invalid or have restrictions: // • For StructField, both are not interface-able if the current field // is unexported and the struct type is not explicitly permitted by - // AllowUnexported to traverse unexported fields. 
+ // an Exporter to traverse unexported fields. // • For SliceIndex, one may be invalid if an element is missing from // either the x or y slice. // • For MapIndex, one may be invalid if an entry is missing from @@ -207,6 +209,7 @@ type SliceIndex struct{ *sliceIndex } type sliceIndex struct { pathStep xkey, ykey int + isSlice bool // False for reflect.Array } func (si SliceIndex) Type() reflect.Type { return si.typ } @@ -301,6 +304,72 @@ func (tf Transform) Func() reflect.Value { return tf.trans.fnc } // The == operator can be used to detect the exact option used. func (tf Transform) Option() Option { return tf.trans } +// pointerPath represents a dual-stack of pointers encountered when +// recursively traversing the x and y values. This data structure supports +// detection of cycles and determining whether the cycles are equal. +// In Go, cycles can occur via pointers, slices, and maps. +// +// The pointerPath uses a map to represent a stack; where descension into a +// pointer pushes the address onto the stack, and ascension from a pointer +// pops the address from the stack. Thus, when traversing into a pointer from +// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles +// by checking whether the pointer has already been visited. The cycle detection +// uses a separate stack for the x and y values. +// +// If a cycle is detected we need to determine whether the two pointers +// should be considered equal. The definition of equality chosen by Equal +// requires two graphs to have the same structure. To determine this, both the +// x and y values must have a cycle where the previous pointers were also +// encountered together as a pair. +// +// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and +// MapIndex with pointer information for the x and y values. +// Suppose px and py are two pointers to compare, we then search the +// Path for whether px was ever encountered in the Path history of x, and +// similarly so with py. If either side has a cycle, the comparison is only +// equal if both px and py have a cycle resulting from the same PathStep. +// +// Using a map as a stack is more performant as we can perform cycle detection +// in O(1) instead of O(N) where N is len(Path). +type pointerPath struct { + // mx is keyed by x pointers, where the value is the associated y pointer. + mx map[value.Pointer]value.Pointer + // my is keyed by y pointers, where the value is the associated x pointer. + my map[value.Pointer]value.Pointer +} + +func (p *pointerPath) Init() { + p.mx = make(map[value.Pointer]value.Pointer) + p.my = make(map[value.Pointer]value.Pointer) +} + +// Push indicates intent to descend into pointers vx and vy where +// visited reports whether either has been seen before. If visited before, +// equal reports whether both pointers were encountered together. +// Pop must be called if and only if the pointers were never visited. +// +// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map +// and be non-nil. +func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { + px := value.PointerOf(vx) + py := value.PointerOf(vy) + _, ok1 := p.mx[px] + _, ok2 := p.my[py] + if ok1 || ok2 { + equal = p.mx[px] == py && p.my[py] == px // Pointers paired together + return equal, true + } + p.mx[px] = py + p.my[py] = px + return false, false +} + +// Pop ascends from pointers vx and vy.
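Returning to the options.go hunk above: since AllowUnexported is now a thin wrapper over Exporter, opting a type into unexported-field comparison can be written either way. A short sketch under that assumption; the credentials type is hypothetical:

package main

import (
	"fmt"
	"reflect"

	"github.com/google/go-cmp/cmp"
)

type credentials struct {
	token string // unexported: Equal panics without an Exporter or Ignore option
}

func main() {
	allow := cmp.Exporter(func(t reflect.Type) bool {
		return t == reflect.TypeOf(credentials{})
	})
	fmt.Println(cmp.Equal(credentials{token: "a"}, credentials{token: "a"}, allow)) // true
	fmt.Println(cmp.Equal(credentials{token: "a"}, credentials{token: "b"}, allow)) // false
}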
+func (p pointerPath) Pop(vx, vy reflect.Value) { + delete(p.mx, value.PointerOf(vx)) + delete(p.my, value.PointerOf(vy)) +} + // isExported reports whether the identifier is exported. func isExported(id string) bool { r, _ := utf8.DecodeRuneInString(id) diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 17a05eed..d3fa154d 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -81,14 +81,19 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { return opts.FormatDiffSlice(v) } + var withinSlice bool + if v.parent != nil && (v.parent.Type.Kind() == reflect.Slice || v.parent.Type.Kind() == reflect.Array) { + withinSlice = true + } + // For leaf nodes, format the value based on the reflect.Values alone. if v.MaxDepth == 0 { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. if v.NumDiff == 0 { - outx := opts.FormatValue(v.ValueX, visitedPointers{}) - outy := opts.FormatValue(v.ValueY, visitedPointers{}) + outx := opts.FormatValue(v.ValueX, withinSlice, visitedPointers{}) + outy := opts.FormatValue(v.ValueY, withinSlice, visitedPointers{}) if v.NumIgnored > 0 && v.NumSame == 0 { return textEllipsis } else if outx.Len() < outy.Len() { @@ -101,8 +106,8 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { // Format unequal. assert(opts.DiffMode == diffUnknown) var list textList - outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) - outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, withinSlice, visitedPointers{}) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, withinSlice, visitedPointers{}) if outx != nil { list = append(list, textRecord{Diff: '-', Value: outx}) } @@ -111,9 +116,9 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { } return opts.WithTypeMode(emitType).FormatType(v.Type, list) case diffRemoved: - return opts.FormatValue(v.ValueX, visitedPointers{}) + return opts.FormatValue(v.ValueX, withinSlice, visitedPointers{}) case diffInserted: - return opts.FormatValue(v.ValueY, visitedPointers{}) + return opts.FormatValue(v.ValueY, withinSlice, visitedPointers{}) default: panic("invalid diff mode") } diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 2761b628..8f108833 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -74,7 +74,7 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { // FormatValue prints the reflect.Value, taking extra care to avoid descending // into pointers already in m. As pointers are visited, m is also updated. 
-func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { +func (opts formatOptions) FormatValue(v reflect.Value, withinSlice bool, m visitedPointers) (out textNode) { if !v.IsValid() { return nil } @@ -108,12 +108,15 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t return textLine(fmt.Sprint(v.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return textLine(fmt.Sprint(v.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - // Unnamed uints are usually bytes or words, so use hexadecimal. - if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if withinSlice { return textLine(formatHex(v.Uint())) } return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) case reflect.Float32, reflect.Float64: return textLine(fmt.Sprint(v.Float())) case reflect.Complex64, reflect.Complex128: @@ -129,7 +132,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t if value.IsZero(vv) { continue // Elide fields with zero values } - s := opts.WithTypeMode(autoType).FormatValue(vv, m) + s := opts.WithTypeMode(autoType).FormatValue(vv, false, m) list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) } return textWrap{"{", list, "}"} @@ -156,7 +159,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t continue } } - s := opts.WithTypeMode(elideType).FormatValue(vi, m) + s := opts.WithTypeMode(elideType).FormatValue(vi, true, m) list = append(list, textRecord{Value: s}) } return textWrap{ptr + "{", list, "}"} @@ -171,7 +174,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t var list textList for _, k := range value.SortKeys(v.MapKeys()) { sk := formatMapKey(k) - sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), false, m) list = append(list, textRecord{Key: sk, Value: sv}) } if opts.PrintAddresses { @@ -189,7 +192,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t ptr = formatPointer(v) } skipType = true // Let the underlying value print the type instead - return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), false, m), ""} case reflect.Interface: if v.IsNil() { return textNil @@ -197,7 +200,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. 
skipType = true // Print the concrete type instead - return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), false, m) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) } @@ -209,7 +212,7 @@ func formatMapKey(v reflect.Value) string { var opts formatOptions opts.TypeMode = elideType opts.ShallowPointers = true - s := opts.FormatValue(v, visitedPointers{}).String() + s := opts.FormatValue(v, false, visitedPointers{}).String() return strings.TrimSpace(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index eafcf2e4..6f0847e6 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -172,7 +172,9 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Elem().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ss = append(ss, fmt.Sprint(v.Index(i).Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: ss = append(ss, formatHex(v.Index(i).Uint())) case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: ss = append(ss, fmt.Sprint(v.Index(i).Interface())) diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 00000000..d8156a60 --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 00000000..04fdf09f --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 00000000..b4bb97f6 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 00000000..5dc68268 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 00000000..9d92c11f --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 00000000..fa820b9d --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. 
+// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 00000000..5b8a4b9a --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod new file mode 100644 index 00000000..fc84cd79 --- /dev/null +++ b/vendor/github.com/google/uuid/go.mod @@ -0,0 +1 @@ +module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 00000000..b1746163 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. 
It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 00000000..7f9e0c6c --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 00000000..d651a2b0 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. 
+func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 00000000..24b78edc --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This removes the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 00000000..0cbbcddb --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 00000000..f326b54d --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types.
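Because UUID implements both sql.Scanner (below) and driver.Valuer, values round-trip through database/sql without manual string conversion. A hedged sketch; the driver name, DSN, and things table are placeholders, not part of this package:

package main

import (
	"database/sql"
	"log"

	"github.com/google/uuid"
	// a real database driver must also be imported for sql.Open to work
)

func main() {
	// "somedriver", "dsn" and the things table are illustrative placeholders.
	db, err := sql.Open("somedriver", "dsn")
	if err != nil {
		log.Fatal(err)
	}
	id := uuid.New()
	// uuid.UUID implements driver.Valuer, so it can be bound directly.
	if _, err := db.Exec(`INSERT INTO things (id) VALUES (?)`, id); err != nil {
		log.Fatal(err)
	}
	// *uuid.UUID implements sql.Scanner, so it can be scanned back directly.
	var got uuid.UUID
	if err := db.QueryRow(`SELECT id FROM things WHERE id = ?`, id).Scan(&got); err != nil {
		log.Fatal(err)
	}
	log.Println(got == id)
}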
+func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 00000000..e6ef06cd --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. 
(section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 00000000..5ea6c737 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. 
+func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 00000000..524404cc --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,245 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
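Parse's length switch above accepts four encodings of the same value; ParseBytes below mirrors it for byte slices. A small sketch showing that all four decode to one 16-byte array:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// The four input shapes handled by Parse's length switch.
	for _, s := range []string{
		"123e4567-e89b-12d3-a456-426655440000",
		"urn:uuid:123e4567-e89b-12d3-a456-426655440000",
		"{123e4567-e89b-12d3-a456-426655440000}",
		"123e4567e89b12d3a456426655440000",
	} {
		u, err := uuid.Parse(s)
		fmt.Println(u, err) // the same UUID and a nil error each time
	}
}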
+func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. 
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 00000000..199a1ac6
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns Nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nodeMu.Unlock()
+
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	timeLow := uint32(now & 0xffffffff)
+	timeMid := uint16((now >> 32) & 0xffff)
+	timeHi := uint16((now >> 48) & 0x0fff)
+	timeHi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], timeLow)
+	binary.BigEndian.PutUint16(uuid[4:], timeMid)
+	binary.BigEndian.PutUint16(uuid[6:], timeHi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID[:])
+
+	return uuid, nil
+}
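
version1.go's NewUUID is the time/node-based generator, while version4.go below wraps crypto/rand. A hedged sketch of choosing between them; the makeID helper and its fallback policy are illustrative only, not something this package or the provider prescribes:

	package main

	import (
		"fmt"

		"github.com/google/uuid"
	)

	// makeID prefers a time-based v1 UUID and falls back to a random v4
	// UUID when the node/clock state is unavailable (illustrative policy).
	func makeID() uuid.UUID {
		if id, err := uuid.NewUUID(); err == nil {
			return id
		}
		return uuid.New() // panics only if crypto/rand fails
	}

	func main() {
		fmt.Println(makeID())
	}
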
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 00000000..84af91c9
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics. New is equivalent to
+// the expression
+//
+//    uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//  Randomly generated UUIDs have 122 random bits. One's annual risk of being
+//  hit by a meteorite is estimated to be one chance in 17 billion, that
+//  means the probability is about 0.00000000006 (6 × 10−11),
+//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//  year and having one duplicate.
+func NewRandom() (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(rander, uuid[:])
+	if err != nil {
+		return Nil, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
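
The two masking lines in NewRandom are what stamp the version and variant bits into bytes 6 and 8; a small test-style sketch (file and test names assumed) that checks them through the accessors defined in uuid.go above:

	package uuid_test

	import (
		"testing"

		"github.com/google/uuid"
	)

	// Verifies that NewRandom produces a well-formed RFC 4122 v4 value.
	func TestNewRandomSetsVersionAndVariant(t *testing.T) {
		id, err := uuid.NewRandom()
		if err != nil {
			t.Fatal(err)
		}
		if id.Version() != 4 {
			t.Errorf("version = %s, want VERSION_4", id.Version())
		}
		if id.Variant() != uuid.RFC4122 {
			t.Errorf("variant = %s, want RFC4122", id.Variant())
		}
	}
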
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/accessTokenValidators.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/accessTokenValidators.go
deleted file mode 100644
index f11debf3..00000000
--- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/accessTokenValidators.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package pingaccess
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-)
-
-type AccessTokenValidatorsService service
-
-type AccessTokenValidatorsAPI interface {
-	GetAccessTokenValidatorsCommand(input *GetAccessTokenValidatorsCommandInput) (result *AccessTokenValidatorsView, resp *http.Response, err error)
-	AddAccessTokenValidatorCommand(input *AddAccessTokenValidatorCommandInput) (result *AccessTokenValidatorView, resp *http.Response, err error)
-	GetAccessTokenValidatorDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error)
-	DeleteAccessTokenValidatorCommand(input *DeleteAccessTokenValidatorCommandInput) (resp *http.Response, err error)
-	GetAccessTokenValidatorCommand(input *GetAccessTokenValidatorCommandInput) (result *AccessTokenValidatorView, resp *http.Response, err error)
-	UpdateAccessTokenValidatorCommand(input *UpdateAccessTokenValidatorCommandInput) (result *AccessTokenValidatorView, resp *http.Response, err error)
-}
-
-//GetAccessTokenValidatorsCommand - Get all Access Token Validators
-//RequestType: GET
-//Input: input *GetAccessTokenValidatorsCommandInput
-func (s *AccessTokenValidatorsService) GetAccessTokenValidatorsCommand(input *GetAccessTokenValidatorsCommandInput) (result *AccessTokenValidatorsView, resp *http.Response, err error) {
-	path := "/accessTokenValidators"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	q := rel.Query()
-	if input.Page != "" {
-		q.Set("page", input.Page)
-	}
-	if input.NumberPerPage != "" {
-		q.Set("numberPerPage", input.NumberPerPage)
-	}
-	if input.Filter != "" {
-		q.Set("filter", input.Filter)
-	}
-	if input.Name != "" {
-		q.Set("name", input.Name)
-	}
-	if input.SortKey != "" {
-		q.Set("sortKey", input.SortKey)
-	}
-	if input.Order != "" {
-		q.Set("order", input.Order)
-	}
-	rel.RawQuery = q.Encode()
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetAccessTokenValidatorsCommandInput struct {
-	Page          string
-	NumberPerPage string
-	Filter        string
-	Name          string
-	SortKey       string
-	Order         string
-}
-
-//AddAccessTokenValidatorCommand - Create an Access Token Validator
-//RequestType: POST
-//Input: input *AddAccessTokenValidatorCommandInput
-func (s *AccessTokenValidatorsService) AddAccessTokenValidatorCommand(input *AddAccessTokenValidatorCommandInput) (result *AccessTokenValidatorView, resp *http.Response, err error) {
-	path := "/accessTokenValidators"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("POST", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type AddAccessTokenValidatorCommandInput struct {
-	Body AccessTokenValidatorView
-}
-
-//GetAccessTokenValidatorDescriptorsCommand - Get descriptors for all Access Token Validators
-//RequestType: GET
-//Input:
-func (s *AccessTokenValidatorsService) GetAccessTokenValidatorDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) {
-	path := "/accessTokenValidators/descriptors"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-//DeleteAccessTokenValidatorCommand - Delete a Access Token Validator
-//RequestType: DELETE
-//Input: input *DeleteAccessTokenValidatorCommandInput
-func (s *AccessTokenValidatorsService) DeleteAccessTokenValidatorCommand(input *DeleteAccessTokenValidatorCommandInput) (resp *http.Response, err error) {
-	path := "/accessTokenValidators/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("DELETE", rel, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err = s.client.do(req, nil)
-	if err != nil {
-		return resp, err
-	}
-	return resp, nil
-
-}
-
-type DeleteAccessTokenValidatorCommandInput struct {
-	Id string
-}
-
-//GetAccessTokenValidatorCommand - Get an Access Token Validator
-//RequestType: GET
-//Input: input *GetAccessTokenValidatorCommandInput
-func (s *AccessTokenValidatorsService) GetAccessTokenValidatorCommand(input *GetAccessTokenValidatorCommandInput) (result *AccessTokenValidatorView, resp *http.Response, err error) {
-	path := "/accessTokenValidators/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetAccessTokenValidatorCommandInput struct {
-	Id string
-}
-
-//UpdateAccessTokenValidatorCommand - Update an Access Token Validator
-//RequestType: PUT
-//Input: input *UpdateAccessTokenValidatorCommandInput
-func (s *AccessTokenValidatorsService) UpdateAccessTokenValidatorCommand(input *UpdateAccessTokenValidatorCommandInput) (result *AccessTokenValidatorView, resp *http.Response, err error) {
-	path := "/accessTokenValidators/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("PUT", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type UpdateAccessTokenValidatorCommandInput struct {
-	Body AccessTokenValidatorView
-	Id   string
-}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/acme.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/acme.go
deleted file mode 100644
index 3c6bd7ef..00000000
--- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/acme.go
+++ /dev/null
@@ -1,458 +0,0 @@
-package pingaccess
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-)
-
-type AcmeService service
-
-type AcmeAPI interface {
-	GetAcmeServersCommand(input *GetAcmeServersCommandInput) (result *AcmeServersView, resp *http.Response, err error)
-	AddAcmeServerCommand(input *AddAcmeServerCommandInput) (result *AcmeServerView, resp *http.Response, err error)
-	GetDefaultAcmeServerCommand() (result *LinkView, resp *http.Response, err error)
-	UpdateDefaultAcmeServerCommand(input *UpdateDefaultAcmeServerCommandInput) (result *LinkView, resp *http.Response, err error)
-	DeleteAcmeServerCommand(input *DeleteAcmeServerCommandInput) (result *AcmeServerView, resp *http.Response, err error)
-	GetAcmeServerCommand(input *GetAcmeServerCommandInput) (result *AcmeServerView, resp *http.Response, err error)
-	GetAcmeAccountsCommand(input *GetAcmeAccountsCommandInput) (result *AcmeAccountView, resp *http.Response, err error)
-	AddAcmeAccountCommand(input *AddAcmeAccountCommandInput) (result *AcmeAccountView, resp *http.Response, err error)
-	DeleteAcmeAccountCommand(input *DeleteAcmeAccountCommandInput) (result *AcmeAccountView, resp *http.Response, err error)
-	GetAcmeAccountCommand(input *GetAcmeAccountCommandInput) (result *AcmeAccountView, resp *http.Response, err error)
-	GetAcmeCertificateRequestsCommand(input *GetAcmeCertificateRequestsCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error)
-	AddAcmeCertificateRequestCommand(input *AddAcmeCertificateRequestCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error)
-	DeleteAcmeCertificateRequestCommand(input *DeleteAcmeCertificateRequestCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error)
-	GetAcmeCertificateRequestCommand(input *GetAcmeCertificateRequestCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error)
-}
-
-//GetAcmeServersCommand - Get all ACME Servers
-//RequestType: GET
-//Input: input *GetAcmeServersCommandInput
-func (s *AcmeService) GetAcmeServersCommand(input *GetAcmeServersCommandInput) (result *AcmeServersView, resp *http.Response, err error) {
-	path := "/acme/servers"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	q := rel.Query()
-	if input.Page != "" {
-		q.Set("page", input.Page)
-	}
-	if input.NumberPerPage != "" {
-		q.Set("numberPerPage", input.NumberPerPage)
-	}
-	if input.Filter != "" {
-		q.Set("filter", input.Filter)
-	}
-	if input.Name != "" {
-		q.Set("name", input.Name)
-	}
-	if input.SortKey != "" {
-		q.Set("sortKey", input.SortKey)
-	}
-	if input.Order != "" {
-		q.Set("order", input.Order)
-	}
-	rel.RawQuery = q.Encode()
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetAcmeServersCommandInput struct {
-	Page          string
-	NumberPerPage string
-	Filter        string
-	Name          string
-	SortKey       string
-	Order         string
-}
-
-//AddAcmeServerCommand - Add an ACME Server
-//RequestType: POST
-//Input: input *AddAcmeServerCommandInput
-func (s *AcmeService) AddAcmeServerCommand(input *AddAcmeServerCommandInput) (result *AcmeServerView, resp *http.Response, err error) {
-	path := "/acme/servers"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("POST", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type AddAcmeServerCommandInput struct {
-	Body AcmeServerView
-}
-
-//GetDefaultAcmeServerCommand - Get the default ACME Server
-//RequestType: GET
-//Input:
-func (s *AcmeService) GetDefaultAcmeServerCommand() (result *LinkView, resp *http.Response, err error) {
-	path := "/acme/servers/default"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-//UpdateDefaultAcmeServerCommand - Update the default ACME Server
-//RequestType: PUT
-//Input: input *UpdateDefaultAcmeServerCommandInput
-func (s *AcmeService) UpdateDefaultAcmeServerCommand(input *UpdateDefaultAcmeServerCommandInput) (result *LinkView, resp *http.Response, err error) {
-	path := "/acme/servers/default"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("PUT", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type UpdateDefaultAcmeServerCommandInput struct {
-	Body LinkView
-}
-
-//DeleteAcmeServerCommand - Delete an ACME Server
-//RequestType: DELETE
-//Input: input *DeleteAcmeServerCommandInput
-func (s *AcmeService) DeleteAcmeServerCommand(input *DeleteAcmeServerCommandInput) (result *AcmeServerView, resp *http.Response, err error) {
-	path := "/acme/servers/{acmeServerId}"
-	path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("DELETE", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type DeleteAcmeServerCommandInput struct {
-	AcmeServerId string
-}
-
-//GetAcmeServerCommand - Get an ACME Server
-//RequestType: GET
-//Input: input *GetAcmeServerCommandInput
-func (s *AcmeService) GetAcmeServerCommand(input *GetAcmeServerCommandInput) (result *AcmeServerView, resp *http.Response, err error) {
-	path := "/acme/servers/{acmeServerId}"
-	path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetAcmeServerCommandInput struct {
-	AcmeServerId string
-}
-
-//GetAcmeAccountsCommand - Get all ACME Accounts
-//RequestType: GET
-//Input: input *GetAcmeAccountsCommandInput
-func (s *AcmeService) GetAcmeAccountsCommand(input *GetAcmeAccountsCommandInput) (result *AcmeAccountView, resp *http.Response, err error) {
-	path := "/acme/servers/{acmeServerId}/accounts"
-	path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	q := rel.Query()
-	if input.Page != "" {
-		q.Set("page", input.Page)
-	}
-	if input.NumberPerPage != "" {
-		q.Set("numberPerPage", input.NumberPerPage)
-	}
-	if input.SortKey != "" {
-		q.Set("sortKey", input.SortKey)
-	}
-	if input.Order != "" {
-		q.Set("order", input.Order)
-	}
-	rel.RawQuery = q.Encode()
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetAcmeAccountsCommandInput struct {
-	Page          string
-	NumberPerPage string
-	SortKey       string
-	Order         string
-
-	AcmeServerId string
-}
-
-//AddAcmeAccountCommand - Add an ACME Account
-//RequestType: POST
-//Input: input *AddAcmeAccountCommandInput
-func (s *AcmeService) AddAcmeAccountCommand(input *AddAcmeAccountCommandInput) (result *AcmeAccountView, resp *http.Response, err error) {
-	path := "/acme/servers/{acmeServerId}/accounts"
-	path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("POST", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type AddAcmeAccountCommandInput struct {
-	Body         AcmeAccountView
-	AcmeServerId string
-}
-
-//DeleteAcmeAccountCommand - Delete an ACME Account
-//RequestType: DELETE
-//Input: input *DeleteAcmeAccountCommandInput
-func (s *AcmeService) DeleteAcmeAccountCommand(input *DeleteAcmeAccountCommandInput) (result *AcmeAccountView, resp *http.Response, err error) {
-	path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}"
-	path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1)
-
-	path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("DELETE", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type DeleteAcmeAccountCommandInput struct {
-	AcmeServerId  string
-	AcmeAccountId string
-}
-
-//GetAcmeAccountCommand - Get an ACME Account
-//RequestType: GET
-//Input: input *GetAcmeAccountCommandInput
-func (s *AcmeService) GetAcmeAccountCommand(input *GetAcmeAccountCommandInput) (result *AcmeAccountView, resp *http.Response, err error) {
-	path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}"
-	path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1)
-
-	path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetAcmeAccountCommandInput struct {
-	AcmeServerId  string
-	AcmeAccountId string
-}
-
-//GetAcmeCertificateRequestsCommand - Get all ACME Certificate Requests
-//RequestType: GET
-//Input: input *GetAcmeCertificateRequestsCommandInput
-func (s *AcmeService) GetAcmeCertificateRequestsCommand(input *GetAcmeCertificateRequestsCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error) {
-	path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}/certificateRequests"
-	path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1)
-
-	path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	q := rel.Query()
-	if input.KeyPairId != "" {
-		q.Set("keyPairId", input.KeyPairId)
-	}
-	if input.Page != "" {
-		q.Set("page", input.Page)
-	}
-	if input.NumberPerPage != "" {
-		q.Set("numberPerPage", input.NumberPerPage)
-	}
-	if input.SortKey != "" {
-		q.Set("sortKey", input.SortKey)
-	}
if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetAcmeCertificateRequestsCommandInput struct { - KeyPairId string - Page string - NumberPerPage string - SortKey string - Order string - - AcmeServerId string - AcmeAccountId string -} - -//AddAcmeCertificateRequestCommand - Initiate the ACME protocol -//RequestType: POST -//Input: input *AddAcmeCertificateRequestCommandInput -func (s *AcmeService) AddAcmeCertificateRequestCommand(input *AddAcmeCertificateRequestCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error) { - path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}/certificateRequests" - path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) - - path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddAcmeCertificateRequestCommandInput struct { - Body AcmeCertificateRequestView - AcmeServerId string - AcmeAccountId string -} - -//DeleteAcmeCertificateRequestCommand - Delete an ACME Certificate Request -//RequestType: DELETE -//Input: input *DeleteAcmeCertificateRequestCommandInput -func (s *AcmeService) DeleteAcmeCertificateRequestCommand(input *DeleteAcmeCertificateRequestCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error) { - path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}/certificateRequests/{acmeCertificateRequestId}" - path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) - - path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1) - - path = strings.Replace(path, "{acmeCertificateRequestId}", input.AcmeCertificateRequestId, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type DeleteAcmeCertificateRequestCommandInput struct { - AcmeServerId string - AcmeAccountId string - AcmeCertificateRequestId string -} - -//GetAcmeCertificateRequestCommand - Get an ACME Certificate Request -//RequestType: GET -//Input: input *GetAcmeCertificateRequestCommandInput -func (s *AcmeService) GetAcmeCertificateRequestCommand(input *GetAcmeCertificateRequestCommandInput) (result *AcmeCertificateRequestView, resp *http.Response, err error) { - path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}/certificateRequests/{acmeCertificateRequestId}" - path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) - - path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1) - - path = strings.Replace(path, "{acmeCertificateRequestId}", input.AcmeCertificateRequestId, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err 
-	}
-	return result, resp, nil
-
-}
-
-type GetAcmeCertificateRequestCommandInput struct {
-	AcmeServerId             string
-	AcmeAccountId            string
-	AcmeCertificateRequestId string
-}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminConfig.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminConfig.go
deleted file mode 100644
index c3671eb7..00000000
--- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminConfig.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package pingaccess
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-)
-
-type AdminConfigService service
-
-type AdminConfigAPI interface {
-	DeleteAdminConfigurationCommand() (resp *http.Response, err error)
-	GetAdminConfigurationCommand() (result *AdminConfigurationView, resp *http.Response, err error)
-	UpdateAdminConfigurationCommand(input *UpdateAdminConfigurationCommandInput) (result *AdminConfigurationView, resp *http.Response, err error)
-	GetReplicaAdminsCommand() (result *ReplicaAdminsView, resp *http.Response, err error)
-	AddReplicaAdminCommand(input *AddReplicaAdminCommandInput) (result *ReplicaAdminView, resp *http.Response, err error)
-	DeleteReplicaAdminCommand(input *DeleteReplicaAdminCommandInput) (resp *http.Response, err error)
-	GetReplicaAdminCommand(input *GetReplicaAdminCommandInput) (result *ReplicaAdminView, resp *http.Response, err error)
-	UpdateAdminReplicaCommand(input *UpdateAdminReplicaCommandInput) (result *ReplicaAdminView, resp *http.Response, err error)
-	GetAdminReplicaFileCommand(input *GetAdminReplicaFileCommandInput) (resp *http.Response, err error)
-}
-
-//DeleteAdminConfigurationCommand - Resets the Admin Config to default values
-//RequestType: DELETE
-//Input:
-func (s *AdminConfigService) DeleteAdminConfigurationCommand() (resp *http.Response, err error) {
-	path := "/adminConfig"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("DELETE", rel, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err = s.client.do(req, nil)
-	if err != nil {
-		return resp, err
-	}
-	return resp, nil
-
-}
-
-//GetAdminConfigurationCommand - Get the Admin Config
-//RequestType: GET
-//Input:
-func (s *AdminConfigService) GetAdminConfigurationCommand() (result *AdminConfigurationView, resp *http.Response, err error) {
-	path := "/adminConfig"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-//UpdateAdminConfigurationCommand - Update the Admin Config
-//RequestType: PUT
-//Input: input *UpdateAdminConfigurationCommandInput
-func (s *AdminConfigService) UpdateAdminConfigurationCommand(input *UpdateAdminConfigurationCommandInput) (result *AdminConfigurationView, resp *http.Response, err error) {
-	path := "/adminConfig"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("PUT", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type UpdateAdminConfigurationCommandInput struct {
-	Body AdminConfigurationView
-}
-
-//GetReplicaAdminsCommand - Get list of ReplicaAdmins
-//RequestType: GET
-//Input:
-func (s *AdminConfigService) GetReplicaAdminsCommand() (result *ReplicaAdminsView, resp *http.Response, err error) {
-	path := "/adminConfig/replicaAdmins"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-//AddReplicaAdminCommand - Add a ReplicaAdmin
-//RequestType: POST
-//Input: input *AddReplicaAdminCommandInput
-func (s *AdminConfigService) AddReplicaAdminCommand(input *AddReplicaAdminCommandInput) (result *ReplicaAdminView, resp *http.Response, err error) {
-	path := "/adminConfig/replicaAdmins"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("POST", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type AddReplicaAdminCommandInput struct {
-	Body ReplicaAdminView
-}
-
-//DeleteReplicaAdminCommand - Delete a ReplicaAdmin
-//RequestType: DELETE
-//Input: input *DeleteReplicaAdminCommandInput
-func (s *AdminConfigService) DeleteReplicaAdminCommand(input *DeleteReplicaAdminCommandInput) (resp *http.Response, err error) {
-	path := "/adminConfig/replicaAdmins/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("DELETE", rel, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err = s.client.do(req, nil)
-	if err != nil {
-		return resp, err
-	}
-	return resp, nil
-
-}
-
-type DeleteReplicaAdminCommandInput struct {
-	Id string
-}
-
-//GetReplicaAdminCommand - Get a ReplicaAdmin
-//RequestType: GET
-//Input: input *GetReplicaAdminCommandInput
-func (s *AdminConfigService) GetReplicaAdminCommand(input *GetReplicaAdminCommandInput) (result *ReplicaAdminView, resp *http.Response, err error) {
-	path := "/adminConfig/replicaAdmins/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetReplicaAdminCommandInput struct {
-	Id string
-}
-
-//UpdateAdminReplicaCommand - Update a ReplicaAdmin
-//RequestType: PUT
-//Input: input *UpdateAdminReplicaCommandInput
-func (s *AdminConfigService) UpdateAdminReplicaCommand(input *UpdateAdminReplicaCommandInput) (result *ReplicaAdminView, resp *http.Response, err error) {
-	path := "/adminConfig/replicaAdmins/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("PUT", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type UpdateAdminReplicaCommandInput struct {
-	Body ReplicaAdminView
-	Id   string
-}
-
-//GetAdminReplicaFileCommand - Get configuration file for a given ReplicaAdmin
-//RequestType: POST
-//Input: input *GetAdminReplicaFileCommandInput
-func (s *AdminConfigService) GetAdminReplicaFileCommand(input *GetAdminReplicaFileCommandInput) (resp *http.Response, err error) {
-	path := "/adminConfig/replicaAdmins/{id}/config"
-	path = strings.Replace(path, "{id}", input.Id, -1)
"{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type GetAdminReplicaFileCommandInput struct { - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminSessionInfo.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminSessionInfo.go deleted file mode 100644 index 3f435b39..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/adminSessionInfo.go +++ /dev/null @@ -1,72 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type AdminSessionInfoService service - -type AdminSessionInfoAPI interface { - AdminSessionDeleteCommand() (resp *http.Response, err error) - AdminSessionInfoCommand() (result *SessionInfo, resp *http.Response, err error) - AdminSessionInfoCheckCommand() (result *SessionInfo, resp *http.Response, err error) -} - -//AdminSessionDeleteCommand - Invalidate the Admin session information -//RequestType: DELETE -//Input: -func (s *AdminSessionInfoService) AdminSessionDeleteCommand() (resp *http.Response, err error) { - path := "/adminSessionInfo" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//AdminSessionInfoCommand - Return the Admin session information -//RequestType: GET -//Input: -func (s *AdminSessionInfoService) AdminSessionInfoCommand() (result *SessionInfo, resp *http.Response, err error) { - path := "/adminSessionInfo" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//AdminSessionInfoCheckCommand - Return the Admin session information without affecting session expiration -//RequestType: GET -//Input: -func (s *AdminSessionInfoService) AdminSessionInfoCheckCommand() (result *SessionInfo, resp *http.Response, err error) { - path := "/adminSessionInfo/checkOnly" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/agents.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/agents.go deleted file mode 100644 index 5ce40e08..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/agents.go +++ /dev/null @@ -1,269 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type AgentsService service - -type AgentsAPI interface { - GetAgentsCommand(input *GetAgentsCommandInput) (result *AgentsView, resp *http.Response, err error) - AddAgentCommand(input *AddAgentCommandInput) (result *AgentView, resp *http.Response, err error) - GetAgentCertificatesCommand(input *GetAgentCertificatesCommandInput) (result *AgentCertificatesView, resp *http.Response, err error) - GetAgentCertificateCommand(input 
-	GetAgentFileCommand(input *GetAgentFileCommandInput) (resp *http.Response, err error)
-	DeleteAgentCommand(input *DeleteAgentCommandInput) (resp *http.Response, err error)
-	GetAgentCommand(input *GetAgentCommandInput) (result *AgentView, resp *http.Response, err error)
-	UpdateAgentCommand(input *UpdateAgentCommandInput) (result *AgentView, resp *http.Response, err error)
-}
-
-//GetAgentsCommand - Get all Agents
-//RequestType: GET
-//Input: input *GetAgentsCommandInput
-func (s *AgentsService) GetAgentsCommand(input *GetAgentsCommandInput) (result *AgentsView, resp *http.Response, err error) {
-	path := "/agents"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	q := rel.Query()
-	if input.Page != "" {
-		q.Set("page", input.Page)
-	}
-	if input.NumberPerPage != "" {
-		q.Set("numberPerPage", input.NumberPerPage)
-	}
-	if input.Filter != "" {
-		q.Set("filter", input.Filter)
-	}
-	if input.Name != "" {
-		q.Set("name", input.Name)
-	}
-	if input.SortKey != "" {
-		q.Set("sortKey", input.SortKey)
-	}
-	if input.Order != "" {
-		q.Set("order", input.Order)
-	}
-	rel.RawQuery = q.Encode()
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetAgentsCommandInput struct {
-	Page          string
-	NumberPerPage string
-	Filter        string
-	Name          string
-	SortKey       string
-	Order         string
-}
-
-//AddAgentCommand - Add an Agent
-//RequestType: POST
-//Input: input *AddAgentCommandInput
-func (s *AgentsService) AddAgentCommand(input *AddAgentCommandInput) (result *AgentView, resp *http.Response, err error) {
-	path := "/agents"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("POST", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type AddAgentCommandInput struct {
-	Body AgentView
-}
-
-//GetAgentCertificatesCommand - Get all Agent Certificates
-//RequestType: GET
-//Input: input *GetAgentCertificatesCommandInput
-func (s *AgentsService) GetAgentCertificatesCommand(input *GetAgentCertificatesCommandInput) (result *AgentCertificatesView, resp *http.Response, err error) {
-	path := "/agents/certificates"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	q := rel.Query()
-	if input.Page != "" {
-		q.Set("page", input.Page)
-	}
-	if input.NumberPerPage != "" {
-		q.Set("numberPerPage", input.NumberPerPage)
-	}
-	if input.Filter != "" {
-		q.Set("filter", input.Filter)
-	}
-	if input.Alias != "" {
-		q.Set("alias", input.Alias)
-	}
-	if input.SortKey != "" {
-		q.Set("sortKey", input.SortKey)
-	}
-	if input.Order != "" {
-		q.Set("order", input.Order)
-	}
-	rel.RawQuery = q.Encode()
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetAgentCertificatesCommandInput struct {
-	Page          string
-	NumberPerPage string
-	Filter        string
-	Alias         string
-	SortKey       string
-	Order         string
-}
-
-//GetAgentCertificateCommand - Get an Agent Certificate
-//RequestType: GET
-//Input: input *GetAgentCertificateCommandInput
-func (s *AgentsService) GetAgentCertificateCommand(input *GetAgentCertificateCommandInput) (result *AgentCertificateView, resp *http.Response, err error) {
-	path := "/agents/certificates/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetAgentCertificateCommandInput struct {
-	Id string
-}
-
-//GetAgentFileCommand - Get a configuration file for an Agent
-//RequestType: GET
-//Input: input *GetAgentFileCommandInput
-func (s *AgentsService) GetAgentFileCommand(input *GetAgentFileCommandInput) (resp *http.Response, err error) {
-	path := "/agents/{agentId}/config/{sharedSecretId}"
-	path = strings.Replace(path, "{agentId}", input.AgentId, -1)
-
-	path = strings.Replace(path, "{sharedSecretId}", input.SharedSecretId, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err = s.client.do(req, nil)
-	if err != nil {
-		return resp, err
-	}
-	return resp, nil
-
-}
-
-type GetAgentFileCommandInput struct {
-	AgentId        string
-	SharedSecretId string
-}
-
-//DeleteAgentCommand - Delete an Agent
-//RequestType: DELETE
-//Input: input *DeleteAgentCommandInput
-func (s *AgentsService) DeleteAgentCommand(input *DeleteAgentCommandInput) (resp *http.Response, err error) {
-	path := "/agents/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("DELETE", rel, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err = s.client.do(req, nil)
-	if err != nil {
-		return resp, err
-	}
-	return resp, nil
-
-}
-
-type DeleteAgentCommandInput struct {
-	Id string
-}
-
-//GetAgentCommand - Get an Agent
-//RequestType: GET
-//Input: input *GetAgentCommandInput
-func (s *AgentsService) GetAgentCommand(input *GetAgentCommandInput) (result *AgentView, resp *http.Response, err error) {
-	path := "/agents/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetAgentCommandInput struct {
-	Id string
-}
-
-//UpdateAgentCommand - Update an Agent
-//RequestType: PUT
-//Input: input *UpdateAgentCommandInput
-func (s *AgentsService) UpdateAgentCommand(input *UpdateAgentCommandInput) (result *AgentView, resp *http.Response, err error) {
-	path := "/agents/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("PUT", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type UpdateAgentCommandInput struct {
-	Body AgentView
-	Id   string
-}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/applications.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/applications.go
deleted file mode 100644
index 5cf1ba8b..00000000
--- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/applications.go
+++ /dev/null
@@ -1,546 +0,0 @@
-package pingaccess
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-)
-
-type ApplicationsService service
-
-type ApplicationsAPI interface {
-	GetApplicationsCommand(input *GetApplicationsCommandInput) (result *ApplicationsView, resp *http.Response, err error)
-	AddApplicationCommand(input *AddApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error)
-	DeleteReservedApplicationCommand() (resp *http.Response, err error)
-	GetReservedApplicationCommand() (result *ReservedApplicationView, resp *http.Response, err error)
-	UpdateReservedApplicationCommand(input *UpdateReservedApplicationCommandInput) (result *ReservedApplicationView, resp *http.Response, err error)
-	GetResourcesCommand(input *GetResourcesCommandInput) (result *ResourcesView, resp *http.Response, err error)
-	GetApplicationsResourcesMethodsCommand() (result *MethodsView, resp *http.Response, err error)
-	DeleteApplicationResourceCommand(input *DeleteApplicationResourceCommandInput) (resp *http.Response, err error)
-	GetApplicationResourceCommand(input *GetApplicationResourceCommandInput) (result *ResourceView, resp *http.Response, err error)
-	UpdateApplicationResourceCommand(input *UpdateApplicationResourceCommandInput) (result *ResourceView, resp *http.Response, err error)
-	DeleteApplicationCommand(input *DeleteApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error)
-	GetApplicationCommand(input *GetApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error)
-	UpdateApplicationCommand(input *UpdateApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error)
-	GetResourceMatchingEvaluationOrderCommand(input *GetResourceMatchingEvaluationOrderCommandInput) (result *ResourceMatchingEvaluationOrderView, resp *http.Response, err error)
-	GetApplicationResourcesCommand(input *GetApplicationResourcesCommandInput) (result *ResourcesView, resp *http.Response, err error)
-	AddApplicationResourceCommand(input *AddApplicationResourceCommandInput) (result *ResourceView, resp *http.Response, err error)
-	GetResourceAutoOrderCommand(input *GetResourceAutoOrderCommandInput) (result *ResourceOrderView, resp *http.Response, err error)
-}
-
-//GetApplicationsCommand - Get all Applications
-//RequestType: GET
-//Input: input *GetApplicationsCommandInput
-func (s *ApplicationsService) GetApplicationsCommand(input *GetApplicationsCommandInput) (result *ApplicationsView, resp *http.Response, err error) {
-	path := "/applications"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	q := rel.Query()
-	if input.Page != "" {
-		q.Set("page", input.Page)
-	}
-	if input.SiteId != "" {
-		q.Set("siteId", input.SiteId)
-	}
-	if input.NumberPerPage != "" {
-		q.Set("numberPerPage", input.NumberPerPage)
-	}
-	if input.AgentId != "" {
-		q.Set("agentId", input.AgentId)
-	}
-	if input.VirtualHostId != "" {
-		q.Set("virtualHostId", input.VirtualHostId)
-	}
-	if input.RuleId != "" {
-		q.Set("ruleId", input.RuleId)
-	}
-	if input.RulesetId != "" {
-		q.Set("rulesetId", input.RulesetId)
-	}
-	if input.Filter != "" {
-		q.Set("filter", input.Filter)
-	}
-	if input.Name != "" {
-		q.Set("name", input.Name)
-	}
-	if input.SortKey != "" {
-		q.Set("sortKey", input.SortKey)
-	}
-	if input.Order != "" {
-		q.Set("order", input.Order)
-	}
-	rel.RawQuery = q.Encode()
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetApplicationsCommandInput struct {
-	Page          string
-	SiteId        string
-	NumberPerPage string
-	AgentId       string
-	VirtualHostId string
-	RuleId        string
-	RulesetId     string
-	Filter        string
-	Name          string
-	SortKey       string
-	Order         string
-}
-
-//AddApplicationCommand - Add an Application
-//RequestType: POST
-//Input: input *AddApplicationCommandInput
-func (s *ApplicationsService) AddApplicationCommand(input *AddApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error) {
-	path := "/applications"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("POST", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type AddApplicationCommandInput struct {
-	Body ApplicationView
-}
-
-//DeleteReservedApplicationCommand - Resets the Reserved Application configuration to default values
-//RequestType: DELETE
-//Input:
-func (s *ApplicationsService) DeleteReservedApplicationCommand() (resp *http.Response, err error) {
-	path := "/applications/reserved"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("DELETE", rel, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err = s.client.do(req, nil)
-	if err != nil {
-		return resp, err
-	}
-	return resp, nil
-
-}
-
-//GetReservedApplicationCommand - Get Reserved Application configuration
-//RequestType: GET
-//Input:
-func (s *ApplicationsService) GetReservedApplicationCommand() (result *ReservedApplicationView, resp *http.Response, err error) {
-	path := "/applications/reserved"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-//UpdateReservedApplicationCommand - Update Reserved Application configuration
-//RequestType: PUT
-//Input: input *UpdateReservedApplicationCommandInput
-func (s *ApplicationsService) UpdateReservedApplicationCommand(input *UpdateReservedApplicationCommandInput) (result *ReservedApplicationView, resp *http.Response, err error) {
-	path := "/applications/reserved"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("PUT", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type UpdateReservedApplicationCommandInput struct {
-	Body ReservedApplicationView
-}
-
-//GetResourcesCommand - Get all Resources
-//RequestType: GET
-//Input: input *GetResourcesCommandInput
-func (s *ApplicationsService) GetResourcesCommand(input *GetResourcesCommandInput) (result *ResourcesView, resp *http.Response, err error) {
-	path := "/applications/resources"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	q := rel.Query()
-	if input.Page != "" {
-		q.Set("page", input.Page)
-	}
-	if input.NumberPerPage != "" {
-		q.Set("numberPerPage", input.NumberPerPage)
-	}
-	if input.RuleId != "" {
-		q.Set("ruleId", input.RuleId)
-	}
-	if input.RulesetId != "" {
-		q.Set("rulesetId", input.RulesetId)
-	}
-	if input.Name != "" {
- q.Set("name", input.Name) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetResourcesCommandInput struct { - Page string - NumberPerPage string - RuleId string - RulesetId string - Name string - Filter string - SortKey string - Order string -} - -//GetApplicationsResourcesMethodsCommand - Get Application Resource Methods -//RequestType: GET -//Input: -func (s *ApplicationsService) GetApplicationsResourcesMethodsCommand() (result *MethodsView, resp *http.Response, err error) { - path := "/applications/resources/methods" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//DeleteApplicationResourceCommand - Delete an Application Resource -//RequestType: DELETE -//Input: input *DeleteApplicationResourceCommandInput -func (s *ApplicationsService) DeleteApplicationResourceCommand(input *DeleteApplicationResourceCommandInput) (resp *http.Response, err error) { - path := "/applications/{applicationId}/resources/{resourceId}" - path = strings.Replace(path, "{applicationId}", input.ApplicationId, -1) - - path = strings.Replace(path, "{resourceId}", input.ResourceId, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteApplicationResourceCommandInput struct { - ApplicationId string - ResourceId string -} - -//GetApplicationResourceCommand - Get an Application Resource -//RequestType: GET -//Input: input *GetApplicationResourceCommandInput -func (s *ApplicationsService) GetApplicationResourceCommand(input *GetApplicationResourceCommandInput) (result *ResourceView, resp *http.Response, err error) { - path := "/applications/{applicationId}/resources/{resourceId}" - path = strings.Replace(path, "{applicationId}", input.ApplicationId, -1) - - path = strings.Replace(path, "{resourceId}", input.ResourceId, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetApplicationResourceCommandInput struct { - ApplicationId string - ResourceId string -} - -//UpdateApplicationResourceCommand - Update an Application Resource -//RequestType: PUT -//Input: input *UpdateApplicationResourceCommandInput -func (s *ApplicationsService) UpdateApplicationResourceCommand(input *UpdateApplicationResourceCommandInput) (result *ResourceView, resp *http.Response, err error) { - path := "/applications/{applicationId}/resources/{resourceId}" - path = strings.Replace(path, "{applicationId}", input.ApplicationId, -1) - - path = strings.Replace(path, "{resourceId}", input.ResourceId, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", 
-	req, err := s.client.newRequest("PUT", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type UpdateApplicationResourceCommandInput struct {
-	Body          ResourceView
-	ApplicationId string
-	ResourceId    string
-}
-
-//DeleteApplicationCommand - Delete an Application
-//RequestType: DELETE
-//Input: input *DeleteApplicationCommandInput
-func (s *ApplicationsService) DeleteApplicationCommand(input *DeleteApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error) {
-	path := "/applications/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("DELETE", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type DeleteApplicationCommandInput struct {
-	Id string
-}
-
-//GetApplicationCommand - Get an Application
-//RequestType: GET
-//Input: input *GetApplicationCommandInput
-func (s *ApplicationsService) GetApplicationCommand(input *GetApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error) {
-	path := "/applications/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetApplicationCommandInput struct {
-	Id string
-}
-
-//UpdateApplicationCommand - Update an Application
-//RequestType: PUT
-//Input: input *UpdateApplicationCommandInput
-func (s *ApplicationsService) UpdateApplicationCommand(input *UpdateApplicationCommandInput) (result *ApplicationView, resp *http.Response, err error) {
-	path := "/applications/{id}"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("PUT", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type UpdateApplicationCommandInput struct {
-	Body ApplicationView
-	Id   string
-}
-
-//GetResourceMatchingEvaluationOrderCommand - Get the resource path ordering for an Application
-//RequestType: GET
-//Input: input *GetResourceMatchingEvaluationOrderCommandInput
-func (s *ApplicationsService) GetResourceMatchingEvaluationOrderCommand(input *GetResourceMatchingEvaluationOrderCommandInput) (result *ResourceMatchingEvaluationOrderView, resp *http.Response, err error) {
-	path := "/applications/{id}/resourceMatchingEvaluationOrder"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetResourceMatchingEvaluationOrderCommandInput struct {
-	Id string
-}
-
-//GetApplicationResourcesCommand - Get Resources for an Application
-//RequestType: GET
-//Input: input *GetApplicationResourcesCommandInput
-func (s *ApplicationsService) GetApplicationResourcesCommand(input *GetApplicationResourcesCommandInput) (result *ResourcesView, resp *http.Response, err error) {
-	path := "/applications/{id}/resources"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	q := rel.Query()
-	if input.Page != "" {
-		q.Set("page", input.Page)
-	}
-	if input.NumberPerPage != "" {
-		q.Set("numberPerPage", input.NumberPerPage)
-	}
-	if input.Name != "" {
-		q.Set("name", input.Name)
-	}
-	if input.Filter != "" {
-		q.Set("filter", input.Filter)
-	}
-	if input.SortKey != "" {
-		q.Set("sortKey", input.SortKey)
-	}
-	if input.Order != "" {
-		q.Set("order", input.Order)
-	}
-	rel.RawQuery = q.Encode()
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetApplicationResourcesCommandInput struct {
-	Page          string
-	NumberPerPage string
-	Name          string
-	Filter        string
-	SortKey       string
-	Order         string
-
-	Id string
-}
-
-//AddApplicationResourceCommand - Add Resource to an Application
-//RequestType: POST
-//Input: input *AddApplicationResourceCommandInput
-func (s *ApplicationsService) AddApplicationResourceCommand(input *AddApplicationResourceCommandInput) (result *ResourceView, resp *http.Response, err error) {
-	path := "/applications/{id}/resources"
-	path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("POST", rel, input.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type AddApplicationResourceCommandInput struct {
-	Body ResourceView
-	Id   string
-}
-
-//GetResourceAutoOrderCommand - Computes an automatic, intelligent resource ordering for an Application.
-//RequestType: GET -//Input: input *GetResourceAutoOrderCommandInput -func (s *ApplicationsService) GetResourceAutoOrderCommand(input *GetResourceAutoOrderCommandInput) (result *ResourceOrderView, resp *http.Response, err error) { - path := "/applications/{id}/resources/autoOrder" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetResourceAutoOrderCommandInput struct { - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/auth.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/auth.go deleted file mode 100644 index 1ef6cd0f..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/auth.go +++ /dev/null @@ -1,268 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type AuthService service - -type AuthAPI interface { - DeleteBasicAuthCommand() (resp *http.Response, err error) - GetBasicAuthCommand() (result *BasicConfig, resp *http.Response, err error) - UpdateBasicAuthCommand(input *UpdateBasicAuthCommandInput) (result *BasicAuthConfigView, resp *http.Response, err error) - DeleteOAuthAuthCommand() (resp *http.Response, err error) - GetOAuthAuthCommand() (result *OAuthConfigView, resp *http.Response, err error) - UpdateOAuthAuthCommand(input *UpdateOAuthAuthCommandInput) (result *OAuthConfigView, resp *http.Response, err error) - DeleteOidcAuthCommand() (resp *http.Response, err error) - GetOidcAuthCommand() (result *OidcConfigView, resp *http.Response, err error) - UpdateOidcAuthCommand(input *UpdateOidcAuthCommandInput) (result *OidcConfigView, resp *http.Response, err error) - DeleteAdminBasicWebSessionCommand() (resp *http.Response, err error) - GetAdminBasicWebSessionCommand() (result *AdminBasicWebSessionView, resp *http.Response, err error) - UpdateAdminBasicWebSessionCommand(input *UpdateAdminBasicWebSessionCommandInput) (result *AdminBasicWebSessionView, resp *http.Response, err error) -} - -//DeleteBasicAuthCommand - Resets the HTTP Basic Authentication configuration to default values -//RequestType: DELETE -//Input: -func (s *AuthService) DeleteBasicAuthCommand() (resp *http.Response, err error) { - path := "/auth/basic" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetBasicAuthCommand - Get the HTTP Basic Authentication configuration -//RequestType: GET -//Input: -func (s *AuthService) GetBasicAuthCommand() (result *BasicConfig, resp *http.Response, err error) { - path := "/auth/basic" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateBasicAuthCommand - Update the Basic Authentication configuration -//RequestType: PUT -//Input: input *UpdateBasicAuthCommandInput -func (s *AuthService) UpdateBasicAuthCommand(input *UpdateBasicAuthCommandInput) (result *BasicAuthConfigView, resp *http.Response, err error) { - path := 
"/auth/basic" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateBasicAuthCommandInput struct { - Body BasicAuthConfigView -} - -//DeleteOAuthAuthCommand - Resets the OAuth Authentication configuration to default values -//RequestType: DELETE -//Input: -func (s *AuthService) DeleteOAuthAuthCommand() (resp *http.Response, err error) { - path := "/auth/oauth" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetOAuthAuthCommand - Get the OAuth Authentication configuration -//RequestType: GET -//Input: -func (s *AuthService) GetOAuthAuthCommand() (result *OAuthConfigView, resp *http.Response, err error) { - path := "/auth/oauth" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateOAuthAuthCommand - Update the OAuth Authentication configuration -//RequestType: PUT -//Input: input *UpdateOAuthAuthCommandInput -func (s *AuthService) UpdateOAuthAuthCommand(input *UpdateOAuthAuthCommandInput) (result *OAuthConfigView, resp *http.Response, err error) { - path := "/auth/oauth" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateOAuthAuthCommandInput struct { - Body OAuthConfigView -} - -//DeleteOidcAuthCommand - Resets the OIDC Authentication configuration to default values -//RequestType: DELETE -//Input: -func (s *AuthService) DeleteOidcAuthCommand() (resp *http.Response, err error) { - path := "/auth/oidc" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetOidcAuthCommand - Get the OIDC Authentication configuration -//RequestType: GET -//Input: -func (s *AuthService) GetOidcAuthCommand() (result *OidcConfigView, resp *http.Response, err error) { - path := "/auth/oidc" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateOidcAuthCommand - Update the OIDC Authentication configuration -//RequestType: PUT -//Input: input *UpdateOidcAuthCommandInput -func (s *AuthService) UpdateOidcAuthCommand(input *UpdateOidcAuthCommandInput) (result *OidcConfigView, resp *http.Response, err error) { - path := "/auth/oidc" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } 
- - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateOidcAuthCommandInput struct { - Body OidcConfigView -} - -//DeleteAdminBasicWebSessionCommand - Resets the Admin Web Session configuration to default values -//RequestType: DELETE -//Input: -func (s *AuthService) DeleteAdminBasicWebSessionCommand() (resp *http.Response, err error) { - path := "/auth/webSession" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetAdminBasicWebSessionCommand - Get the admin web session configuration -//RequestType: GET -//Input: -func (s *AuthService) GetAdminBasicWebSessionCommand() (result *AdminBasicWebSessionView, resp *http.Response, err error) { - path := "/auth/webSession" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateAdminBasicWebSessionCommand - Update the admin web session configuration -//RequestType: PUT -//Input: input *UpdateAdminBasicWebSessionCommandInput -func (s *AuthService) UpdateAdminBasicWebSessionCommand(input *UpdateAdminBasicWebSessionCommandInput) (result *AdminBasicWebSessionView, resp *http.Response, err error) { - path := "/auth/webSession" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateAdminBasicWebSessionCommandInput struct { - Body AdminBasicWebSessionView -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authTokenManagement.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authTokenManagement.go deleted file mode 100644 index 0185c69a..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authTokenManagement.go +++ /dev/null @@ -1,120 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type AuthTokenManagementService service - -type AuthTokenManagementAPI interface { - DeleteAuthTokenManagementCommand() (resp *http.Response, err error) - GetAuthTokenManagementCommand() (result *AuthTokenManagementView, resp *http.Response, err error) - UpdateAuthTokenManagementCommand(input *UpdateAuthTokenManagementCommandInput) (result *AuthTokenManagementView, resp *http.Response, err error) - GetAuthTokenKeySetCommand() (result *KeySetView, resp *http.Response, err error) - UpdateAuthTokenKeySetCommand(input *UpdateAuthTokenKeySetCommandInput) (result *KeySetView, resp *http.Response, err error) -} - -//DeleteAuthTokenManagementCommand - Resets the Auth Token Management configuration to default values -//RequestType: DELETE -//Input: -func (s *AuthTokenManagementService) DeleteAuthTokenManagementCommand() (resp *http.Response, err error) { - path := "/authTokenManagement" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - 
return resp, err - } - return resp, nil - -} - -//GetAuthTokenManagementCommand - Get the Auth Token Management configuration -//RequestType: GET -//Input: -func (s *AuthTokenManagementService) GetAuthTokenManagementCommand() (result *AuthTokenManagementView, resp *http.Response, err error) { - path := "/authTokenManagement" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateAuthTokenManagementCommand - Update the Auth Token Management configuration -//RequestType: PUT -//Input: input *UpdateAuthTokenManagementCommandInput -func (s *AuthTokenManagementService) UpdateAuthTokenManagementCommand(input *UpdateAuthTokenManagementCommandInput) (result *AuthTokenManagementView, resp *http.Response, err error) { - path := "/authTokenManagement" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateAuthTokenManagementCommandInput struct { - Body AuthTokenManagementView -} - -//GetAuthTokenKeySetCommand - Get the Auth Token key set configuration -//RequestType: GET -//Input: -func (s *AuthTokenManagementService) GetAuthTokenKeySetCommand() (result *KeySetView, resp *http.Response, err error) { - path := "/authTokenManagement/keySet" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateAuthTokenKeySetCommand - Update the AuthToken key set configuration -//RequestType: PUT -//Input: input *UpdateAuthTokenKeySetCommandInput -func (s *AuthTokenManagementService) UpdateAuthTokenKeySetCommand(input *UpdateAuthTokenKeySetCommandInput) (result *KeySetView, resp *http.Response, err error) { - path := "/authTokenManagement/keySet" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateAuthTokenKeySetCommandInput struct { - Body KeySetView -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authnReqLists.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authnReqLists.go deleted file mode 100644 index 4cffc600..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/authnReqLists.go +++ /dev/null @@ -1,165 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type AuthnReqListsService service - -type AuthnReqListsAPI interface { - GetAuthnReqListsCommand(input *GetAuthnReqListsCommandInput) (result *AuthnReqListsView, resp *http.Response, err error) - AddAuthnReqListCommand(input *AddAuthnReqListCommandInput) (result *AuthnReqListView, resp *http.Response, err error) - DeleteAuthnReqListCommand(input *DeleteAuthnReqListCommandInput) (resp *http.Response, err error) - GetAuthnReqListCommand(input *GetAuthnReqListCommandInput) 
(result *AuthnReqListView, resp *http.Response, err error) - UpdateAuthnReqListCommand(input *UpdateAuthnReqListCommandInput) (result *AuthnReqListView, resp *http.Response, err error) -} - -//GetAuthnReqListsCommand - Get all Authentication Requirement Lists -//RequestType: GET -//Input: input *GetAuthnReqListsCommandInput -func (s *AuthnReqListsService) GetAuthnReqListsCommand(input *GetAuthnReqListsCommandInput) (result *AuthnReqListsView, resp *http.Response, err error) { - path := "/authnReqLists" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetAuthnReqListsCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddAuthnReqListCommand - Add an Authentication Requirement List -//RequestType: POST -//Input: input *AddAuthnReqListCommandInput -func (s *AuthnReqListsService) AddAuthnReqListCommand(input *AddAuthnReqListCommandInput) (result *AuthnReqListView, resp *http.Response, err error) { - path := "/authnReqLists" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddAuthnReqListCommandInput struct { - Body AuthnReqListView -} - -//DeleteAuthnReqListCommand - Delete an Authentication Requirement List -//RequestType: DELETE -//Input: input *DeleteAuthnReqListCommandInput -func (s *AuthnReqListsService) DeleteAuthnReqListCommand(input *DeleteAuthnReqListCommandInput) (resp *http.Response, err error) { - path := "/authnReqLists/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteAuthnReqListCommandInput struct { - Id string -} - -//GetAuthnReqListCommand - Get an Authentication Requirement List -//RequestType: GET -//Input: input *GetAuthnReqListCommandInput -func (s *AuthnReqListsService) GetAuthnReqListCommand(input *GetAuthnReqListCommandInput) (result *AuthnReqListView, resp *http.Response, err error) { - path := "/authnReqLists/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetAuthnReqListCommandInput struct { - Id string -} - -//UpdateAuthnReqListCommand - Update an Authentication Requirement List 
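// --- Illustration (editor's note, not part of the patch): a read-modify-write
// against the AuthnReqLists commands defined above, typed against the
// pingaccess.AuthnReqListsAPI interface so it can be mocked in tests. Ids are
// plain strings because the SDK substitutes them directly into the URL path.
package examples

import "github.com/iwarapter/pingaccess-sdk-go/pingaccess"

func rewriteAuthnReqList(svc pingaccess.AuthnReqListsAPI, id string) error {
	current, _, err := svc.GetAuthnReqListCommand(&pingaccess.GetAuthnReqListCommandInput{Id: id})
	if err != nil {
		return err
	}
	// A real caller would mutate *current here before writing it back.
	_, _, err = svc.UpdateAuthnReqListCommand(&pingaccess.UpdateAuthnReqListCommandInput{
		Id:   id,
		Body: *current,
	})
	return err
}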
-//RequestType: PUT -//Input: input *UpdateAuthnReqListCommandInput -func (s *AuthnReqListsService) UpdateAuthnReqListCommand(input *UpdateAuthnReqListCommandInput) (result *AuthnReqListView, resp *http.Response, err error) { - path := "/authnReqLists/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateAuthnReqListCommandInput struct { - Body AuthnReqListView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/backup.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/backup.go deleted file mode 100644 index fd67ac14..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/backup.go +++ /dev/null @@ -1,32 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type BackupService service - -type BackupAPI interface { - BackupCommand() (resp *http.Response, err error) -} - -//BackupCommand - Create a local database backup -//RequestType: GET -//Input: -func (s *BackupService) BackupCommand() (resp *http.Response, err error) { - path := "/backup" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/certificates.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/certificates.go deleted file mode 100644 index 1935d24b..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/certificates.go +++ /dev/null @@ -1,191 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type CertificatesService service - -type CertificatesAPI interface { - GetTrustedCerts(input *GetTrustedCertsInput) (result *TrustedCertsView, resp *http.Response, err error) - ImportTrustedCert(input *ImportTrustedCertInput) (result *TrustedCertView, resp *http.Response, err error) - DeleteTrustedCertCommand(input *DeleteTrustedCertCommandInput) (resp *http.Response, err error) - GetTrustedCert(input *GetTrustedCertInput) (result *TrustedCertView, resp *http.Response, err error) - UpdateTrustedCert(input *UpdateTrustedCertInput) (result *TrustedCertView, resp *http.Response, err error) - ExportTrustedCert(input *ExportTrustedCertInput) (resp *http.Response, err error) -} - -//GetTrustedCerts - Get all Certificates -//RequestType: GET -//Input: input *GetTrustedCertsInput -func (s *CertificatesService) GetTrustedCerts(input *GetTrustedCertsInput) (result *TrustedCertsView, resp *http.Response, err error) { - path := "/certificates" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Alias != "" { - q.Set("alias", input.Alias) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - 
if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetTrustedCertsInput struct { - Page string - NumberPerPage string - Filter string - Alias string - SortKey string - Order string -} - -//ImportTrustedCert - Import a Certificate -//RequestType: POST -//Input: input *ImportTrustedCertInput -func (s *CertificatesService) ImportTrustedCert(input *ImportTrustedCertInput) (result *TrustedCertView, resp *http.Response, err error) { - path := "/certificates" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type ImportTrustedCertInput struct { - Body X509FileImportDocView -} - -//DeleteTrustedCertCommand - Delete a Certificate -//RequestType: DELETE -//Input: input *DeleteTrustedCertCommandInput -func (s *CertificatesService) DeleteTrustedCertCommand(input *DeleteTrustedCertCommandInput) (resp *http.Response, err error) { - path := "/certificates/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteTrustedCertCommandInput struct { - Id string -} - -//GetTrustedCert - Get a Certificate -//RequestType: GET -//Input: input *GetTrustedCertInput -func (s *CertificatesService) GetTrustedCert(input *GetTrustedCertInput) (result *TrustedCertView, resp *http.Response, err error) { - path := "/certificates/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetTrustedCertInput struct { - Id string -} - -//UpdateTrustedCert - Update a Certificate -//RequestType: PUT -//Input: input *UpdateTrustedCertInput -func (s *CertificatesService) UpdateTrustedCert(input *UpdateTrustedCertInput) (result *TrustedCertView, resp *http.Response, err error) { - path := "/certificates/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateTrustedCertInput struct { - Body X509FileImportDocView - Id string -} - -//ExportTrustedCert - Export a Certificate -//RequestType: GET -//Input: input *ExportTrustedCertInput -func (s *CertificatesService) ExportTrustedCert(input *ExportTrustedCertInput) (resp *http.Response, err error) { - path := "/certificates/{id}/file" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - 
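// --- Illustration (editor's note, not part of the patch): listing trusted
// certificates filtered by alias, then deleting one by id, using the
// CertificatesAPI methods in this deleted file. Assumes an initialized
// *pingaccess.CertificatesService and a placeholder alias.
package examples

import "github.com/iwarapter/pingaccess-sdk-go/pingaccess"

func pruneCertificate(svc *pingaccess.CertificatesService, id string) error {
	certs, _, err := svc.GetTrustedCerts(&pingaccess.GetTrustedCertsInput{Alias: "example"})
	if err != nil {
		return err
	}
	_ = certs // *pingaccess.TrustedCertsView; the item fields are outside this hunk
	_, err = svc.DeleteTrustedCertCommand(&pingaccess.DeleteTrustedCertCommandInput{Id: id})
	return err
}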
-} - -type ExportTrustedCertInput struct { - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/client.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/client.go new file mode 100644 index 00000000..665106f7 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/client.go @@ -0,0 +1,45 @@ +package client + +import ( + "io" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +type Options struct { + Config config.Config + CustomCABundle io.Reader +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*config.Config) config.Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + //request.Retryer + metadata.ClientInfo + + Config config.Config + //Handlers request.Handlers +} + +func New(cfg config.Config, info metadata.ClientInfo) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + //Handlers: handlers.Copy(), + } + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, operation, params, data) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata/clientinfo.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata/clientinfo.go new file mode 100644 index 00000000..c6d293e4 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata/clientinfo.go @@ -0,0 +1,9 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. 
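// --- Illustration (editor's note, not part of the patch; the ClientInfo struct
// itself continues directly below): wiring the new split packages together.
// A minimal sketch assuming the vendored import paths added in this patch and
// the builder methods of the config package added later in the diff; the
// credentials, endpoint, and version strings are placeholders, and request
// construction is omitted because the request package is not in this hunk.
package main

import (
	"fmt"

	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
)

func main() {
	cfg := config.NewConfig().
		WithUsername("administrator").
		WithPassword("changeme"). // placeholder credentials
		WithEndpoint("https://localhost:9000/pa-admin-api/v3") // placeholder endpoint

	svc := client.New(*cfg, metadata.ClientInfo{
		ServiceName: "PingAccess",
		APIVersion:  "v3", // placeholder
		Endpoint:    *cfg.Endpoint,
	})
	fmt.Println(svc.ServiceName, *svc.Config.Endpoint)
}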
+type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + BasePath string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config.go deleted file mode 100644 index 79a8180e..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config.go +++ /dev/null @@ -1,219 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type ConfigService service - -type ConfigAPI interface { - ConfigExportCommand() (result *ExportData, resp *http.Response, err error) - GetConfigExportWorkflowsCommand() (result *ConfigStatusesView, resp *http.Response, err error) - AddConfigExportWorkflowCommand() (result *ConfigStatusView, resp *http.Response, err error) - GetConfigExportWorkflowCommand(input *GetConfigExportWorkflowCommandInput) (result *ConfigStatusView, resp *http.Response, err error) - GetConfigExportWorkflowDataCommand(input *GetConfigExportWorkflowDataCommandInput) (result *ExportData, resp *http.Response, err error) - ConfigImportCommand(input *ConfigImportCommandInput) (resp *http.Response, err error) - GetConfigImportWorkflowsCommand() (result *ConfigStatusesView, resp *http.Response, err error) - AddConfigImportWorkflowCommand(input *AddConfigImportWorkflowCommandInput) (resp *http.Response, err error) - GetConfigImportWorkflowCommand(input *GetConfigImportWorkflowCommandInput) (result *ConfigStatusView, resp *http.Response, err error) -} - -//ConfigExportCommand - [Attention: The endpoint "/config/export" is deprecated. The "config/export/workflows" endpoint should be used instead.] Export a JSON backup of the entire system -//RequestType: GET -//Input: -func (s *ConfigService) ConfigExportCommand() (result *ExportData, resp *http.Response, err error) { - path := "/config/export" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetConfigExportWorkflowsCommand - Get the status of pending Exports -//RequestType: GET -//Input: -func (s *ConfigService) GetConfigExportWorkflowsCommand() (result *ConfigStatusesView, resp *http.Response, err error) { - path := "/config/export/workflows" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//AddConfigExportWorkflowCommand - Start a JSON backup of the entire system for export -//RequestType: POST -//Input: -func (s *ConfigService) AddConfigExportWorkflowCommand() (result *ConfigStatusView, resp *http.Response, err error) { - path := "/config/export/workflows" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetConfigExportWorkflowCommand - Check the status of an Export -//RequestType: GET -//Input: input *GetConfigExportWorkflowCommandInput -func (s *ConfigService) GetConfigExportWorkflowCommand(input *GetConfigExportWorkflowCommandInput) (result 
*ConfigStatusView, resp *http.Response, err error) { - path := "/config/export/workflows/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetConfigExportWorkflowCommandInput struct { - Id string -} - -//GetConfigExportWorkflowDataCommand - Export a JSON backup of the entire system -//RequestType: GET -//Input: input *GetConfigExportWorkflowDataCommandInput -func (s *ConfigService) GetConfigExportWorkflowDataCommand(input *GetConfigExportWorkflowDataCommandInput) (result *ExportData, resp *http.Response, err error) { - path := "/config/export/workflows/{id}/data" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetConfigExportWorkflowDataCommandInput struct { - Id string -} - -//ConfigImportCommand - [Attention: The endpoint "/config/import" is deprecated. The "config/import/workflows" endpoint should be used instead.] Import JSON backup. -//RequestType: POST -//Input: input *ConfigImportCommandInput -func (s *ConfigService) ConfigImportCommand(input *ConfigImportCommandInput) (resp *http.Response, err error) { - path := "/config/import" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type ConfigImportCommandInput struct { - Body string -} - -//GetConfigImportWorkflowsCommand - Get the status of pending imports -//RequestType: GET -//Input: -func (s *ConfigService) GetConfigImportWorkflowsCommand() (result *ConfigStatusesView, resp *http.Response, err error) { - path := "/config/import/workflows" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//AddConfigImportWorkflowCommand - Start an Import of a JSON backup -//RequestType: POST -//Input: input *AddConfigImportWorkflowCommandInput -func (s *ConfigService) AddConfigImportWorkflowCommand(input *AddConfigImportWorkflowCommandInput) (resp *http.Response, err error) { - path := "/config/import/workflows" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type AddConfigImportWorkflowCommandInput struct { - Body ExportData -} - -//GetConfigImportWorkflowCommand - Check the status of an Export -//RequestType: GET -//Input: input *GetConfigImportWorkflowCommandInput -func (s *ConfigService) GetConfigImportWorkflowCommand(input *GetConfigImportWorkflowCommandInput) (result *ConfigStatusView, resp *http.Response, err error) { - path := "/config/import/workflows/{id}" - 
path = strings.Replace(path, "{id}", input.Id, -1)
-
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetConfigImportWorkflowCommandInput struct {
-	Id string
-}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config/config.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config/config.go
new file mode 100644
index 00000000..7cdd7894
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/config/config.go
@@ -0,0 +1,57 @@
+package config
+
+import (
+	"crypto/tls"
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+)
+
+type Config struct {
+	Username          *string
+	Password          *string
+	LogDebug          *bool
+	MaskAuthorization *bool
+	Endpoint          *string
+
+	// The HTTP client to use when sending requests. Defaults to
+	// `http.DefaultClient`.
+	HTTPClient *http.Client
+}
+
+func NewConfig() *Config {
+	//TODO: insecure development default; parse a proper CA bundle instead of skipping verification
+	/* #nosec */
+	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+
+	return &Config{
+		MaskAuthorization: pingaccess.Bool(true),
+		LogDebug:          pingaccess.Bool(false),
+		HTTPClient:        http.DefaultClient,
+	}
+}
+
+func (c *Config) WithPassword(password string) *Config {
+	c.Password = pingaccess.String(password)
+	return c
+}
+
+func (c *Config) WithUsername(username string) *Config {
+	c.Username = pingaccess.String(username)
+	return c
+}
+
+func (c *Config) WithEndpoint(endpoint string) *Config {
+	c.Endpoint = pingaccess.String(endpoint)
+	return c
+}
+
+func (c *Config) WithDebug(debug bool) *Config {
+	c.LogDebug = pingaccess.Bool(debug)
+	return c
+}
+
+func (c *Config) WithMaskAuthorization(debug bool) *Config {
+	c.MaskAuthorization = pingaccess.Bool(debug)
+	return c
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engineListeners.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engineListeners.go
deleted file mode 100644
index a58e11bc..00000000
--- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engineListeners.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package pingaccess
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-)
-
-type EngineListenersService service
-
-type EngineListenersAPI interface {
-	GetEngineListenersCommand(input *GetEngineListenersCommandInput) (result *EngineListenersView, resp *http.Response, err error)
-	AddEngineListenerCommand(input *AddEngineListenerCommandInput) (result *EngineListenerView, resp *http.Response, err error)
-	DeleteEngineListenerCommand(input *DeleteEngineListenerCommandInput) (resp *http.Response, err error)
-	GetEngineListenerCommand(input *GetEngineListenerCommandInput) (result *EngineListenerView, resp *http.Response, err error)
-	UpdateEngineListenerCommand(input *UpdateEngineListenerCommandInput) (result *EngineListenerView, resp *http.Response, err error)
-}
-
-//GetEngineListenersCommand - Get all Engine Listeners
-//RequestType: GET
-//Input: input *GetEngineListenersCommandInput
-func (s *EngineListenersService) GetEngineListenersCommand(input *GetEngineListenersCommandInput) (result *EngineListenersView, resp *http.Response, err error) {
-	path := "/engineListeners"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context,
path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetEngineListenersCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddEngineListenerCommand - Create an Engine Listener -//RequestType: POST -//Input: input *AddEngineListenerCommandInput -func (s *EngineListenersService) AddEngineListenerCommand(input *AddEngineListenerCommandInput) (result *EngineListenerView, resp *http.Response, err error) { - path := "/engineListeners" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddEngineListenerCommandInput struct { - Body EngineListenerView -} - -//DeleteEngineListenerCommand - Delete an Engine Listener -//RequestType: DELETE -//Input: input *DeleteEngineListenerCommandInput -func (s *EngineListenersService) DeleteEngineListenerCommand(input *DeleteEngineListenerCommandInput) (resp *http.Response, err error) { - path := "/engineListeners/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteEngineListenerCommandInput struct { - Id string -} - -//GetEngineListenerCommand - Get an Engine Listener -//RequestType: GET -//Input: input *GetEngineListenerCommandInput -func (s *EngineListenersService) GetEngineListenerCommand(input *GetEngineListenerCommandInput) (result *EngineListenerView, resp *http.Response, err error) { - path := "/engineListeners/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetEngineListenerCommandInput struct { - Id string -} - -//UpdateEngineListenerCommand - Update an Engine Listener -//RequestType: PUT -//Input: input *UpdateEngineListenerCommandInput -func (s *EngineListenersService) UpdateEngineListenerCommand(input *UpdateEngineListenerCommandInput) (result *EngineListenerView, resp *http.Response, err error) { - path := "/engineListeners/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - 
return result, resp, nil - -} - -type UpdateEngineListenerCommandInput struct { - Body EngineListenerView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engines.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engines.go deleted file mode 100644 index c5082693..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/engines.go +++ /dev/null @@ -1,286 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type EnginesService service - -type EnginesAPI interface { - GetEnginesCommand(input *GetEnginesCommandInput) (result *EnginesView, resp *http.Response, err error) - AddEngineCommand(input *AddEngineCommandInput) (result *EngineView, resp *http.Response, err error) - GetEngineCertificatesCommand(input *GetEngineCertificatesCommandInput) (result *EngineCertificateView, resp *http.Response, err error) - GetEngineCertificateCommand(input *GetEngineCertificateCommandInput) (result *EngineCertificateView, resp *http.Response, err error) - GetEngineStatusCommand() (result *EngineHealthStatusView, resp *http.Response, err error) - DeleteEngineCommand(input *DeleteEngineCommandInput) (resp *http.Response, err error) - GetEngineCommand(input *GetEngineCommandInput) (result *EngineView, resp *http.Response, err error) - UpdateEngineCommand(input *UpdateEngineCommandInput) (result *EngineView, resp *http.Response, err error) - GetEngineConfigFileCommand(input *GetEngineConfigFileCommandInput) (resp *http.Response, err error) -} - -//GetEnginesCommand - Get all Engines -//RequestType: GET -//Input: input *GetEnginesCommandInput -func (s *EnginesService) GetEnginesCommand(input *GetEnginesCommandInput) (result *EnginesView, resp *http.Response, err error) { - path := "/engines" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetEnginesCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddEngineCommand - Add an Engine -//RequestType: POST -//Input: input *AddEngineCommandInput -func (s *EnginesService) AddEngineCommand(input *AddEngineCommandInput) (result *EngineView, resp *http.Response, err error) { - path := "/engines" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddEngineCommandInput struct { - Body EngineView -} - -//GetEngineCertificatesCommand - Get all Engine Certificates -//RequestType: GET -//Input: input *GetEngineCertificatesCommandInput -func (s *EnginesService) GetEngineCertificatesCommand(input *GetEngineCertificatesCommandInput) (result *EngineCertificateView, resp 
*http.Response, err error) { - path := "/engines/certificates" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Alias != "" { - q.Set("alias", input.Alias) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetEngineCertificatesCommandInput struct { - Page string - NumberPerPage string - Filter string - Alias string - SortKey string - Order string -} - -//GetEngineCertificateCommand - Get an Engine Certificate -//RequestType: GET -//Input: input *GetEngineCertificateCommandInput -func (s *EnginesService) GetEngineCertificateCommand(input *GetEngineCertificateCommandInput) (result *EngineCertificateView, resp *http.Response, err error) { - path := "/engines/certificates/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetEngineCertificateCommandInput struct { - Id string -} - -//GetEngineStatusCommand - Get health status of all Engines -//RequestType: GET -//Input: -func (s *EnginesService) GetEngineStatusCommand() (result *EngineHealthStatusView, resp *http.Response, err error) { - path := "/engines/status" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//DeleteEngineCommand - Delete an Engine -//RequestType: DELETE -//Input: input *DeleteEngineCommandInput -func (s *EnginesService) DeleteEngineCommand(input *DeleteEngineCommandInput) (resp *http.Response, err error) { - path := "/engines/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteEngineCommandInput struct { - Id string -} - -//GetEngineCommand - Get an Engine -//RequestType: GET -//Input: input *GetEngineCommandInput -func (s *EnginesService) GetEngineCommand(input *GetEngineCommandInput) (result *EngineView, resp *http.Response, err error) { - path := "/engines/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetEngineCommandInput struct { - Id string -} - -//UpdateEngineCommand - Update an Engine -//RequestType: PUT -//Input: 
input *UpdateEngineCommandInput -func (s *EnginesService) UpdateEngineCommand(input *UpdateEngineCommandInput) (result *EngineView, resp *http.Response, err error) { - path := "/engines/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateEngineCommandInput struct { - Body EngineView - Id string -} - -//GetEngineConfigFileCommand - Get configuration file for an Engine -//RequestType: POST -//Input: input *GetEngineConfigFileCommandInput -func (s *EnginesService) GetEngineConfigFileCommand(input *GetEngineConfigFileCommandInput) (resp *http.Response, err error) { - path := "/engines/{id}/config" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type GetEngineConfigFileCommandInput struct { - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/globalUnprotectedResources.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/globalUnprotectedResources.go deleted file mode 100644 index 192265f7..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/globalUnprotectedResources.go +++ /dev/null @@ -1,165 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type GlobalUnprotectedResourcesService service - -type GlobalUnprotectedResourcesAPI interface { - GetGlobalUnprotectedResourcesCommand(input *GetGlobalUnprotectedResourcesCommandInput) (result *GlobalUnprotectedResourcesView, resp *http.Response, err error) - AddGlobalUnprotectedResourceCommand(input *AddGlobalUnprotectedResourceCommandInput) (result *GlobalUnprotectedResourceView, resp *http.Response, err error) - DeleteGlobalUnprotectedResourceCommand(input *DeleteGlobalUnprotectedResourceCommandInput) (resp *http.Response, err error) - GetGlobalUnprotectedResourceCommand(input *GetGlobalUnprotectedResourceCommandInput) (result *GlobalUnprotectedResourceView, resp *http.Response, err error) - UpdateGlobalUnprotectedResourceCommand(input *UpdateGlobalUnprotectedResourceCommandInput) (result *GlobalUnprotectedResourceView, resp *http.Response, err error) -} - -//GetGlobalUnprotectedResourcesCommand - Get all Global Unprotected Resources -//RequestType: GET -//Input: input *GetGlobalUnprotectedResourcesCommandInput -func (s *GlobalUnprotectedResourcesService) GetGlobalUnprotectedResourcesCommand(input *GetGlobalUnprotectedResourcesCommandInput) (result *GlobalUnprotectedResourcesView, resp *http.Response, err error) { - path := "/globalUnprotectedResources" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := 
s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetGlobalUnprotectedResourcesCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddGlobalUnprotectedResourceCommand - Add a Global Unprotected Resource -//RequestType: POST -//Input: input *AddGlobalUnprotectedResourceCommandInput -func (s *GlobalUnprotectedResourcesService) AddGlobalUnprotectedResourceCommand(input *AddGlobalUnprotectedResourceCommandInput) (result *GlobalUnprotectedResourceView, resp *http.Response, err error) { - path := "/globalUnprotectedResources" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddGlobalUnprotectedResourceCommandInput struct { - Body GlobalUnprotectedResourceView -} - -//DeleteGlobalUnprotectedResourceCommand - Delete a Global Unprotected Resource -//RequestType: DELETE -//Input: input *DeleteGlobalUnprotectedResourceCommandInput -func (s *GlobalUnprotectedResourcesService) DeleteGlobalUnprotectedResourceCommand(input *DeleteGlobalUnprotectedResourceCommandInput) (resp *http.Response, err error) { - path := "/globalUnprotectedResources/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteGlobalUnprotectedResourceCommandInput struct { - Id string -} - -//GetGlobalUnprotectedResourceCommand - Get a Global Unprotected Resource -//RequestType: GET -//Input: input *GetGlobalUnprotectedResourceCommandInput -func (s *GlobalUnprotectedResourcesService) GetGlobalUnprotectedResourceCommand(input *GetGlobalUnprotectedResourceCommandInput) (result *GlobalUnprotectedResourceView, resp *http.Response, err error) { - path := "/globalUnprotectedResources/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetGlobalUnprotectedResourceCommandInput struct { - Id string -} - -//UpdateGlobalUnprotectedResourceCommand - Update a Global Unprotected Resource -//RequestType: PUT -//Input: input *UpdateGlobalUnprotectedResourceCommandInput -func (s *GlobalUnprotectedResourcesService) UpdateGlobalUnprotectedResourceCommand(input *UpdateGlobalUnprotectedResourceCommandInput) (result *GlobalUnprotectedResourceView, resp *http.Response, err error) { - path := "/globalUnprotectedResources/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type 
UpdateGlobalUnprotectedResourceCommandInput struct { - Body GlobalUnprotectedResourceView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/highAvailability.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/highAvailability.go deleted file mode 100644 index 8c0d1991..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/highAvailability.go +++ /dev/null @@ -1,409 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type HighAvailabilityService service - -type HighAvailabilityAPI interface { - GetAvailabilityProfilesCommand(input *GetAvailabilityProfilesCommandInput) (result *AvailabilityProfilesView, resp *http.Response, err error) - AddAvailabilityProfileCommand(input *AddAvailabilityProfileCommandInput) (result *AvailabilityProfileView, resp *http.Response, err error) - GetAvailabilityProfileDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) - GetAvailabilityProfileDescriptorCommand(input *GetAvailabilityProfileDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) - DeleteAvailabilityProfileCommand(input *DeleteAvailabilityProfileCommandInput) (resp *http.Response, err error) - GetAvailabilityProfileCommand(input *GetAvailabilityProfileCommandInput) (result *AvailabilityProfileView, resp *http.Response, err error) - UpdateAvailabilityProfileCommand(input *UpdateAvailabilityProfileCommandInput) (result *AvailabilityProfileView, resp *http.Response, err error) - GetLoadBalancingStrategiesCommand(input *GetLoadBalancingStrategiesCommandInput) (result *LoadBalancingStrategiesView, resp *http.Response, err error) - AddLoadBalancingStrategyCommand(input *AddLoadBalancingStrategyCommandInput) (result *LoadBalancingStrategyView, resp *http.Response, err error) - GetLoadBalancingStrategyDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) - GetLoadBalancingStrategyDescriptorCommand(input *GetLoadBalancingStrategyDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) - DeleteLoadBalancingStrategyCommand(input *DeleteLoadBalancingStrategyCommandInput) (resp *http.Response, err error) - GetLoadBalancingStrategyCommand(input *GetLoadBalancingStrategyCommandInput) (result *LoadBalancingStrategyView, resp *http.Response, err error) - UpdateLoadBalancingStrategyCommand(input *UpdateLoadBalancingStrategyCommandInput) (result *LoadBalancingStrategyView, resp *http.Response, err error) -} - -//GetAvailabilityProfilesCommand - Get all Availability Profiles -//RequestType: GET -//Input: input *GetAvailabilityProfilesCommandInput -func (s *HighAvailabilityService) GetAvailabilityProfilesCommand(input *GetAvailabilityProfilesCommandInput) (result *AvailabilityProfilesView, resp *http.Response, err error) { - path := "/highAvailability/availabilityProfiles" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err 
!= nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetAvailabilityProfilesCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddAvailabilityProfileCommand - Create an Availability Profile -//RequestType: POST -//Input: input *AddAvailabilityProfileCommandInput -func (s *HighAvailabilityService) AddAvailabilityProfileCommand(input *AddAvailabilityProfileCommandInput) (result *AvailabilityProfileView, resp *http.Response, err error) { - path := "/highAvailability/availabilityProfiles" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddAvailabilityProfileCommandInput struct { - Body AvailabilityProfileView -} - -//GetAvailabilityProfileDescriptorsCommand - Get descriptors for all Availability Profiles -//RequestType: GET -//Input: -func (s *HighAvailabilityService) GetAvailabilityProfileDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) { - path := "/highAvailability/availabilityProfiles/descriptors" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetAvailabilityProfileDescriptorCommand - Get a descriptor for an Availability Profile -//RequestType: GET -//Input: input *GetAvailabilityProfileDescriptorCommandInput -func (s *HighAvailabilityService) GetAvailabilityProfileDescriptorCommand(input *GetAvailabilityProfileDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) { - path := "/highAvailability/availabilityProfiles/descriptors/{availabilityProfileType}" - path = strings.Replace(path, "{availabilityProfileType}", input.AvailabilityProfileType, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetAvailabilityProfileDescriptorCommandInput struct { - AvailabilityProfileType string -} - -//DeleteAvailabilityProfileCommand - Delete an Availability Profile -//RequestType: DELETE -//Input: input *DeleteAvailabilityProfileCommandInput -func (s *HighAvailabilityService) DeleteAvailabilityProfileCommand(input *DeleteAvailabilityProfileCommandInput) (resp *http.Response, err error) { - path := "/highAvailability/availabilityProfiles/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteAvailabilityProfileCommandInput struct { - Id string -} - -//GetAvailabilityProfileCommand - Get an Availability Profile -//RequestType: GET -//Input: input *GetAvailabilityProfileCommandInput -func (s *HighAvailabilityService) GetAvailabilityProfileCommand(input *GetAvailabilityProfileCommandInput) (result *AvailabilityProfileView, resp 
*http.Response, err error) { - path := "/highAvailability/availabilityProfiles/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetAvailabilityProfileCommandInput struct { - Id string -} - -//UpdateAvailabilityProfileCommand - Update an Availability Profile -//RequestType: PUT -//Input: input *UpdateAvailabilityProfileCommandInput -func (s *HighAvailabilityService) UpdateAvailabilityProfileCommand(input *UpdateAvailabilityProfileCommandInput) (result *AvailabilityProfileView, resp *http.Response, err error) { - path := "/highAvailability/availabilityProfiles/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateAvailabilityProfileCommandInput struct { - Body AvailabilityProfileView - Id string -} - -//GetLoadBalancingStrategiesCommand - Get all Load Balancing Strategies -//RequestType: GET -//Input: input *GetLoadBalancingStrategiesCommandInput -func (s *HighAvailabilityService) GetLoadBalancingStrategiesCommand(input *GetLoadBalancingStrategiesCommandInput) (result *LoadBalancingStrategiesView, resp *http.Response, err error) { - path := "/highAvailability/loadBalancingStrategies" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetLoadBalancingStrategiesCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddLoadBalancingStrategyCommand - Create a Load Balancing Strategy -//RequestType: POST -//Input: input *AddLoadBalancingStrategyCommandInput -func (s *HighAvailabilityService) AddLoadBalancingStrategyCommand(input *AddLoadBalancingStrategyCommandInput) (result *LoadBalancingStrategyView, resp *http.Response, err error) { - path := "/highAvailability/loadBalancingStrategies" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddLoadBalancingStrategyCommandInput struct { - Body LoadBalancingStrategyView -} - -//GetLoadBalancingStrategyDescriptorsCommand - Get descriptors for all Load Balancing Strategies -//RequestType: GET -//Input: -func (s *HighAvailabilityService) 
GetLoadBalancingStrategyDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) { - path := "/highAvailability/loadBalancingStrategies/descriptors" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetLoadBalancingStrategyDescriptorCommand - Get a descriptor for a Load Balancing Strategy -//RequestType: GET -//Input: input *GetLoadBalancingStrategyDescriptorCommandInput -func (s *HighAvailabilityService) GetLoadBalancingStrategyDescriptorCommand(input *GetLoadBalancingStrategyDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) { - path := "/highAvailability/loadBalancingStrategies/descriptors/{loadBalancingStrategyType}" - path = strings.Replace(path, "{loadBalancingStrategyType}", input.LoadBalancingStrategyType, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetLoadBalancingStrategyDescriptorCommandInput struct { - LoadBalancingStrategyType string -} - -//DeleteLoadBalancingStrategyCommand - Delete a Load Balancing Strategy -//RequestType: DELETE -//Input: input *DeleteLoadBalancingStrategyCommandInput -func (s *HighAvailabilityService) DeleteLoadBalancingStrategyCommand(input *DeleteLoadBalancingStrategyCommandInput) (resp *http.Response, err error) { - path := "/highAvailability/loadBalancingStrategies/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteLoadBalancingStrategyCommandInput struct { - Id string -} - -//GetLoadBalancingStrategyCommand - Get a Load Balancing Strategy -//RequestType: GET -//Input: input *GetLoadBalancingStrategyCommandInput -func (s *HighAvailabilityService) GetLoadBalancingStrategyCommand(input *GetLoadBalancingStrategyCommandInput) (result *LoadBalancingStrategyView, resp *http.Response, err error) { - path := "/highAvailability/loadBalancingStrategies/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetLoadBalancingStrategyCommandInput struct { - Id string -} - -//UpdateLoadBalancingStrategyCommand - Update a Load Balancing Strategy -//RequestType: PUT -//Input: input *UpdateLoadBalancingStrategyCommandInput -func (s *HighAvailabilityService) UpdateLoadBalancingStrategyCommand(input *UpdateLoadBalancingStrategyCommandInput) (result *LoadBalancingStrategyView, resp *http.Response, err error) { - path := "/highAvailability/loadBalancingStrategies/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - 
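Every command method in these generated service files repeats the same request/response plumbing seen here: build a *url.URL relative to the admin API context, create the request through the shared client, then decode the JSON body into the result pointer. A minimal sketch of that shared plumbing, assuming a pared-down client that keeps only the pieces these methods actually touch (the vendored client additionally handles auth, debug logging and API error mapping):

package sketch

import (
	"bytes"
	"encoding/json"
	"net/http"
	"net/url"
)

// client is a stand-in for the SDK's Client; only the fields the
// generated command methods use are reproduced here.
type client struct {
	baseURL    *url.URL // e.g. https://localhost:9000
	context    string   // e.g. "/pa-admin-api/v3"
	httpClient *http.Client
}

// newRequest mirrors the shape of s.client.newRequest: resolve the
// relative path against the base URL and JSON-encode an optional body.
func (c *client) newRequest(method string, rel *url.URL, body interface{}) (*http.Request, error) {
	u := c.baseURL.ResolveReference(rel)
	var buf bytes.Buffer
	if body != nil {
		if err := json.NewEncoder(&buf).Encode(body); err != nil {
			return nil, err
		}
	}
	req, err := http.NewRequest(method, u.String(), &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	return req, nil
}

// do mirrors s.client.do: execute the request and, when v is non-nil,
// decode the JSON response into it.
func (c *client) do(req *http.Request, v interface{}) (*http.Response, error) {
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if v != nil {
		err = json.NewDecoder(resp.Body).Decode(v)
	}
	return resp, err
}

Keeping newRequest/do on one shared client is what lets each per-resource service stay a thin, mechanically generated wrapper.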
if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateLoadBalancingStrategyCommandInput struct { - Body LoadBalancingStrategyView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/hsmProviders.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/hsmProviders.go deleted file mode 100644 index 2d011113..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/hsmProviders.go +++ /dev/null @@ -1,185 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type HsmProvidersService service - -type HsmProvidersAPI interface { - GetHsmProvidersCommand(input *GetHsmProvidersCommandInput) (result *HsmProviderView, resp *http.Response, err error) - AddHsmProviderCommand(input *AddHsmProviderCommandInput) (result *HsmProviderView, resp *http.Response, err error) - GetHsmProviderDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) - DeleteHsmProviderCommand(input *DeleteHsmProviderCommandInput) (resp *http.Response, err error) - GetHsmProviderCommand(input *GetHsmProviderCommandInput) (result *HsmProviderView, resp *http.Response, err error) - UpdateHsmProviderCommand(input *UpdateHsmProviderCommandInput) (result *HsmProviderView, resp *http.Response, err error) -} - -//GetHsmProvidersCommand - Get all HSM Providers -//RequestType: GET -//Input: input *GetHsmProvidersCommandInput -func (s *HsmProvidersService) GetHsmProvidersCommand(input *GetHsmProvidersCommandInput) (result *HsmProviderView, resp *http.Response, err error) { - path := "/hsmProviders" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetHsmProvidersCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddHsmProviderCommand - Create an HSM Provider -//RequestType: POST -//Input: input *AddHsmProviderCommandInput -func (s *HsmProvidersService) AddHsmProviderCommand(input *AddHsmProviderCommandInput) (result *HsmProviderView, resp *http.Response, err error) { - path := "/hsmProviders" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddHsmProviderCommandInput struct { - Body HsmProviderView -} - -//GetHsmProviderDescriptorsCommand - Get descriptors for all HSM Providers -//RequestType: GET -//Input: -func (s *HsmProvidersService) GetHsmProviderDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) { - path := "/hsmProviders/descriptors" - rel := &url.URL{Path: 
fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//DeleteHsmProviderCommand - Delete an HSM Provider -//RequestType: DELETE -//Input: input *DeleteHsmProviderCommandInput -func (s *HsmProvidersService) DeleteHsmProviderCommand(input *DeleteHsmProviderCommandInput) (resp *http.Response, err error) { - path := "/hsmProviders/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteHsmProviderCommandInput struct { - Id string -} - -//GetHsmProviderCommand - Get an HSM Provider -//RequestType: GET -//Input: input *GetHsmProviderCommandInput -func (s *HsmProvidersService) GetHsmProviderCommand(input *GetHsmProviderCommandInput) (result *HsmProviderView, resp *http.Response, err error) { - path := "/hsmProviders/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetHsmProviderCommandInput struct { - Id string -} - -//UpdateHsmProviderCommand - Update an HSM Provider -//RequestType: PUT -//Input: input *UpdateHsmProviderCommandInput -func (s *HsmProvidersService) UpdateHsmProviderCommand(input *UpdateHsmProviderCommandInput) (result *HsmProviderView, resp *http.Response, err error) { - path := "/hsmProviders/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateHsmProviderCommandInput struct { - Body HsmProviderView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpConfig.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpConfig.go deleted file mode 100644 index 7edbd000..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpConfig.go +++ /dev/null @@ -1,268 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type HttpConfigService service - -type HttpConfigAPI interface { - DeleteHttpMonitoringCommand() (resp *http.Response, err error) - GetHttpMonitoringCommand() (result *HttpMonitoringView, resp *http.Response, err error) - UpdateHttpMonitoringCommand(input *UpdateHttpMonitoringCommandInput) (result *HttpMonitoringView, resp *http.Response, err error) - DeleteHostSourceCommand() (resp *http.Response, err error) - GetHostSourceCommand() (result *HostMultiValueSourceView, resp *http.Response, err error) - UpdateHostSourceCommand(input *UpdateHostSourceCommandInput) (result *HostMultiValueSourceView, resp *http.Response, err error) - DeleteIpSourceCommand() (resp *http.Response, err error) - GetIpSourceCommand() (result *IpMultiValueSourceView, resp *http.Response, err error) - 
UpdateIpSourceCommand(input *UpdateIpSourceCommandInput) (result *IpMultiValueSourceView, resp *http.Response, err error) - DeleteProtoSourceCommand() (resp *http.Response, err error) - GetProtoSourceCommand() (result *ProtocolSourceView, resp *http.Response, err error) - UpdateProtocolSourceCommand(input *UpdateProtocolSourceCommandInput) (result *ProtocolSourceView, resp *http.Response, err error) -} - -//DeleteHttpMonitoringCommand - Resets the HTTP monitoring auditLevel to default value -//RequestType: DELETE -//Input: -func (s *HttpConfigService) DeleteHttpMonitoringCommand() (resp *http.Response, err error) { - path := "/httpConfig/monitoring" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetHttpMonitoringCommand - Get the HTTP monitoring auditLevel -//RequestType: GET -//Input: -func (s *HttpConfigService) GetHttpMonitoringCommand() (result *HttpMonitoringView, resp *http.Response, err error) { - path := "/httpConfig/monitoring" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateHttpMonitoringCommand - Update the HTTP monitoring auditLevel -//RequestType: PUT -//Input: input *UpdateHttpMonitoringCommandInput -func (s *HttpConfigService) UpdateHttpMonitoringCommand(input *UpdateHttpMonitoringCommandInput) (result *HttpMonitoringView, resp *http.Response, err error) { - path := "/httpConfig/monitoring" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateHttpMonitoringCommandInput struct { - Body HttpMonitoringView -} - -//DeleteHostSourceCommand - Resets the HTTP request Host Source type to default values -//RequestType: DELETE -//Input: -func (s *HttpConfigService) DeleteHostSourceCommand() (resp *http.Response, err error) { - path := "/httpConfig/request/hostSource" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetHostSourceCommand - Get the HTTP request Host Source type -//RequestType: GET -//Input: -func (s *HttpConfigService) GetHostSourceCommand() (result *HostMultiValueSourceView, resp *http.Response, err error) { - path := "/httpConfig/request/hostSource" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateHostSourceCommand - Update the HTTP request Host Source type -//RequestType: PUT -//Input: input *UpdateHostSourceCommandInput -func (s *HttpConfigService) UpdateHostSourceCommand(input *UpdateHostSourceCommandInput) (result *HostMultiValueSourceView, resp *http.Response, err error) { - path := 
"/httpConfig/request/hostSource" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateHostSourceCommandInput struct { - Body HostMultiValueSourceView -} - -//DeleteIpSourceCommand - Resets the HTTP request IP Source type to default values -//RequestType: DELETE -//Input: -func (s *HttpConfigService) DeleteIpSourceCommand() (resp *http.Response, err error) { - path := "/httpConfig/request/ipSource" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetIpSourceCommand - Get the HTTP request IP Source type -//RequestType: GET -//Input: -func (s *HttpConfigService) GetIpSourceCommand() (result *IpMultiValueSourceView, resp *http.Response, err error) { - path := "/httpConfig/request/ipSource" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateIpSourceCommand - Update the HTTP request IP Source type -//RequestType: PUT -//Input: input *UpdateIpSourceCommandInput -func (s *HttpConfigService) UpdateIpSourceCommand(input *UpdateIpSourceCommandInput) (result *IpMultiValueSourceView, resp *http.Response, err error) { - path := "/httpConfig/request/ipSource" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateIpSourceCommandInput struct { - Body IpMultiValueSourceView -} - -//DeleteProtoSourceCommand - Resets the HTTP request Protocol Source type to default values -//RequestType: DELETE -//Input: -func (s *HttpConfigService) DeleteProtoSourceCommand() (resp *http.Response, err error) { - path := "/httpConfig/request/protocolSource" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetProtoSourceCommand - Get the HTTP request Protocol Source type -//RequestType: GET -//Input: -func (s *HttpConfigService) GetProtoSourceCommand() (result *ProtocolSourceView, resp *http.Response, err error) { - path := "/httpConfig/request/protocolSource" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateProtocolSourceCommand - Update the HTTP request Protocol Source type -//RequestType: PUT -//Input: input *UpdateProtocolSourceCommandInput -func (s *HttpConfigService) UpdateProtocolSourceCommand(input *UpdateProtocolSourceCommandInput) (result *ProtocolSourceView, resp *http.Response, err error) { 
- path := "/httpConfig/request/protocolSource" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateProtocolSourceCommandInput struct { - Body ProtocolSourceView -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpsListeners.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpsListeners.go deleted file mode 100644 index 56deb1de..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/httpsListeners.go +++ /dev/null @@ -1,99 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type HttpsListenersService service - -type HttpsListenersAPI interface { - GetHttpsListenersCommand(input *GetHttpsListenersCommandInput) (result *HttpsListenersView, resp *http.Response, err error) - GetHttpsListenerCommand(input *GetHttpsListenerCommandInput) (result *HttpsListenerView, resp *http.Response, err error) - UpdateHttpsListener(input *UpdateHttpsListenerInput) (result *HttpsListenerView, resp *http.Response, err error) -} - -//GetHttpsListenersCommand - Get all HTTPS Listeners -//RequestType: GET -//Input: input *GetHttpsListenersCommandInput -func (s *HttpsListenersService) GetHttpsListenersCommand(input *GetHttpsListenersCommandInput) (result *HttpsListenersView, resp *http.Response, err error) { - path := "/httpsListeners" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetHttpsListenersCommandInput struct { - SortKey string - Order string -} - -//GetHttpsListenerCommand - Get an HTTPS Listener -//RequestType: GET -//Input: input *GetHttpsListenerCommandInput -func (s *HttpsListenersService) GetHttpsListenerCommand(input *GetHttpsListenerCommandInput) (result *HttpsListenerView, resp *http.Response, err error) { - path := "/httpsListeners/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetHttpsListenerCommandInput struct { - Id string -} - -//UpdateHttpsListener - Update an HTTPS Listener -//RequestType: PUT -//Input: input *UpdateHttpsListenerInput -func (s *HttpsListenersService) UpdateHttpsListener(input *UpdateHttpsListenerInput) (result *HttpsListenerView, resp *http.Response, err error) { - path := "/httpsListeners/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateHttpsListenerInput struct { - Body 
HttpsListenerView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/identityMappings.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/identityMappings.go deleted file mode 100644 index 02be8649..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/identityMappings.go +++ /dev/null @@ -1,211 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type IdentityMappingsService service - -type IdentityMappingsAPI interface { - GetIdentityMappingsCommand(input *GetIdentityMappingsCommandInput) (result *IdentityMappingsView, resp *http.Response, err error) - AddIdentityMappingCommand(input *AddIdentityMappingCommandInput) (result *IdentityMappingView, resp *http.Response, err error) - GetIdentityMappingDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) - GetIdentityMappingDescriptorCommand(input *GetIdentityMappingDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) - DeleteIdentityMappingCommand(input *DeleteIdentityMappingCommandInput) (resp *http.Response, err error) - GetIdentityMappingCommand(input *GetIdentityMappingCommandInput) (result *IdentityMappingView, resp *http.Response, err error) - UpdateIdentityMappingCommand(input *UpdateIdentityMappingCommandInput) (result *IdentityMappingView, resp *http.Response, err error) -} - -//GetIdentityMappingsCommand - Get all Identity Mappings -//RequestType: GET -//Input: input *GetIdentityMappingsCommandInput -func (s *IdentityMappingsService) GetIdentityMappingsCommand(input *GetIdentityMappingsCommandInput) (result *IdentityMappingsView, resp *http.Response, err error) { - path := "/identityMappings" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetIdentityMappingsCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddIdentityMappingCommand - Create an Identity Mapping -//RequestType: POST -//Input: input *AddIdentityMappingCommandInput -func (s *IdentityMappingsService) AddIdentityMappingCommand(input *AddIdentityMappingCommandInput) (result *IdentityMappingView, resp *http.Response, err error) { - path := "/identityMappings" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddIdentityMappingCommandInput struct { - Body IdentityMappingView -} - -//GetIdentityMappingDescriptorsCommand - Get descriptors for all supported Identity Mapping types -//RequestType: GET -//Input: -func (s *IdentityMappingsService) GetIdentityMappingDescriptorsCommand() (result *DescriptorsView, resp 
*http.Response, err error) { - path := "/identityMappings/descriptors" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetIdentityMappingDescriptorCommand - Get descriptor for an Identity Mapping type -//RequestType: GET -//Input: input *GetIdentityMappingDescriptorCommandInput -func (s *IdentityMappingsService) GetIdentityMappingDescriptorCommand(input *GetIdentityMappingDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) { - path := "/identityMappings/descriptors/{identityMappingType}" - path = strings.Replace(path, "{identityMappingType}", input.IdentityMappingType, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetIdentityMappingDescriptorCommandInput struct { - IdentityMappingType string -} - -//DeleteIdentityMappingCommand - Delete an Identity Mapping -//RequestType: DELETE -//Input: input *DeleteIdentityMappingCommandInput -func (s *IdentityMappingsService) DeleteIdentityMappingCommand(input *DeleteIdentityMappingCommandInput) (resp *http.Response, err error) { - path := "/identityMappings/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteIdentityMappingCommandInput struct { - Id string -} - -//GetIdentityMappingCommand - Get an Identity Mapping -//RequestType: GET -//Input: input *GetIdentityMappingCommandInput -func (s *IdentityMappingsService) GetIdentityMappingCommand(input *GetIdentityMappingCommandInput) (result *IdentityMappingView, resp *http.Response, err error) { - path := "/identityMappings/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetIdentityMappingCommandInput struct { - Id string -} - -//UpdateIdentityMappingCommand - Update an Identity Mapping -//RequestType: PUT -//Input: input *UpdateIdentityMappingCommandInput -func (s *IdentityMappingsService) UpdateIdentityMappingCommand(input *UpdateIdentityMappingCommandInput) (result *IdentityMappingView, resp *http.Response, err error) { - path := "/identityMappings/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateIdentityMappingCommandInput struct { - Body IdentityMappingView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/keyPairs.go 
b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/keyPairs.go deleted file mode 100644 index f1f015e8..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/keyPairs.go +++ /dev/null @@ -1,391 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type KeyPairsService service - -type KeyPairsAPI interface { - GetKeyPairsCommand(input *GetKeyPairsCommandInput) (result *KeyPairsView, resp *http.Response, err error) - GenerateKeyPairCommand(input *GenerateKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) - ImportKeyPairCommand(input *ImportKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) - KeyAlgorithms() (result *KeyAlgorithmsView, resp *http.Response, err error) - GetKeypairsCreatableGeneralNamesCommand() (result *SanTypes, resp *http.Response, err error) - DeleteKeyPairCommand(input *DeleteKeyPairCommandInput) (resp *http.Response, err error) - GetKeyPairCommand(input *GetKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) - PatchKeyPairCommand(input *PatchKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) - UpdateKeyPairCommand(input *UpdateKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) - ExportKeyPairCert(input *ExportKeyPairCertInput) (resp *http.Response, err error) - GenerateCsrCommand(input *GenerateCsrCommandInput) (resp *http.Response, err error) - ImportCSRResponseCommand(input *ImportCSRResponseCommandInput) (result *KeyPairView, resp *http.Response, err error) - ExportKeyPair(input *ExportKeyPairInput) (resp *http.Response, err error) - DeleteChainCertificateCommand(input *DeleteChainCertificateCommandInput) (resp *http.Response, err error) -} - -//GetKeyPairsCommand - Get all Key Pairs -//RequestType: GET -//Input: input *GetKeyPairsCommandInput -func (s *KeyPairsService) GetKeyPairsCommand(input *GetKeyPairsCommandInput) (result *KeyPairsView, resp *http.Response, err error) { - path := "/keyPairs" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Alias != "" { - q.Set("alias", input.Alias) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetKeyPairsCommandInput struct { - Page string - NumberPerPage string - Filter string - Alias string - SortKey string - Order string -} - -//GenerateKeyPairCommand - Generate a Key Pair -//RequestType: POST -//Input: input *GenerateKeyPairCommandInput -func (s *KeyPairsService) GenerateKeyPairCommand(input *GenerateKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) { - path := "/keyPairs/generate" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GenerateKeyPairCommandInput struct { - Body 
NewKeyPairConfigView -} - -//ImportKeyPairCommand - Import a Key Pair from a PKCS12 file -//RequestType: POST -//Input: input *ImportKeyPairCommandInput -func (s *KeyPairsService) ImportKeyPairCommand(input *ImportKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) { - path := "/keyPairs/import" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type ImportKeyPairCommandInput struct { - Body PKCS12FileImportDocView -} - -//KeyAlgorithms - Get the key algorithms supported by Key Pair generation -//RequestType: GET -//Input: -func (s *KeyPairsService) KeyAlgorithms() (result *KeyAlgorithmsView, resp *http.Response, err error) { - path := "/keyPairs/keyAlgorithms" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetKeypairsCreatableGeneralNamesCommand - Get the valid General Names for creating Subject Alternative Names -//RequestType: GET -//Input: -func (s *KeyPairsService) GetKeypairsCreatableGeneralNamesCommand() (result *SanTypes, resp *http.Response, err error) { - path := "/keyPairs/subjectAlternativeTypes" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//DeleteKeyPairCommand - Delete a Key Pair -//RequestType: DELETE -//Input: input *DeleteKeyPairCommandInput -func (s *KeyPairsService) DeleteKeyPairCommand(input *DeleteKeyPairCommandInput) (resp *http.Response, err error) { - path := "/keyPairs/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteKeyPairCommandInput struct { - Id string -} - -//GetKeyPairCommand - Get a Key Pair -//RequestType: GET -//Input: input *GetKeyPairCommandInput -func (s *KeyPairsService) GetKeyPairCommand(input *GetKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) { - path := "/keyPairs/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetKeyPairCommandInput struct { - Id string -} - -//PatchKeyPairCommand - Update the chainCertificates of a Key Pair -//RequestType: PATCH -//Input: input *PatchKeyPairCommandInput -func (s *KeyPairsService) PatchKeyPairCommand(input *PatchKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) { - path := "/keyPairs/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err 
:= s.client.newRequest("PATCH", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type PatchKeyPairCommandInput struct { - Body ChainCertificatesDocView - Id string -} - -//UpdateKeyPairCommand - Update a Key Pair -//RequestType: PUT -//Input: input *UpdateKeyPairCommandInput -func (s *KeyPairsService) UpdateKeyPairCommand(input *UpdateKeyPairCommandInput) (result *KeyPairView, resp *http.Response, err error) { - path := "/keyPairs/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateKeyPairCommandInput struct { - Body PKCS12FileImportDocView - Id string -} - -//ExportKeyPairCert - Export only the Certificate from a Key Pair -//RequestType: GET -//Input: input *ExportKeyPairCertInput -func (s *KeyPairsService) ExportKeyPairCert(input *ExportKeyPairCertInput) (resp *http.Response, err error) { - path := "/keyPairs/{id}/certificate" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type ExportKeyPairCertInput struct { - Id string -} - -//GenerateCsrCommand - Generate a Certificate Signing Request for a Key Pair -//RequestType: GET -//Input: input *GenerateCsrCommandInput -func (s *KeyPairsService) GenerateCsrCommand(input *GenerateCsrCommandInput) (resp *http.Response, err error) { - path := "/keyPairs/{id}/csr" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type GenerateCsrCommandInput struct { - Id string -} - -//ImportCSRResponseCommand - Import a Certificate Signing Request response -//RequestType: POST -//Input: input *ImportCSRResponseCommandInput -func (s *KeyPairsService) ImportCSRResponseCommand(input *ImportCSRResponseCommandInput) (result *KeyPairView, resp *http.Response, err error) { - path := "/keyPairs/{id}/csr" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type ImportCSRResponseCommandInput struct { - Body CSRResponseImportDocView - Id string -} - -//ExportKeyPair - Export a Key Pair in the PKCS12 file format -//RequestType: POST -//Input: input *ExportKeyPairInput -func (s *KeyPairsService) ExportKeyPair(input *ExportKeyPairInput) (resp *http.Response, err error) { - path := "/keyPairs/{id}/pkcs12" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != 
nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type ExportKeyPairInput struct { - Body ExportParameters - Id string -} - -//DeleteChainCertificateCommand - Delete a Chain Certificate -//RequestType: DELETE -//Input: input *DeleteChainCertificateCommandInput -func (s *KeyPairsService) DeleteChainCertificateCommand(input *DeleteChainCertificateCommandInput) (resp *http.Response, err error) { - path := "/keyPairs/{keyPairId}/chainCertificates/{chainCertificateId}" - path = strings.Replace(path, "{keyPairId}", input.KeyPairId, -1) - - path = strings.Replace(path, "{chainCertificateId}", input.ChainCertificateId, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteChainCertificateCommandInput struct { - KeyPairId string - ChainCertificateId string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/license.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/license.go deleted file mode 100644 index 699f1cbb..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/license.go +++ /dev/null @@ -1,56 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type LicenseService service - -type LicenseAPI interface { - GetLicenseCommand() (result *LicenseView, resp *http.Response, err error) - ImportLicenseCommand(input *ImportLicenseCommandInput) (result *LicenseView, resp *http.Response, err error) -} - -//GetLicenseCommand - Get the License File -//RequestType: GET -//Input: -func (s *LicenseService) GetLicenseCommand() (result *LicenseView, resp *http.Response, err error) { - path := "/license" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//ImportLicenseCommand - Import a License -//RequestType: POST -//Input: input *ImportLicenseCommandInput -func (s *LicenseService) ImportLicenseCommand(input *ImportLicenseCommandInput) (result *LicenseView, resp *http.Response, err error) { - path := "/license" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type ImportLicenseCommandInput struct { - Body LicenseImportDocView -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models/models.go similarity index 95% rename from vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go rename to vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models/models.go index 5592cf8e..648fadd7 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/models/models.go @@ -1,4 +1,4 @@ -package pingaccess +package models import ( "encoding/json" @@ -718,32 +718,32 @@ type OAuthKeyManagementView struct { 
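The only substantive change in this rename hunk is Go-side naming: each snake_case field becomes idiomatic CamelCase while keeping its original json tag, so the serialized OIDC metadata document is unchanged on the wire. A small self-contained check that the tags, not the field names, decide the JSON output:

package main

import (
	"encoding/json"
	"fmt"
)

// Two-field excerpt of the renamed struct; the marshaller keys the
// output off the json tags, not the Go identifiers.
type oidcMetadata struct {
	AuthorizationEndpoint *string `json:"authorization_endpoint"`
	Issuer                *string `json:"issuer"`
}

func main() {
	issuer := "https://pf.example.com"
	authz := issuer + "/as/authorization.oauth2"
	b, _ := json.Marshal(oidcMetadata{AuthorizationEndpoint: &authz, Issuer: &issuer})
	fmt.Println(string(b))
	// {"authorization_endpoint":"https://pf.example.com/as/authorization.oauth2","issuer":"https://pf.example.com"}
}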
//OIDCProviderMetadata - The OpenID Connect provider's metadata. type OIDCProviderMetadata struct { - Authorization_endpoint *string `json:"authorization_endpoint"` - Backchannel_authentication_endpoint *string `json:"backchannel_authentication_endpoint"` - Claim_types_supported *[]*string `json:"claim_types_supported"` - Claims_parameter_supported *bool `json:"claims_parameter_supported"` - Claims_supported *[]*string `json:"claims_supported"` - Code_challenge_methods_supported *[]*string `json:"code_challenge_methods_supported"` - End_session_endpoint *string `json:"end_session_endpoint"` - Grant_types_supported *[]*string `json:"grant_types_supported"` - Id_token_signing_alg_values_supported *[]*string `json:"id_token_signing_alg_values_supported"` - Introspection_endpoint *string `json:"introspection_endpoint"` - Issuer *string `json:"issuer"` - Jwks_uri *string `json:"jwks_uri"` - Ping_end_session_endpoint *string `json:"ping_end_session_endpoint"` - Ping_revoked_sris_endpoint *string `json:"ping_revoked_sris_endpoint"` - Request_object_signing_alg_values_supported *[]*string `json:"request_object_signing_alg_values_supported"` - Request_parameter_supported *bool `json:"request_parameter_supported"` - Request_uri_parameter_supported *bool `json:"request_uri_parameter_supported"` - Response_modes_supported *[]*string `json:"response_modes_supported"` - Response_types_supported *[]*string `json:"response_types_supported"` - Revocation_endpoint *string `json:"revocation_endpoint"` - Scopes_supported *[]*string `json:"scopes_supported"` - Subject_types_supported *[]*string `json:"subject_types_supported"` - Token_endpoint *string `json:"token_endpoint"` - Token_endpoint_auth_methods_supported *[]*string `json:"token_endpoint_auth_methods_supported"` - Userinfo_endpoint *string `json:"userinfo_endpoint"` - Userinfo_signing_alg_values_supported *[]*string `json:"userinfo_signing_alg_values_supported"` + AuthorizationEndpoint *string `json:"authorization_endpoint"` + BackchannelAuthenticationEndpoint *string `json:"backchannel_authentication_endpoint"` + ClaimTypesSupported *[]*string `json:"claim_types_supported"` + ClaimsParameterSupported *bool `json:"claims_parameter_supported"` + ClaimsSupported *[]*string `json:"claims_supported"` + CodeChallengeMethodsSupported *[]*string `json:"code_challenge_methods_supported"` + EndSessionEndpoint *string `json:"end_session_endpoint"` + GrantTypesSupported *[]*string `json:"grant_types_supported"` + IdTokenSigningAlgValuesSupported *[]*string `json:"id_token_signing_alg_values_supported"` + IntrospectionEndpoint *string `json:"introspection_endpoint"` + Issuer *string `json:"issuer"` + JwksUri *string `json:"jwks_uri"` + PingEndSessionEndpoint *string `json:"ping_end_session_endpoint"` + PingRevokedSrisEndpoint *string `json:"ping_revoked_sris_endpoint"` + RequestObjectSigningAlgValuesSupported *[]*string `json:"request_object_signing_alg_values_supported"` + RequestParameterSupported *bool `json:"request_parameter_supported"` + RequestUriParameterSupported *bool `json:"request_uri_parameter_supported"` + ResponseModesSupported *[]*string `json:"response_modes_supported"` + ResponseTypesSupported *[]*string `json:"response_types_supported"` + RevocationEndpoint *string `json:"revocation_endpoint"` + ScopesSupported *[]*string `json:"scopes_supported"` + SubjectTypesSupported *[]*string `json:"subject_types_supported"` + TokenEndpoint *string `json:"token_endpoint"` + TokenEndpointAuthMethodsSupported *[]*string 
`json:"token_endpoint_auth_methods_supported"` + UserinfoEndpoint *string `json:"userinfo_endpoint"` + UserinfoSigningAlgValuesSupported *[]*string `json:"userinfo_signing_alg_values_supported"` } //OIDCProviderPluginView - An OpenID Connect provider plugin. diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauth.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauth.go deleted file mode 100644 index f2e04962..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauth.go +++ /dev/null @@ -1,76 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type OauthService service - -type OauthAPI interface { - DeleteAuthorizationServerCommand() (resp *http.Response, err error) - GetAuthorizationServerCommand() (result *AuthorizationServerView, resp *http.Response, err error) - UpdateAuthorizationServerCommand(input *UpdateAuthorizationServerCommandInput) (result *AuthorizationServerView, resp *http.Response, err error) -} - -//DeleteAuthorizationServerCommand - Resets the OpenID Connect Provider configuration to default values -//RequestType: DELETE -//Input: -func (s *OauthService) DeleteAuthorizationServerCommand() (resp *http.Response, err error) { - path := "/oauth/authServer" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetAuthorizationServerCommand - Get Authorization Server configuration -//RequestType: GET -//Input: -func (s *OauthService) GetAuthorizationServerCommand() (result *AuthorizationServerView, resp *http.Response, err error) { - path := "/oauth/authServer" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateAuthorizationServerCommand - Update OAuth 2.0 Authorization Server configuration -//RequestType: PUT -//Input: input *UpdateAuthorizationServerCommandInput -func (s *OauthService) UpdateAuthorizationServerCommand(input *UpdateAuthorizationServerCommandInput) (result *AuthorizationServerView, resp *http.Response, err error) { - path := "/oauth/authServer" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateAuthorizationServerCommandInput struct { - Body AuthorizationServerView -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauthKeyManagement.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauthKeyManagement.go deleted file mode 100644 index ac5b98e5..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oauthKeyManagement.go +++ /dev/null @@ -1,120 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type OauthKeyManagementService service - -type OauthKeyManagementAPI interface { - DeleteOAuthKeyManagementCommand() (resp *http.Response, err error) - GetOAuthKeyManagementCommand() (result *OAuthKeyManagementView, resp *http.Response, err error) - 
UpdateOAuthKeyManagementCommand(input *UpdateOAuthKeyManagementCommandInput) (result *OAuthKeyManagementView, resp *http.Response, err error) - GetOAuthKeySetCommand() (result *KeySetView, resp *http.Response, err error) - UpdateOAuthKeySetCommand(input *UpdateOAuthKeySetCommandInput) (result *KeySetView, resp *http.Response, err error) -} - -//DeleteOAuthKeyManagementCommand - Resets the OAuth Key Management configuration to default values -//RequestType: DELETE -//Input: -func (s *OauthKeyManagementService) DeleteOAuthKeyManagementCommand() (resp *http.Response, err error) { - path := "/oauthKeyManagement" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetOAuthKeyManagementCommand - Get the OAuth Key Management configuration -//RequestType: GET -//Input: -func (s *OauthKeyManagementService) GetOAuthKeyManagementCommand() (result *OAuthKeyManagementView, resp *http.Response, err error) { - path := "/oauthKeyManagement" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateOAuthKeyManagementCommand - Update the OAuth Key Management configuration -//RequestType: PUT -//Input: input *UpdateOAuthKeyManagementCommandInput -func (s *OauthKeyManagementService) UpdateOAuthKeyManagementCommand(input *UpdateOAuthKeyManagementCommandInput) (result *OAuthKeyManagementView, resp *http.Response, err error) { - path := "/oauthKeyManagement" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateOAuthKeyManagementCommandInput struct { - Body OAuthKeyManagementView -} - -//GetOAuthKeySetCommand - Get the OAuth key set configuration -//RequestType: GET -//Input: -func (s *OauthKeyManagementService) GetOAuthKeySetCommand() (result *KeySetView, resp *http.Response, err error) { - path := "/oauthKeyManagement/keySet" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateOAuthKeySetCommand - Update the OAuth key set configuration -//RequestType: PUT -//Input: input *UpdateOAuthKeySetCommandInput -func (s *OauthKeyManagementService) UpdateOAuthKeySetCommand(input *UpdateOAuthKeySetCommandInput) (result *KeySetView, resp *http.Response, err error) { - path := "/oauthKeyManagement/keySet" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateOAuthKeySetCommandInput struct { - Body KeySetView -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oidc.go 
b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oidc.go deleted file mode 100644 index 36424c23..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/oidc.go +++ /dev/null @@ -1,143 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type OidcService service - -type OidcAPI interface { - DeleteOIDCProviderCommand() (resp *http.Response, err error) - GetOIDCProviderCommand() (result *OIDCProviderView, resp *http.Response, err error) - UpdateOIDCProviderCommand(input *UpdateOIDCProviderCommandInput) (result *OIDCProviderView, resp *http.Response, err error) - GetOIDCProviderPluginDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) - GetOIDCProviderPluginDescriptorCommand(input *GetOIDCProviderPluginDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) - GetOIDCProviderMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) -} - -//DeleteOIDCProviderCommand - Resets the OpenID Connect Provider configuration to default values -//RequestType: DELETE -//Input: -func (s *OidcService) DeleteOIDCProviderCommand() (resp *http.Response, err error) { - path := "/oidc/provider" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetOIDCProviderCommand - Get the OpenID Connect Provider configuration -//RequestType: GET -//Input: -func (s *OidcService) GetOIDCProviderCommand() (result *OIDCProviderView, resp *http.Response, err error) { - path := "/oidc/provider" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateOIDCProviderCommand - Update the OpenID Connect Provider configuration -//RequestType: PUT -//Input: input *UpdateOIDCProviderCommandInput -func (s *OidcService) UpdateOIDCProviderCommand(input *UpdateOIDCProviderCommandInput) (result *OIDCProviderView, resp *http.Response, err error) { - path := "/oidc/provider" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateOIDCProviderCommandInput struct { - Body OIDCProviderView -} - -//GetOIDCProviderPluginDescriptorsCommand - Get descriptors for all OIDC Provider plugins -//RequestType: GET -//Input: -func (s *OidcService) GetOIDCProviderPluginDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) { - path := "/oidc/provider/descriptors" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetOIDCProviderPluginDescriptorCommand - Get a descriptor for a OIDC Provider plugin -//RequestType: GET -//Input: input *GetOIDCProviderPluginDescriptorCommandInput -func (s *OidcService) GetOIDCProviderPluginDescriptorCommand(input 
*GetOIDCProviderPluginDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) { - path := "/oidc/provider/descriptors/{pluginType}" - path = strings.Replace(path, "{pluginType}", input.PluginType, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetOIDCProviderPluginDescriptorCommandInput struct { - PluginType string -} - -//GetOIDCProviderMetadataCommand - Get the OpenID Connect Provider's metadata -//RequestType: GET -//Input: -func (s *OidcService) GetOIDCProviderMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) { - path := "/oidc/provider/metadata" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go index 1557dc9e..df75dce4 100644 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingaccess.go @@ -1,252 +1,5 @@ package pingaccess -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "net/http/httputil" - "net/url" - "runtime" - "strings" -) - -const logReqMsg = `DEBUG: Request %s Details: ----[ REQUEST ]-------------------------------------- -%s ------------------------------------------------------` -const logRespMsg = `DEBUG: Response %s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -type Client struct { - Username string - Password string - BaseURL *url.URL - Context string - LogDebug bool - httpClient *http.Client - - AccessTokenValidators AccessTokenValidatorsAPI - Acme AcmeAPI - AdminConfig AdminConfigAPI - AdminSessionInfo AdminSessionInfoAPI - Agents AgentsAPI - Applications ApplicationsAPI - Auth AuthAPI - AuthTokenManagement AuthTokenManagementAPI - AuthnReqLists AuthnReqListsAPI - Backup BackupAPI - Certificates CertificatesAPI - Config ConfigAPI - EngineListeners EngineListenersAPI - Engines EnginesAPI - GlobalUnprotectedResources GlobalUnprotectedResourcesAPI - HighAvailability HighAvailabilityAPI - HsmProviders HsmProvidersAPI - HttpConfig HttpConfigAPI - HttpsListeners HttpsListenersAPI - IdentityMappings IdentityMappingsAPI - KeyPairs KeyPairsAPI - License LicenseAPI - Oauth OauthAPI - OauthKeyManagement OauthKeyManagementAPI - Oidc OidcAPI - Pingfederate PingfederateAPI - Pingone PingoneAPI - Proxies ProxiesAPI - Redirects RedirectsAPI - RejectionHandlers RejectionHandlersAPI - Rules RulesAPI - Rulesets RulesetsAPI - SharedSecrets SharedSecretsAPI - SiteAuthenticators SiteAuthenticatorsAPI - Sites SitesAPI - ThirdPartyServices ThirdPartyServicesAPI - TokenProvider TokenProviderAPI - TrustedCertificateGroups TrustedCertificateGroupsAPI - UnknownResources UnknownResourcesAPI - Users UsersAPI - Version VersionAPI - Virtualhosts VirtualhostsAPI - WebSessionManagement WebSessionManagementAPI - WebSessions WebSessionsAPI -} - -type service struct { - client *Client 
-} - -func NewClient(username string, password string, baseUrl *url.URL, context string, httpClient *http.Client) *Client { - if httpClient == nil { - httpClient = http.DefaultClient - } - c := &Client{httpClient: httpClient} - c.Username = username - c.Password = password - c.BaseURL = baseUrl - c.Context = context - - c.AccessTokenValidators = &AccessTokenValidatorsService{client: c} - c.Acme = &AcmeService{client: c} - c.AdminConfig = &AdminConfigService{client: c} - c.AdminSessionInfo = &AdminSessionInfoService{client: c} - c.Agents = &AgentsService{client: c} - c.Applications = &ApplicationsService{client: c} - c.Auth = &AuthService{client: c} - c.AuthTokenManagement = &AuthTokenManagementService{client: c} - c.AuthnReqLists = &AuthnReqListsService{client: c} - c.Backup = &BackupService{client: c} - c.Certificates = &CertificatesService{client: c} - c.Config = &ConfigService{client: c} - c.EngineListeners = &EngineListenersService{client: c} - c.Engines = &EnginesService{client: c} - c.GlobalUnprotectedResources = &GlobalUnprotectedResourcesService{client: c} - c.HighAvailability = &HighAvailabilityService{client: c} - c.HsmProviders = &HsmProvidersService{client: c} - c.HttpConfig = &HttpConfigService{client: c} - c.HttpsListeners = &HttpsListenersService{client: c} - c.IdentityMappings = &IdentityMappingsService{client: c} - c.KeyPairs = &KeyPairsService{client: c} - c.License = &LicenseService{client: c} - c.Oauth = &OauthService{client: c} - c.OauthKeyManagement = &OauthKeyManagementService{client: c} - c.Oidc = &OidcService{client: c} - c.Pingfederate = &PingfederateService{client: c} - c.Pingone = &PingoneService{client: c} - c.Proxies = &ProxiesService{client: c} - c.Redirects = &RedirectsService{client: c} - c.RejectionHandlers = &RejectionHandlersService{client: c} - c.Rules = &RulesService{client: c} - c.Rulesets = &RulesetsService{client: c} - c.SharedSecrets = &SharedSecretsService{client: c} - c.SiteAuthenticators = &SiteAuthenticatorsService{client: c} - c.Sites = &SitesService{client: c} - c.ThirdPartyServices = &ThirdPartyServicesService{client: c} - c.TokenProvider = &TokenProviderService{client: c} - c.TrustedCertificateGroups = &TrustedCertificateGroupsService{client: c} - c.UnknownResources = &UnknownResourcesService{client: c} - c.Users = &UsersService{client: c} - c.Version = &VersionService{client: c} - c.Virtualhosts = &VirtualhostsService{client: c} - c.WebSessionManagement = &WebSessionManagementService{client: c} - c.WebSessions = &WebSessionsService{client: c} - return c -} - -func (c *Client) newRequest(method string, path *url.URL, body interface{}) (*http.Request, error) { - u := c.BaseURL.ResolveReference(path) - var buf io.ReadWriter - if body != nil { - buf = new(bytes.Buffer) - err := json.NewEncoder(buf).Encode(body) - if err != nil { - return nil, err - } - } - req, err := http.NewRequest(method, u.String(), buf) - if err != nil { - return nil, err - } - if body != nil { - req.Header.Set("Content-Type", "application/json") - } - req.SetBasicAuth(c.Username, c.Password) - req.Header.Add("X-Xsrf-Header", "PingAccess") - req.Header.Add("User-Agent", fmt.Sprintf("%s/%s (%s; %s; %s)", SDKName, SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)) - return req, nil -} - -func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) { - if c.LogDebug { - requestDump, err := httputil.DumpRequest(req, true) - if err != nil { - fmt.Println(err) - } - log.Printf(logReqMsg, "pingaccess-sdk-go", string(requestDump)) - } - resp, err := 
c.httpClient.Do(req) - if err != nil { - return nil, err - } - if c.LogDebug { - responseDump, err := httputil.DumpResponse(resp, true) - if err != nil { - fmt.Println(err) - } - log.Printf(logRespMsg, "pingaccess-sdk-go", string(responseDump)) - } - err = CheckResponse(resp) - if err != nil { - return resp, err - } - defer resp.Body.Close() - - if v != nil { - if w, ok := v.(io.Writer); ok { - _, err = io.Copy(w, resp.Body) - } else { - decErr := json.NewDecoder(resp.Body).Decode(v) - if decErr == io.EOF { - decErr = nil // ignore EOF errors caused by empty response body - } - if decErr != nil { - err = decErr - } - } - } - return resp, err -} - -// CheckResponse checks the API response for errors, and returns them if -// present. A response is considered an error if it has a status code outside -// the 200 range. -// API error responses are expected to have either no response -// body, or a JSON response body that maps to ErrorResponse. Any other -// response body will be silently ignored. -func CheckResponse(r *http.Response) error { - if c := r.StatusCode; 200 <= c && c <= 299 { - return nil - } - - errorResponse := ApiErrorView{} - data, err := ioutil.ReadAll(r.Body) - if err == nil && data != nil { - err = json.Unmarshal(data, &errorResponse) - if err != nil { - return fmt.Errorf("Unable to parse error response: " + string(data)) - } - } - - return &errorResponse -} - -func (r *ApiErrorView) Error() (message string) { - if len(*r.Flash) > 0 { - var msgs []string - for i := range *r.Flash { - msgs = append(msgs, *(*r.Flash)[i]) - } - message = strings.Join(msgs, ", ") - } - - if len(r.Form) > 0 { - for s, i := range r.Form { - var msgs []string - for _, j := range *i { - msgs = append(msgs, *j) - } - message += fmt.Sprintf(":\n%s contains %d validation failures:\n\t%s", s, len(msgs), strings.Join(msgs, "\n\t")) - } - } - - return -} - // Bool is a helper routine that allocates a new bool value // to store v and returns a pointer to it. 
func Bool(v bool) *bool { return &v } diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingfederate.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingfederate.go deleted file mode 100644 index 600d0ca1..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingfederate.go +++ /dev/null @@ -1,308 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type PingfederateService service - -type PingfederateAPI interface { - DeletePingFederateCommand() (resp *http.Response, err error) - GetPingFederateCommand() (result *PingFederateRuntimeView, resp *http.Response, err error) - UpdatePingFederateCommand(input *UpdatePingFederateCommandInput) (result *PingFederateRuntimeView, resp *http.Response, err error) - DeletePingFederateAccessTokensCommand() (resp *http.Response, err error) - GetPingFederateAccessTokensCommand() (result *PingFederateAccessTokenView, resp *http.Response, err error) - UpdatePingFederateAccessTokensCommand(input *UpdatePingFederateAccessTokensCommandInput) (result *PingFederateAccessTokenView, resp *http.Response, err error) - DeletePingFederateAdminCommand() (resp *http.Response, err error) - GetPingFederateAdminCommand() (result *PingFederateAdminView, resp *http.Response, err error) - UpdatePingFederateAdminCommand(input *UpdatePingFederateAdminCommandInput) (result *PingFederateAdminView, resp *http.Response, err error) - GetLegacyPingFederateMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) - DeletePingFederateRuntimeCommand() (resp *http.Response, err error) - GetPingFederateRuntimeCommand() (result *PingFederateMetadataRuntimeView, resp *http.Response, err error) - UpdatePingFederateRuntimeCommand(input *UpdatePingFederateRuntimeCommandInput) (result *PingFederateMetadataRuntimeView, resp *http.Response, err error) - GetPingFederateMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) -} - -//DeletePingFederateCommand - [Attention: This endpoint "/pingfederate" is deprecated. Please use /pingfederate/runtime to configure PingFederate instead] Resets the PingFederate configuration to default values -//RequestType: DELETE -//Input: -func (s *PingfederateService) DeletePingFederateCommand() (resp *http.Response, err error) { - path := "/pingfederate" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetPingFederateCommand - [Attention: This endpoint "/pingfederate" is deprecated. Please use /pingfederate/runtime to configure PingFederate instead] Get the PingFederate configuration -//RequestType: GET -//Input: -func (s *PingfederateService) GetPingFederateCommand() (result *PingFederateRuntimeView, resp *http.Response, err error) { - path := "/pingfederate" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdatePingFederateCommand - [Attention: This endpoint "/pingfederate" is deprecated. 
Please use /pingfederate/runtime to configure PingFederate instead] Update the PingFederate configuration -//RequestType: PUT -//Input: input *UpdatePingFederateCommandInput -func (s *PingfederateService) UpdatePingFederateCommand(input *UpdatePingFederateCommandInput) (result *PingFederateRuntimeView, resp *http.Response, err error) { - path := "/pingfederate" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdatePingFederateCommandInput struct { - Body PingFederateRuntimeView -} - -//DeletePingFederateAccessTokensCommand - Resets the PingAccess OAuth Client configuration to default values -//RequestType: DELETE -//Input: -func (s *PingfederateService) DeletePingFederateAccessTokensCommand() (resp *http.Response, err error) { - path := "/pingfederate/accessTokens" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetPingFederateAccessTokensCommand - Get the PingAccess OAuth Client configuration -//RequestType: GET -//Input: -func (s *PingfederateService) GetPingFederateAccessTokensCommand() (result *PingFederateAccessTokenView, resp *http.Response, err error) { - path := "/pingfederate/accessTokens" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdatePingFederateAccessTokensCommand - Update the PingFederate OAuth Client configuration -//RequestType: PUT -//Input: input *UpdatePingFederateAccessTokensCommandInput -func (s *PingfederateService) UpdatePingFederateAccessTokensCommand(input *UpdatePingFederateAccessTokensCommandInput) (result *PingFederateAccessTokenView, resp *http.Response, err error) { - path := "/pingfederate/accessTokens" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdatePingFederateAccessTokensCommandInput struct { - Body PingFederateAccessTokenView -} - -//DeletePingFederateAdminCommand - Resets the PingFederate Admin configuration to default values -//RequestType: DELETE -//Input: -func (s *PingfederateService) DeletePingFederateAdminCommand() (resp *http.Response, err error) { - path := "/pingfederate/admin" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetPingFederateAdminCommand - Get the PingFederate Admin configuration -//RequestType: GET -//Input: -func (s *PingfederateService) GetPingFederateAdminCommand() (result *PingFederateAdminView, resp *http.Response, err error) { - path := "/pingfederate/admin" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, 
err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdatePingFederateAdminCommand - Update the PingFederate Admin configuration -//RequestType: PUT -//Input: input *UpdatePingFederateAdminCommandInput -func (s *PingfederateService) UpdatePingFederateAdminCommand(input *UpdatePingFederateAdminCommandInput) (result *PingFederateAdminView, resp *http.Response, err error) { - path := "/pingfederate/admin" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdatePingFederateAdminCommandInput struct { - Body PingFederateAdminView -} - -//GetLegacyPingFederateMetadataCommand - [Attention: The endpoint "/pingfederate" is deprecated. This metadata corresponds to that configuration."/pingfederate/runtime" and "/pingfederate/runtime/metadata" should be used instead.] Get the PingFederate metadata -//RequestType: GET -//Input: -func (s *PingfederateService) GetLegacyPingFederateMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) { - path := "/pingfederate/metadata" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//DeletePingFederateRuntimeCommand - Resets the PingFederate configuration -//RequestType: DELETE -//Input: -func (s *PingfederateService) DeletePingFederateRuntimeCommand() (resp *http.Response, err error) { - path := "/pingfederate/runtime" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetPingFederateRuntimeCommand - Get the PingFederate configuration -//RequestType: GET -//Input: -func (s *PingfederateService) GetPingFederateRuntimeCommand() (result *PingFederateMetadataRuntimeView, resp *http.Response, err error) { - path := "/pingfederate/runtime" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdatePingFederateRuntimeCommand - Update the PingFederate configuration -//RequestType: PUT -//Input: input *UpdatePingFederateRuntimeCommandInput -func (s *PingfederateService) UpdatePingFederateRuntimeCommand(input *UpdatePingFederateRuntimeCommandInput) (result *PingFederateMetadataRuntimeView, resp *http.Response, err error) { - path := "/pingfederate/runtime" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdatePingFederateRuntimeCommandInput struct { - Body PingFederateMetadataRuntimeView -} - 
-//GetPingFederateMetadataCommand - Get the PingFederate Runtime metadata -//RequestType: GET -//Input: -func (s *PingfederateService) GetPingFederateMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) { - path := "/pingfederate/runtime/metadata" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingone.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingone.go deleted file mode 100644 index be383e60..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/pingone.go +++ /dev/null @@ -1,96 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type PingoneService service - -type PingoneAPI interface { - DeletePingOne4CCommand() (resp *http.Response, err error) - GetPingOne4CCommand() (result *PingOne4CView, resp *http.Response, err error) - UpdatePingOne4CCommand(input *UpdatePingOne4CCommandInput) (result *PingOne4CView, resp *http.Response, err error) - GetPingOne4CMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) -} - -//DeletePingOne4CCommand - Resets the PingOne For Customers configuration to default values -//RequestType: DELETE -//Input: -func (s *PingoneService) DeletePingOne4CCommand() (resp *http.Response, err error) { - path := "/pingone/customers" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetPingOne4CCommand - Get the PingOne For Customers configuration -//RequestType: GET -//Input: -func (s *PingoneService) GetPingOne4CCommand() (result *PingOne4CView, resp *http.Response, err error) { - path := "/pingone/customers" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdatePingOne4CCommand - Update the PingOne For Customers configuration -//RequestType: PUT -//Input: input *UpdatePingOne4CCommandInput -func (s *PingoneService) UpdatePingOne4CCommand(input *UpdatePingOne4CCommandInput) (result *PingOne4CView, resp *http.Response, err error) { - path := "/pingone/customers" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdatePingOne4CCommandInput struct { - Body PingOne4CView -} - -//GetPingOne4CMetadataCommand - Get the PingOne for Customers metadata -//RequestType: GET -//Input: -func (s *PingoneService) GetPingOne4CMetadataCommand() (result *OIDCProviderMetadata, resp *http.Response, err error) { - path := "/pingone/customers/metadata" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, 
&result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/proxies.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/proxies.go deleted file mode 100644 index 5432799d..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/proxies.go +++ /dev/null @@ -1,165 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type ProxiesService service - -type ProxiesAPI interface { - GetProxiesCommand(input *GetProxiesCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) - AddProxyCommand(input *AddProxyCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) - DeleteProxyCommand(input *DeleteProxyCommandInput) (resp *http.Response, err error) - GetProxyCommand(input *GetProxyCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) - UpdateProxyCommand(input *UpdateProxyCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) -} - -//GetProxiesCommand - Get all Proxies -//RequestType: GET -//Input: input *GetProxiesCommandInput -func (s *ProxiesService) GetProxiesCommand(input *GetProxiesCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) { - path := "/proxies" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetProxiesCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddProxyCommand - Create a Proxy -//RequestType: POST -//Input: input *AddProxyCommandInput -func (s *ProxiesService) AddProxyCommand(input *AddProxyCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) { - path := "/proxies" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddProxyCommandInput struct { - Body HttpClientProxyView -} - -//DeleteProxyCommand - Delete a Proxy -//RequestType: DELETE -//Input: input *DeleteProxyCommandInput -func (s *ProxiesService) DeleteProxyCommand(input *DeleteProxyCommandInput) (resp *http.Response, err error) { - path := "/proxies/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteProxyCommandInput struct { - Id string -} - -//GetProxyCommand - Get a Proxy -//RequestType: GET -//Input: input *GetProxyCommandInput -func (s 
*ProxiesService) GetProxyCommand(input *GetProxyCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) { - path := "/proxies/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetProxyCommandInput struct { - Id string -} - -//UpdateProxyCommand - Update a Proxy -//RequestType: PUT -//Input: input *UpdateProxyCommandInput -func (s *ProxiesService) UpdateProxyCommand(input *UpdateProxyCommandInput) (result *HttpClientProxyView, resp *http.Response, err error) { - path := "/proxies/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateProxyCommandInput struct { - Body HttpClientProxyView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/redirects.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/redirects.go deleted file mode 100644 index bd08bd72..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/redirects.go +++ /dev/null @@ -1,169 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type RedirectsService service - -type RedirectsAPI interface { - GetRedirectsCommand(input *GetRedirectsCommandInput) (result *RedirectsView, resp *http.Response, err error) - AddRedirectCommand(input *AddRedirectCommandInput) (result *RedirectView, resp *http.Response, err error) - DeleteRedirectCommand(input *DeleteRedirectCommandInput) (resp *http.Response, err error) - GetRedirectCommand(input *GetRedirectCommandInput) (result *RedirectView, resp *http.Response, err error) - UpdateRedirectCommand(input *UpdateRedirectCommandInput) (result *RedirectView, resp *http.Response, err error) -} - -//GetRedirectsCommand - Get all Redirects -//RequestType: GET -//Input: input *GetRedirectsCommandInput -func (s *RedirectsService) GetRedirectsCommand(input *GetRedirectsCommandInput) (result *RedirectsView, resp *http.Response, err error) { - path := "/redirects" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Source != "" { - q.Set("source", input.Source) - } - if input.Target != "" { - q.Set("target", input.Target) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetRedirectsCommandInput struct { - Page string - NumberPerPage string - Filter string - Source string - Target string - SortKey string - Order string -} - -//AddRedirectCommand - Add a Redirect -//RequestType: POST -//Input: input 
*AddRedirectCommandInput -func (s *RedirectsService) AddRedirectCommand(input *AddRedirectCommandInput) (result *RedirectView, resp *http.Response, err error) { - path := "/redirects" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddRedirectCommandInput struct { - Body RedirectView -} - -//DeleteRedirectCommand - Delete a Redirect -//RequestType: DELETE -//Input: input *DeleteRedirectCommandInput -func (s *RedirectsService) DeleteRedirectCommand(input *DeleteRedirectCommandInput) (resp *http.Response, err error) { - path := "/redirects/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteRedirectCommandInput struct { - Id string -} - -//GetRedirectCommand - Get a Redirect -//RequestType: GET -//Input: input *GetRedirectCommandInput -func (s *RedirectsService) GetRedirectCommand(input *GetRedirectCommandInput) (result *RedirectView, resp *http.Response, err error) { - path := "/redirects/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetRedirectCommandInput struct { - Id string -} - -//UpdateRedirectCommand - Update a Redirect -//RequestType: PUT -//Input: input *UpdateRedirectCommandInput -func (s *RedirectsService) UpdateRedirectCommand(input *UpdateRedirectCommandInput) (result *RedirectView, resp *http.Response, err error) { - path := "/redirects/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateRedirectCommandInput struct { - Body RedirectView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rejectionHandlers.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rejectionHandlers.go deleted file mode 100644 index 3d0bd3c4..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rejectionHandlers.go +++ /dev/null @@ -1,211 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type RejectionHandlersService service - -type RejectionHandlersAPI interface { - GetRejectionHandlersCommand(input *GetRejectionHandlersCommandInput) (result *RejectionHandlersView, resp *http.Response, err error) - AddRejectionHandlerCommand(input *AddRejectionHandlerCommandInput) (result *RejectionHandlerView, resp *http.Response, err error) - GetRejectionHandlerDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) - GetRejecitonHandlerDescriptorCommand(input *GetRejecitonHandlerDescriptorCommandInput) (result 
*DescriptorView, resp *http.Response, err error) - DeleteRejectionHandlerCommand(input *DeleteRejectionHandlerCommandInput) (resp *http.Response, err error) - GetRejectionHandlerCommand(input *GetRejectionHandlerCommandInput) (result *RejectionHandlerView, resp *http.Response, err error) - UpdateRejectionHandlerCommand(input *UpdateRejectionHandlerCommandInput) (result *RejectionHandlerView, resp *http.Response, err error) -} - -//GetRejectionHandlersCommand - Get all Rejection Handlers -//RequestType: GET -//Input: input *GetRejectionHandlersCommandInput -func (s *RejectionHandlersService) GetRejectionHandlersCommand(input *GetRejectionHandlersCommandInput) (result *RejectionHandlersView, resp *http.Response, err error) { - path := "/rejectionHandlers" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetRejectionHandlersCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddRejectionHandlerCommand - Create a Rejection Handler -//RequestType: POST -//Input: input *AddRejectionHandlerCommandInput -func (s *RejectionHandlersService) AddRejectionHandlerCommand(input *AddRejectionHandlerCommandInput) (result *RejectionHandlerView, resp *http.Response, err error) { - path := "/rejectionHandlers" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddRejectionHandlerCommandInput struct { - Body RejectionHandlerView -} - -//GetRejectionHandlerDescriptorsCommand - Get descriptors for all supported Rejection Handler types -//RequestType: GET -//Input: -func (s *RejectionHandlersService) GetRejectionHandlerDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) { - path := "/rejectionHandlers/descriptors" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetRejecitonHandlerDescriptorCommand - Get descriptor for a Rejection Handler type -//RequestType: GET -//Input: input *GetRejecitonHandlerDescriptorCommandInput -func (s *RejectionHandlersService) GetRejecitonHandlerDescriptorCommand(input *GetRejecitonHandlerDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) { - path := "/rejectionHandlers/descriptors/{rejectionHandlerType}" - path = strings.Replace(path, "{rejectionHandlerType}", input.RejectionHandlerType, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", 
rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetRejecitonHandlerDescriptorCommandInput struct { - RejectionHandlerType string -} - -//DeleteRejectionHandlerCommand - Delete a Rejection Handler -//RequestType: DELETE -//Input: input *DeleteRejectionHandlerCommandInput -func (s *RejectionHandlersService) DeleteRejectionHandlerCommand(input *DeleteRejectionHandlerCommandInput) (resp *http.Response, err error) { - path := "/rejectionHandlers/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteRejectionHandlerCommandInput struct { - Id string -} - -//GetRejectionHandlerCommand - Get a Rejection Handler -//RequestType: GET -//Input: input *GetRejectionHandlerCommandInput -func (s *RejectionHandlersService) GetRejectionHandlerCommand(input *GetRejectionHandlerCommandInput) (result *RejectionHandlerView, resp *http.Response, err error) { - path := "/rejectionHandlers/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetRejectionHandlerCommandInput struct { - Id string -} - -//UpdateRejectionHandlerCommand - Update a Rejection Handler -//RequestType: PUT -//Input: input *UpdateRejectionHandlerCommandInput -func (s *RejectionHandlersService) UpdateRejectionHandlerCommand(input *UpdateRejectionHandlerCommandInput) (result *RejectionHandlerView, resp *http.Response, err error) { - path := "/rejectionHandlers/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateRejectionHandlerCommandInput struct { - Body RejectionHandlerView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/request/request.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/request/request.go new file mode 100644 index 00000000..0005833c --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/request/request.go @@ -0,0 +1,234 @@ +package request + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/httputil" + "net/url" + "reflect" + "regexp" + "runtime" + "strings" + "time" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + + "github.com/google/uuid" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" +) + +const logReqMsg = `DEBUG: Request %s/%s: %s +---[ REQUEST ]-------------------------------------- +%s +-----------------------------------------------------` +const logRespMsg = `DEBUG: Response %s/%s: 
%s +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +// A Request is the service request to be made. +type Request struct { + Config config.Config + ClientInfo metadata.ClientInfo + + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + QueryParams map[string]string +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg config.Config, clientInfo metadata.ClientInfo, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + + var buf io.ReadWriter + if params != nil { + buf = new(bytes.Buffer) + err := json.NewEncoder(buf).Encode(params) + if err != nil { + return nil + } + } + + httpReq, _ := http.NewRequest(method, "", buf) + + httpReq.SetBasicAuth(*cfg.Username, *cfg.Password) + httpReq.Header.Add("X-Xsrf-Header", "pingaccess") + httpReq.Header.Add("User-Agent", fmt.Sprintf("%s/%s (%s; %s; %s)", pingaccess.SDKName, pingaccess.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)) + + if params != nil { + httpReq.Header.Set("Content-Type", "application/json") + } + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = fmt.Errorf("invalid endpoint uri %s", err) + } + + q := httpReq.URL.Query() + for k, v := range operation.QueryParams { + if v != "" { + q.Set(k, v) + } + } + httpReq.URL.RawQuery = q.Encode() + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + + AttemptTime: time.Now(), + Time: time.Now(), + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + RequestID: uuid.New().String(), + } + r.Body = bytes.NewReader([]byte{}) + + return r +} + +// Send will send the request, returning error if errors are encountered. 
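+//
+// A minimal call-site sketch (illustrative assumptions, not code in this
+// file: cfg is a populated config.Config, info a metadata.ClientInfo for
+// the target admin API, and models.AcmeServerView a stand-in for whichever
+// response model applies):
+//
+//	op := &request.Operation{
+//		Name:       "GetAcmeServerCommand",
+//		HTTPMethod: "GET",
+//		HTTPPath:   "/acme/servers/1",
+//	}
+//	result := &models.AcmeServerView{}
+//	if err := request.New(cfg, info, op, nil, result).Send(); err != nil {
+//		log.Fatal(err)
+//	}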
+//
+func (r *Request) Send() error {
+	if *r.Config.LogDebug {
+		requestDump, err := httputil.DumpRequest(r.HTTPRequest, true)
+		requestDumpStr := string(requestDump)
+		if *r.Config.MaskAuthorization {
+			r, _ := regexp.Compile(`Authorization: (.*)`)
+			requestDumpStr = r.ReplaceAllString(requestDumpStr, "Authorization: ********")
+		}
+		if err != nil {
+			fmt.Println(err)
+		}
+		log.Printf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, r.RequestID, requestDumpStr)
+	}
+	r.AttemptTime = time.Now()
+
+	resp, err := r.Config.HTTPClient.Do(r.HTTPRequest)
+	if err != nil {
+		r.Error = err
+		return err
+	}
+	r.HTTPResponse = resp
+
+	if *r.Config.LogDebug {
+		requestDump, err := httputil.DumpResponse(r.HTTPResponse, true)
+		if err != nil {
+			fmt.Println(err)
+		}
+		log.Printf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, r.RequestID, string(requestDump))
+	}
+
+	r.CheckResponse()
+	if r.Error != nil {
+		return r.Error
+	}
+
+	if r.DataFilled() {
+		//v := reflect.Indirect(reflect.ValueOf(r.Data))
+		defer r.HTTPResponse.Body.Close()
+
+		if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&r.Data); err != nil {
+			if err == io.EOF {
+				err = nil // ignore EOF errors caused by empty response body
+			} else {
+				r.Error = fmt.Errorf("failed to unmarshal response %s", err)
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// CheckResponse checks the API response for errors, and returns them if
+// present. A response is considered an error if it has a status code outside
+// the 200 range.
+// API error responses are expected to have either no response
+// body, or a JSON response body that maps to ApiErrorView. Any other
+// response body will be silently ignored.
+func (r *Request) CheckResponse() {
+	if c := r.HTTPResponse.StatusCode; 200 <= c && c <= 299 {
+		return
+	}
+	r.Data = nil
+	errorResponse := PingAccessError{}
+	data, err := ioutil.ReadAll(r.HTTPResponse.Body)
+	if err == nil && data != nil {
+		err = json.Unmarshal(data, &errorResponse)
+		if err != nil {
+			r.Error = fmt.Errorf("unable to parse error response: %s", string(data))
+			return
+		}
+	}
+	r.Error = &errorResponse
+}
+
+// PingAccessError occurs when PingAccess returns a non-2XX response
+type PingAccessError struct {
+	models.ApiErrorView
+}
+
+func (r *PingAccessError) Error() (message string) {
+	if len(*r.Flash) > 0 {
+		var msgs []string
+		for i := range *r.Flash {
+			msgs = append(msgs, *(*r.Flash)[i])
+		}
+		message = strings.Join(msgs, ", ")
+	}
+
+	if len(r.Form) > 0 {
+		for s, i := range r.Form {
+			var msgs []string
+			for _, j := range *i {
+				msgs = append(msgs, *j)
+			}
+			message += fmt.Sprintf(":\n%s contains %d validation failures:\n\t%s", s, len(msgs), strings.Join(msgs, "\n\t"))
+		}
+	}
+
+	return
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rules.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rules.go
deleted file mode 100644
index 079531f5..00000000
--- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rules.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package pingaccess
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-)
-
-type RulesService service
-
-type RulesAPI interface {
-	GetRulesCommand(input *GetRulesCommandInput) (result *RulesView, resp *http.Response, err error)
-	AddRuleCommand(input *AddRuleCommandInput) (result *RuleView, resp *http.Response, err error)
-	GetRuleDescriptorsCommand() (result *RuleDescriptorsView, resp *http.Response, err error)
-	GetRuleDescriptorCommand(input *GetRuleDescriptorCommandInput) (result *RuleDescriptorView, resp *http.Response, err error)
-	DeleteRuleCommand(input *DeleteRuleCommandInput) (resp *http.Response, err error)
-	GetRuleCommand(input *GetRuleCommandInput) (result *RuleView, resp *http.Response, err error)
-	UpdateRuleCommand(input *UpdateRuleCommandInput) (result *RuleView, resp *http.Response, err error)
-}
-
-//GetRulesCommand - Get all Rules
-//RequestType: GET
-//Input: input *GetRulesCommandInput
-func (s *RulesService) GetRulesCommand(input *GetRulesCommandInput) (result *RulesView, resp *http.Response, err error) {
-	path := "/rules"
-	rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)}
-	q := rel.Query()
-	if input.Page != "" {
-		q.Set("page", input.Page)
-	}
-	if input.NumberPerPage != "" {
-		q.Set("numberPerPage", input.NumberPerPage)
-	}
-	if input.Filter != "" {
-		q.Set("filter", input.Filter)
-	}
-	if input.Name != "" {
-		q.Set("name", input.Name)
-	}
-	if input.SortKey != "" {
-		q.Set("sortKey", input.SortKey)
-	}
-	if input.Order != "" {
-		q.Set("order", input.Order)
-	}
-	rel.RawQuery = q.Encode()
-	req, err := s.client.newRequest("GET", rel, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	resp, err = s.client.do(req, &result)
-	if err != nil {
-		return result, resp, err
-	}
-	return result, resp, nil
-
-}
-
-type GetRulesCommandInput struct {
-	Page          string
-	NumberPerPage string
-	Filter        string
-	Name          string
-	SortKey       string
-	Order         string
-}
-
-//AddRuleCommand - Add a Rule
-//RequestType: POST
-//Input: input *AddRuleCommandInput
-func (s *RulesService) AddRuleCommand(input *AddRuleCommandInput) (result *RuleView, resp *http.Response, err error) {
-	path := "/rules"
-	rel := &url.URL{Path:
fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddRuleCommandInput struct { - Body RuleView -} - -//GetRuleDescriptorsCommand - Get descriptors for all supported Rule types -//RequestType: GET -//Input: -func (s *RulesService) GetRuleDescriptorsCommand() (result *RuleDescriptorsView, resp *http.Response, err error) { - path := "/rules/descriptors" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetRuleDescriptorCommand - Get descriptor for a Rule type -//RequestType: GET -//Input: input *GetRuleDescriptorCommandInput -func (s *RulesService) GetRuleDescriptorCommand(input *GetRuleDescriptorCommandInput) (result *RuleDescriptorView, resp *http.Response, err error) { - path := "/rules/descriptors/{ruleType}" - path = strings.Replace(path, "{ruleType}", input.RuleType, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetRuleDescriptorCommandInput struct { - RuleType string -} - -//DeleteRuleCommand - Delete a Rule -//RequestType: DELETE -//Input: input *DeleteRuleCommandInput -func (s *RulesService) DeleteRuleCommand(input *DeleteRuleCommandInput) (resp *http.Response, err error) { - path := "/rules/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteRuleCommandInput struct { - Id string -} - -//GetRuleCommand - Get a Rule -//RequestType: GET -//Input: input *GetRuleCommandInput -func (s *RulesService) GetRuleCommand(input *GetRuleCommandInput) (result *RuleView, resp *http.Response, err error) { - path := "/rules/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetRuleCommandInput struct { - Id string -} - -//UpdateRuleCommand - Update a Rule -//RequestType: PUT -//Input: input *UpdateRuleCommandInput -func (s *RulesService) UpdateRuleCommand(input *UpdateRuleCommandInput) (result *RuleView, resp *http.Response, err error) { - path := "/rules/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateRuleCommandInput struct { - Body RuleView - Id string -} diff --git 
a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rulesets.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rulesets.go deleted file mode 100644 index a6f1d856..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/rulesets.go +++ /dev/null @@ -1,205 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type RulesetsService service - -type RulesetsAPI interface { - GetRuleSetsCommand(input *GetRuleSetsCommandInput) (result *RuleSetsView, resp *http.Response, err error) - AddRuleSetCommand(input *AddRuleSetCommandInput) (result *RuleSetView, resp *http.Response, err error) - GetRuleSetElementTypesCommand() (result *RuleSetElementTypesView, resp *http.Response, err error) - GetRuleSetSuccessCriteriaCommand() (result *RuleSetSuccessCriteriaView, resp *http.Response, err error) - DeleteRuleSetCommand(input *DeleteRuleSetCommandInput) (resp *http.Response, err error) - GetRuleSetCommand(input *GetRuleSetCommandInput) (result *RuleSetView, resp *http.Response, err error) - UpdateRuleSetCommand(input *UpdateRuleSetCommandInput) (result *RuleSetView, resp *http.Response, err error) -} - -//GetRuleSetsCommand - Get all Rule Sets -//RequestType: GET -//Input: input *GetRuleSetsCommandInput -func (s *RulesetsService) GetRuleSetsCommand(input *GetRuleSetsCommandInput) (result *RuleSetsView, resp *http.Response, err error) { - path := "/rulesets" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetRuleSetsCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddRuleSetCommand - Add a Rule Set -//RequestType: POST -//Input: input *AddRuleSetCommandInput -func (s *RulesetsService) AddRuleSetCommand(input *AddRuleSetCommandInput) (result *RuleSetView, resp *http.Response, err error) { - path := "/rulesets" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddRuleSetCommandInput struct { - Body RuleSetView -} - -//GetRuleSetElementTypesCommand - Get all Rule Set Element Types -//RequestType: GET -//Input: -func (s *RulesetsService) GetRuleSetElementTypesCommand() (result *RuleSetElementTypesView, resp *http.Response, err error) { - path := "/rulesets/elementTypes" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetRuleSetSuccessCriteriaCommand - Get all Success Criteria -//RequestType: GET 
-//Input: -func (s *RulesetsService) GetRuleSetSuccessCriteriaCommand() (result *RuleSetSuccessCriteriaView, resp *http.Response, err error) { - path := "/rulesets/successCriteria" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//DeleteRuleSetCommand - Delete a Rule Set -//RequestType: DELETE -//Input: input *DeleteRuleSetCommandInput -func (s *RulesetsService) DeleteRuleSetCommand(input *DeleteRuleSetCommandInput) (resp *http.Response, err error) { - path := "/rulesets/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteRuleSetCommandInput struct { - Id string -} - -//GetRuleSetCommand - Get a Rule Set -//RequestType: GET -//Input: input *GetRuleSetCommandInput -func (s *RulesetsService) GetRuleSetCommand(input *GetRuleSetCommandInput) (result *RuleSetView, resp *http.Response, err error) { - path := "/rulesets/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetRuleSetCommandInput struct { - Id string -} - -//UpdateRuleSetCommand - Update a Rule Set -//RequestType: PUT -//Input: input *UpdateRuleSetCommandInput -func (s *RulesetsService) UpdateRuleSetCommand(input *UpdateRuleSetCommandInput) (result *RuleSetView, resp *http.Response, err error) { - path := "/rulesets/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateRuleSetCommandInput struct { - Body RuleSetView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sharedSecrets.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sharedSecrets.go deleted file mode 100644 index 49defc9f..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sharedSecrets.go +++ /dev/null @@ -1,122 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type SharedSecretsService service - -type SharedSecretsAPI interface { - GetSharedSecretsCommand(input *GetSharedSecretsCommandInput) (result *SharedSecretsView, resp *http.Response, err error) - AddSharedSecretCommand(input *AddSharedSecretCommandInput) (result *SharedSecretView, resp *http.Response, err error) - DeleteSharedSecretCommand(input *DeleteSharedSecretCommandInput) (resp *http.Response, err error) - GetSharedSecretCommand(input *GetSharedSecretCommandInput) (result *SharedSecretView, resp *http.Response, err error) -} - -//GetSharedSecretsCommand - Get all Shared Secrets -//RequestType: GET -//Input: input *GetSharedSecretsCommandInput -func (s 
*SharedSecretsService) GetSharedSecretsCommand(input *GetSharedSecretsCommandInput) (result *SharedSecretsView, resp *http.Response, err error) { - path := "/sharedSecrets" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetSharedSecretsCommandInput struct { - SortKey string - Order string -} - -//AddSharedSecretCommand - Create a Shared Secret -//RequestType: POST -//Input: input *AddSharedSecretCommandInput -func (s *SharedSecretsService) AddSharedSecretCommand(input *AddSharedSecretCommandInput) (result *SharedSecretView, resp *http.Response, err error) { - path := "/sharedSecrets" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddSharedSecretCommandInput struct { - Body SharedSecretView -} - -//DeleteSharedSecretCommand - Delete a Shared Secret -//RequestType: DELETE -//Input: input *DeleteSharedSecretCommandInput -func (s *SharedSecretsService) DeleteSharedSecretCommand(input *DeleteSharedSecretCommandInput) (resp *http.Response, err error) { - path := "/sharedSecrets/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteSharedSecretCommandInput struct { - Id string -} - -//GetSharedSecretCommand - Get a Shared Secret -//RequestType: GET -//Input: input *GetSharedSecretCommandInput -func (s *SharedSecretsService) GetSharedSecretCommand(input *GetSharedSecretCommandInput) (result *SharedSecretView, resp *http.Response, err error) { - path := "/sharedSecrets/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetSharedSecretCommandInput struct { - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/siteAuthenticators.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/siteAuthenticators.go deleted file mode 100644 index 67d9beda..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/siteAuthenticators.go +++ /dev/null @@ -1,211 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type SiteAuthenticatorsService service - -type SiteAuthenticatorsAPI interface { - GetSiteAuthenticatorsCommand(input *GetSiteAuthenticatorsCommandInput) (result *SiteAuthenticatorsView, resp *http.Response, err error) - AddSiteAuthenticatorCommand(input *AddSiteAuthenticatorCommandInput) (result *SiteAuthenticatorView, resp *http.Response, err error) - 
GetSiteAuthenticatorDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) - GetSiteAuthenticatorDescriptorCommand(input *GetSiteAuthenticatorDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) - DeleteSiteAuthenticatorCommand(input *DeleteSiteAuthenticatorCommandInput) (resp *http.Response, err error) - GetSiteAuthenticatorCommand(input *GetSiteAuthenticatorCommandInput) (result *SiteAuthenticatorView, resp *http.Response, err error) - UpdateSiteAuthenticatorCommand(input *UpdateSiteAuthenticatorCommandInput) (result *SiteAuthenticatorView, resp *http.Response, err error) -} - -//GetSiteAuthenticatorsCommand - Get all Site Authenticators -//RequestType: GET -//Input: input *GetSiteAuthenticatorsCommandInput -func (s *SiteAuthenticatorsService) GetSiteAuthenticatorsCommand(input *GetSiteAuthenticatorsCommandInput) (result *SiteAuthenticatorsView, resp *http.Response, err error) { - path := "/siteAuthenticators" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetSiteAuthenticatorsCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddSiteAuthenticatorCommand - Create a Site Authenticator -//RequestType: POST -//Input: input *AddSiteAuthenticatorCommandInput -func (s *SiteAuthenticatorsService) AddSiteAuthenticatorCommand(input *AddSiteAuthenticatorCommandInput) (result *SiteAuthenticatorView, resp *http.Response, err error) { - path := "/siteAuthenticators" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddSiteAuthenticatorCommandInput struct { - Body SiteAuthenticatorView -} - -//GetSiteAuthenticatorDescriptorsCommand - Get descriptors for all supported Site Authenticator types -//RequestType: GET -//Input: -func (s *SiteAuthenticatorsService) GetSiteAuthenticatorDescriptorsCommand() (result *DescriptorsView, resp *http.Response, err error) { - path := "/siteAuthenticators/descriptors" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetSiteAuthenticatorDescriptorCommand - Get descriptor for a Site Authenticator type -//RequestType: GET -//Input: input *GetSiteAuthenticatorDescriptorCommandInput -func (s *SiteAuthenticatorsService) GetSiteAuthenticatorDescriptorCommand(input *GetSiteAuthenticatorDescriptorCommandInput) (result *DescriptorView, resp *http.Response, err error) { - path := 
"/siteAuthenticators/descriptors/{siteAuthenticatorType}" - path = strings.Replace(path, "{siteAuthenticatorType}", input.SiteAuthenticatorType, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetSiteAuthenticatorDescriptorCommandInput struct { - SiteAuthenticatorType string -} - -//DeleteSiteAuthenticatorCommand - Delete a Site Authenticator -//RequestType: DELETE -//Input: input *DeleteSiteAuthenticatorCommandInput -func (s *SiteAuthenticatorsService) DeleteSiteAuthenticatorCommand(input *DeleteSiteAuthenticatorCommandInput) (resp *http.Response, err error) { - path := "/siteAuthenticators/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteSiteAuthenticatorCommandInput struct { - Id string -} - -//GetSiteAuthenticatorCommand - Get a Site Authenticator -//RequestType: GET -//Input: input *GetSiteAuthenticatorCommandInput -func (s *SiteAuthenticatorsService) GetSiteAuthenticatorCommand(input *GetSiteAuthenticatorCommandInput) (result *SiteAuthenticatorView, resp *http.Response, err error) { - path := "/siteAuthenticators/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetSiteAuthenticatorCommandInput struct { - Id string -} - -//UpdateSiteAuthenticatorCommand - Update a Site Authenticator -//RequestType: PUT -//Input: input *UpdateSiteAuthenticatorCommandInput -func (s *SiteAuthenticatorsService) UpdateSiteAuthenticatorCommand(input *UpdateSiteAuthenticatorCommandInput) (result *SiteAuthenticatorView, resp *http.Response, err error) { - path := "/siteAuthenticators/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateSiteAuthenticatorCommandInput struct { - Body SiteAuthenticatorView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sites.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sites.go deleted file mode 100644 index 5ff7571e..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/sites.go +++ /dev/null @@ -1,165 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type SitesService service - -type SitesAPI interface { - GetSitesCommand(input *GetSitesCommandInput) (result *SitesView, resp *http.Response, err error) - AddSiteCommand(input *AddSiteCommandInput) (result *SiteView, resp *http.Response, err error) - DeleteSiteCommand(input *DeleteSiteCommandInput) (resp *http.Response, err error) - GetSiteCommand(input 
*GetSiteCommandInput) (result *SiteView, resp *http.Response, err error) - UpdateSiteCommand(input *UpdateSiteCommandInput) (result *SiteView, resp *http.Response, err error) -} - -//GetSitesCommand - Get all Sites -//RequestType: GET -//Input: input *GetSitesCommandInput -func (s *SitesService) GetSitesCommand(input *GetSitesCommandInput) (result *SitesView, resp *http.Response, err error) { - path := "/sites" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetSitesCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddSiteCommand - Create a Site -//RequestType: POST -//Input: input *AddSiteCommandInput -func (s *SitesService) AddSiteCommand(input *AddSiteCommandInput) (result *SiteView, resp *http.Response, err error) { - path := "/sites" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddSiteCommandInput struct { - Body SiteView -} - -//DeleteSiteCommand - Delete a Site -//RequestType: DELETE -//Input: input *DeleteSiteCommandInput -func (s *SitesService) DeleteSiteCommand(input *DeleteSiteCommandInput) (resp *http.Response, err error) { - path := "/sites/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteSiteCommandInput struct { - Id string -} - -//GetSiteCommand - Get a Site -//RequestType: GET -//Input: input *GetSiteCommandInput -func (s *SitesService) GetSiteCommand(input *GetSiteCommandInput) (result *SiteView, resp *http.Response, err error) { - path := "/sites/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetSiteCommandInput struct { - Id string -} - -//UpdateSiteCommand - Update a Site -//RequestType: PUT -//Input: input *UpdateSiteCommandInput -func (s *SitesService) UpdateSiteCommand(input *UpdateSiteCommandInput) (result *SiteView, resp *http.Response, err error) { - path := "/sites/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return 
nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateSiteCommandInput struct { - Body SiteView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/thirdPartyServices.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/thirdPartyServices.go deleted file mode 100644 index bc8b80aa..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/thirdPartyServices.go +++ /dev/null @@ -1,165 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type ThirdPartyServicesService service - -type ThirdPartyServicesAPI interface { - GetThirdPartyServicesCommand(input *GetThirdPartyServicesCommandInput) (result *ThirdPartyServicesView, resp *http.Response, err error) - AddThirdPartyServiceCommand(input *AddThirdPartyServiceCommandInput) (result *ThirdPartyServiceView, resp *http.Response, err error) - DeleteThirdPartyServiceCommand(input *DeleteThirdPartyServiceCommandInput) (resp *http.Response, err error) - GetThirdPartyServiceCommand(input *GetThirdPartyServiceCommandInput) (result *ThirdPartyServiceView, resp *http.Response, err error) - UpdateThirdPartyServiceCommand(input *UpdateThirdPartyServiceCommandInput) (result *ThirdPartyServiceView, resp *http.Response, err error) -} - -//GetThirdPartyServicesCommand - Get all Third-Party Services -//RequestType: GET -//Input: input *GetThirdPartyServicesCommandInput -func (s *ThirdPartyServicesService) GetThirdPartyServicesCommand(input *GetThirdPartyServicesCommandInput) (result *ThirdPartyServicesView, resp *http.Response, err error) { - path := "/thirdPartyServices" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetThirdPartyServicesCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddThirdPartyServiceCommand - Create a Third-Party Service -//RequestType: POST -//Input: input *AddThirdPartyServiceCommandInput -func (s *ThirdPartyServicesService) AddThirdPartyServiceCommand(input *AddThirdPartyServiceCommandInput) (result *ThirdPartyServiceView, resp *http.Response, err error) { - path := "/thirdPartyServices" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddThirdPartyServiceCommandInput struct { - Body ThirdPartyServiceView -} - -//DeleteThirdPartyServiceCommand - Delete a Third-Party Service -//RequestType: DELETE -//Input: input *DeleteThirdPartyServiceCommandInput -func (s *ThirdPartyServicesService) DeleteThirdPartyServiceCommand(input 
*DeleteThirdPartyServiceCommandInput) (resp *http.Response, err error) { - path := "/thirdPartyServices/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteThirdPartyServiceCommandInput struct { - Id string -} - -//GetThirdPartyServiceCommand - Get a Third-Party Service -//RequestType: GET -//Input: input *GetThirdPartyServiceCommandInput -func (s *ThirdPartyServicesService) GetThirdPartyServiceCommand(input *GetThirdPartyServiceCommandInput) (result *ThirdPartyServiceView, resp *http.Response, err error) { - path := "/thirdPartyServices/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetThirdPartyServiceCommandInput struct { - Id string -} - -//UpdateThirdPartyServiceCommand - Update a Third-Party Service -//RequestType: PUT -//Input: input *UpdateThirdPartyServiceCommandInput -func (s *ThirdPartyServicesService) UpdateThirdPartyServiceCommand(input *UpdateThirdPartyServiceCommandInput) (result *ThirdPartyServiceView, resp *http.Response, err error) { - path := "/thirdPartyServices/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateThirdPartyServiceCommandInput struct { - Body ThirdPartyServiceView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/tokenProvider.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/tokenProvider.go deleted file mode 100644 index 9f02aa84..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/tokenProvider.go +++ /dev/null @@ -1,76 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type TokenProviderService service - -type TokenProviderAPI interface { - DeleteTokenProviderSettingCommand() (resp *http.Response, err error) - GetTokenProviderSettingCommand() (result *TokenProviderSettingView, resp *http.Response, err error) - UpdateTokenProviderSettingCommand(input *UpdateTokenProviderSettingCommandInput) (result *TokenProviderSettingView, resp *http.Response, err error) -} - -//DeleteTokenProviderSettingCommand - Resets the Token Provider settings to default values -//RequestType: DELETE -//Input: -func (s *TokenProviderService) DeleteTokenProviderSettingCommand() (resp *http.Response, err error) { - path := "/tokenProvider/settings" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetTokenProviderSettingCommand - Get the Token Provider settings -//RequestType: GET -//Input: -func (s *TokenProviderService) GetTokenProviderSettingCommand() 
(result *TokenProviderSettingView, resp *http.Response, err error) { - path := "/tokenProvider/settings" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateTokenProviderSettingCommand - Update the Token Provider setting -//RequestType: PUT -//Input: input *UpdateTokenProviderSettingCommandInput -func (s *TokenProviderService) UpdateTokenProviderSettingCommand(input *UpdateTokenProviderSettingCommandInput) (result *TokenProviderSettingView, resp *http.Response, err error) { - path := "/tokenProvider/settings" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateTokenProviderSettingCommandInput struct { - Body TokenProviderSettingView -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/trustedCertificateGroups.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/trustedCertificateGroups.go deleted file mode 100644 index 47410471..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/trustedCertificateGroups.go +++ /dev/null @@ -1,165 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type TrustedCertificateGroupsService service - -type TrustedCertificateGroupsAPI interface { - GetTrustedCertificateGroupsCommand(input *GetTrustedCertificateGroupsCommandInput) (result *TrustedCertificateGroupsView, resp *http.Response, err error) - AddTrustedCertificateGroupCommand(input *AddTrustedCertificateGroupCommandInput) (result *TrustedCertificateGroupView, resp *http.Response, err error) - DeleteTrustedCertificateGroupCommand(input *DeleteTrustedCertificateGroupCommandInput) (resp *http.Response, err error) - GetTrustedCertificateGroupCommand(input *GetTrustedCertificateGroupCommandInput) (result *TrustedCertificateGroupView, resp *http.Response, err error) - UpdateTrustedCertificateGroupCommand(input *UpdateTrustedCertificateGroupCommandInput) (result *TrustedCertificateGroupView, resp *http.Response, err error) -} - -//GetTrustedCertificateGroupsCommand - Get all Trusted Certificate Groups -//RequestType: GET -//Input: input *GetTrustedCertificateGroupsCommandInput -func (s *TrustedCertificateGroupsService) GetTrustedCertificateGroupsCommand(input *GetTrustedCertificateGroupsCommandInput) (result *TrustedCertificateGroupsView, resp *http.Response, err error) { - path := "/trustedCertificateGroups" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - 
-type GetTrustedCertificateGroupsCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddTrustedCertificateGroupCommand - Create a Trusted Certificate Group -//RequestType: POST -//Input: input *AddTrustedCertificateGroupCommandInput -func (s *TrustedCertificateGroupsService) AddTrustedCertificateGroupCommand(input *AddTrustedCertificateGroupCommandInput) (result *TrustedCertificateGroupView, resp *http.Response, err error) { - path := "/trustedCertificateGroups" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddTrustedCertificateGroupCommandInput struct { - Body TrustedCertificateGroupView -} - -//DeleteTrustedCertificateGroupCommand - Delete a Trusted Certificate Group -//RequestType: DELETE -//Input: input *DeleteTrustedCertificateGroupCommandInput -func (s *TrustedCertificateGroupsService) DeleteTrustedCertificateGroupCommand(input *DeleteTrustedCertificateGroupCommandInput) (resp *http.Response, err error) { - path := "/trustedCertificateGroups/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteTrustedCertificateGroupCommandInput struct { - Id string -} - -//GetTrustedCertificateGroupCommand - Get a Trusted Certificate Group -//RequestType: GET -//Input: input *GetTrustedCertificateGroupCommandInput -func (s *TrustedCertificateGroupsService) GetTrustedCertificateGroupCommand(input *GetTrustedCertificateGroupCommandInput) (result *TrustedCertificateGroupView, resp *http.Response, err error) { - path := "/trustedCertificateGroups/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetTrustedCertificateGroupCommandInput struct { - Id string -} - -//UpdateTrustedCertificateGroupCommand - Update a TrustedCertificateGroup -//RequestType: PUT -//Input: input *UpdateTrustedCertificateGroupCommandInput -func (s *TrustedCertificateGroupsService) UpdateTrustedCertificateGroupCommand(input *UpdateTrustedCertificateGroupCommandInput) (result *TrustedCertificateGroupView, resp *http.Response, err error) { - path := "/trustedCertificateGroups/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateTrustedCertificateGroupCommandInput struct { - Body TrustedCertificateGroupView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/unknownResources.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/unknownResources.go deleted file 
mode 100644 index e93ea899..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/unknownResources.go +++ /dev/null @@ -1,76 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type UnknownResourcesService service - -type UnknownResourcesAPI interface { - Delete() (resp *http.Response, err error) - Get() (result *UnknownResourceSettingsView, resp *http.Response, err error) - Update(input *UpdateInput) (result *UnknownResourceSettingsView, resp *http.Response, err error) -} - -//Delete - Resets the global settings for unknown resources to default values -//RequestType: DELETE -//Input: -func (s *UnknownResourcesService) Delete() (resp *http.Response, err error) { - path := "/unknownResources/settings" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//Get - Retrieves the global settings for unknown resources -//RequestType: GET -//Input: -func (s *UnknownResourcesService) Get() (result *UnknownResourceSettingsView, resp *http.Response, err error) { - path := "/unknownResources/settings" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//Update - Updates the global settings for unknown resources -//RequestType: PUT -//Input: input *UpdateInput -func (s *UnknownResourcesService) Update(input *UpdateInput) (result *UnknownResourceSettingsView, resp *http.Response, err error) { - path := "/unknownResources/settings" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateInput struct { - Body UnknownResourceSettingsView -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/users.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/users.go deleted file mode 100644 index 7c4f1e5c..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/users.go +++ /dev/null @@ -1,142 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type UsersService service - -type UsersAPI interface { - GetUsersCommand(input *GetUsersCommandInput) (result *UsersView, resp *http.Response, err error) - GetUserCommand(input *GetUserCommandInput) (result *UserView, resp *http.Response, err error) - UpdateUserCommand(input *UpdateUserCommandInput) (result *UserView, resp *http.Response, err error) - UpdateUserPasswordCommand(input *UpdateUserPasswordCommandInput) (result *UserPasswordView, resp *http.Response, err error) -} - -//GetUsersCommand - Get all Users -//RequestType: GET -//Input: input *GetUsersCommandInput -func (s *UsersService) GetUsersCommand(input *GetUsersCommandInput) (result *UsersView, resp *http.Response, err error) { - path := "/users" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - 
if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Username != "" { - q.Set("username", input.Username) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetUsersCommandInput struct { - Page string - NumberPerPage string - Filter string - Username string - SortKey string - Order string -} - -//GetUserCommand - Get a User -//RequestType: GET -//Input: input *GetUserCommandInput -func (s *UsersService) GetUserCommand(input *GetUserCommandInput) (result *UserView, resp *http.Response, err error) { - path := "/users/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetUserCommandInput struct { - Id string -} - -//UpdateUserCommand - Update a User -//RequestType: PUT -//Input: input *UpdateUserCommandInput -func (s *UsersService) UpdateUserCommand(input *UpdateUserCommandInput) (result *UserView, resp *http.Response, err error) { - path := "/users/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateUserCommandInput struct { - Body UserView - Id string -} - -//UpdateUserPasswordCommand - Update a User's Password -//RequestType: PUT -//Input: input *UpdateUserPasswordCommandInput -func (s *UsersService) UpdateUserPasswordCommand(input *UpdateUserPasswordCommandInput) (result *UserPasswordView, resp *http.Response, err error) { - path := "/users/{id}/password" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateUserPasswordCommandInput struct { - Body UserPasswordView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/version.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/version.go deleted file mode 100644 index 6bc6c2fa..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/version.go +++ /dev/null @@ -1,32 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type VersionService service - -type VersionAPI interface { - VersionCommand() (result *VersionDocClass, resp *http.Response, err error) -} - -//VersionCommand - Get the PingAccess version number -//RequestType: GET -//Input: -func (s *VersionService) VersionCommand() (result *VersionDocClass, resp *http.Response, err error) { - path := "/version" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if 
err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/virtualhosts.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/virtualhosts.go deleted file mode 100644 index c5d5438c..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/virtualhosts.go +++ /dev/null @@ -1,165 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type VirtualhostsService service - -type VirtualhostsAPI interface { - GetVirtualHostsCommand(input *GetVirtualHostsCommandInput) (result *VirtualHostsView, resp *http.Response, err error) - AddVirtualHostCommand(input *AddVirtualHostCommandInput) (result *VirtualHostView, resp *http.Response, err error) - DeleteVirtualHostCommand(input *DeleteVirtualHostCommandInput) (resp *http.Response, err error) - GetVirtualHostCommand(input *GetVirtualHostCommandInput) (result *VirtualHostView, resp *http.Response, err error) - UpdateVirtualHostCommand(input *UpdateVirtualHostCommandInput) (result *VirtualHostView, resp *http.Response, err error) -} - -//GetVirtualHostsCommand - Get all Virtual Hosts -//RequestType: GET -//Input: input *GetVirtualHostsCommandInput -func (s *VirtualhostsService) GetVirtualHostsCommand(input *GetVirtualHostsCommandInput) (result *VirtualHostsView, resp *http.Response, err error) { - path := "/virtualhosts" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.VirtualHost != "" { - q.Set("virtualHost", input.VirtualHost) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetVirtualHostsCommandInput struct { - Page string - NumberPerPage string - Filter string - VirtualHost string - SortKey string - Order string -} - -//AddVirtualHostCommand - Create a Virtual Host -//RequestType: POST -//Input: input *AddVirtualHostCommandInput -func (s *VirtualhostsService) AddVirtualHostCommand(input *AddVirtualHostCommandInput) (result *VirtualHostView, resp *http.Response, err error) { - path := "/virtualhosts" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddVirtualHostCommandInput struct { - Body VirtualHostView -} - -//DeleteVirtualHostCommand - Delete a Virtual Host -//RequestType: DELETE -//Input: input *DeleteVirtualHostCommandInput -func (s *VirtualhostsService) DeleteVirtualHostCommand(input *DeleteVirtualHostCommandInput) (resp *http.Response, err error) { - path := "/virtualhosts/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - 
return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteVirtualHostCommandInput struct { - Id string -} - -//GetVirtualHostCommand - Get a Virtual Host -//RequestType: GET -//Input: input *GetVirtualHostCommandInput -func (s *VirtualhostsService) GetVirtualHostCommand(input *GetVirtualHostCommandInput) (result *VirtualHostView, resp *http.Response, err error) { - path := "/virtualhosts/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetVirtualHostCommandInput struct { - Id string -} - -//UpdateVirtualHostCommand - Update a Virtual Host -//RequestType: PUT -//Input: input *UpdateVirtualHostCommandInput -func (s *VirtualhostsService) UpdateVirtualHostCommand(input *UpdateVirtualHostCommandInput) (result *VirtualHostView, resp *http.Response, err error) { - path := "/virtualhosts/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateVirtualHostCommandInput struct { - Body VirtualHostView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessionManagement.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessionManagement.go deleted file mode 100644 index 971b63e6..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessionManagement.go +++ /dev/null @@ -1,269 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" -) - -type WebSessionManagementService service - -type WebSessionManagementAPI interface { - DeleteWebSessionManagementCommand() (resp *http.Response, err error) - GetWebSessionManagementCommand() (result *WebSessionManagementView, resp *http.Response, err error) - UpdateWebSessionManagementCommand(input *UpdateWebSessionManagementCommandInput) (result *WebSessionManagementView, resp *http.Response, err error) - GetCookieTypes() (result *CookieTypesView, resp *http.Response, err error) - GetWebSessionSupportedEncryptionAlgorithmsCommand() (result *AlgorithmsView, resp *http.Response, err error) - GetWebSessionKeySetCommand() (result *KeySetView, resp *http.Response, err error) - UpdateWebSessionKeySetCommand(input *UpdateWebSessionKeySetCommandInput) (result *KeySetView, resp *http.Response, err error) - GetOidcLoginTypes() (result *OidcLoginTypesView, resp *http.Response, err error) - GetOidcScopesCommand(input *GetOidcScopesCommandInput) (result *SupportedScopesView, resp *http.Response, err error) - GetRequestPreservationTypes() (result *RequestPreservationTypesView, resp *http.Response, err error) - GetWebSessionSupportedSigningAlgorithms() (result *SigningAlgorithmsView, resp *http.Response, err error) - GetWebStorageTypes() (result *WebStorageTypesView, resp *http.Response, err error) -} - -//DeleteWebSessionManagementCommand - Resets the Web Session Management configuration to default values -//RequestType: DELETE -//Input: -func (s *WebSessionManagementService) 
DeleteWebSessionManagementCommand() (resp *http.Response, err error) { - path := "/webSessionManagement" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -//GetWebSessionManagementCommand - Get the Web Session Management configuration -//RequestType: GET -//Input: -func (s *WebSessionManagementService) GetWebSessionManagementCommand() (result *WebSessionManagementView, resp *http.Response, err error) { - path := "/webSessionManagement" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateWebSessionManagementCommand - Update the Web Session Management configuration -//RequestType: PUT -//Input: input *UpdateWebSessionManagementCommandInput -func (s *WebSessionManagementService) UpdateWebSessionManagementCommand(input *UpdateWebSessionManagementCommandInput) (result *WebSessionManagementView, resp *http.Response, err error) { - path := "/webSessionManagement" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateWebSessionManagementCommandInput struct { - Body WebSessionManagementView -} - -//GetCookieTypes - Get the valid OIDC Cookie Types -//RequestType: GET -//Input: -func (s *WebSessionManagementService) GetCookieTypes() (result *CookieTypesView, resp *http.Response, err error) { - path := "/webSessionManagement/cookieTypes" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetWebSessionSupportedEncryptionAlgorithmsCommand - Get the valid algorithms for Web Session Cookie Encryption -//RequestType: GET -//Input: -func (s *WebSessionManagementService) GetWebSessionSupportedEncryptionAlgorithmsCommand() (result *AlgorithmsView, resp *http.Response, err error) { - path := "/webSessionManagement/encryptionAlgorithms" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetWebSessionKeySetCommand - Get the Web Session key set configuration -//RequestType: GET -//Input: -func (s *WebSessionManagementService) GetWebSessionKeySetCommand() (result *KeySetView, resp *http.Response, err error) { - path := "/webSessionManagement/keySet" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//UpdateWebSessionKeySetCommand - Update the WebSession key set configuration -//RequestType: PUT 
-//Input: input *UpdateWebSessionKeySetCommandInput -func (s *WebSessionManagementService) UpdateWebSessionKeySetCommand(input *UpdateWebSessionKeySetCommandInput) (result *KeySetView, resp *http.Response, err error) { - path := "/webSessionManagement/keySet" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateWebSessionKeySetCommandInput struct { - Body KeySetView -} - -//GetOidcLoginTypes - Get the valid OIDC Login Types -//RequestType: GET -//Input: -func (s *WebSessionManagementService) GetOidcLoginTypes() (result *OidcLoginTypesView, resp *http.Response, err error) { - path := "/webSessionManagement/oidcLoginTypes" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetOidcScopesCommand - Get the scopes supported by the current OIDC Provider -//RequestType: GET -//Input: input *GetOidcScopesCommandInput -func (s *WebSessionManagementService) GetOidcScopesCommand(input *GetOidcScopesCommandInput) (result *SupportedScopesView, resp *http.Response, err error) { - path := "/webSessionManagement/oidcScopes" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.ClientId != "" { - q.Set("clientId", input.ClientId) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetOidcScopesCommandInput struct { - ClientId string -} - -//GetRequestPreservationTypes - Get the valid Request Preservation Types -//RequestType: GET -//Input: -func (s *WebSessionManagementService) GetRequestPreservationTypes() (result *RequestPreservationTypesView, resp *http.Response, err error) { - path := "/webSessionManagement/requestPreservationTypes" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetWebSessionSupportedSigningAlgorithms - Get the valid algorithms for Web Session Cookie Signing -//RequestType: GET -//Input: -func (s *WebSessionManagementService) GetWebSessionSupportedSigningAlgorithms() (result *SigningAlgorithmsView, resp *http.Response, err error) { - path := "/webSessionManagement/signingAlgorithms" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -//GetWebStorageTypes - Get the valid Web Storage Types -//RequestType: GET -//Input: -func (s *WebSessionManagementService) GetWebStorageTypes() (result *WebStorageTypesView, resp *http.Response, err error) { - path := "/webSessionManagement/webStorageTypes" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := 
s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessions.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessions.go deleted file mode 100644 index e64052d5..00000000 --- a/vendor/github.com/iwarapter/pingaccess-sdk-go/pingaccess/webSessions.go +++ /dev/null @@ -1,165 +0,0 @@ -package pingaccess - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -type WebSessionsService service - -type WebSessionsAPI interface { - GetWebSessionsCommand(input *GetWebSessionsCommandInput) (result *WebSessionsView, resp *http.Response, err error) - AddWebSessionCommand(input *AddWebSessionCommandInput) (result *WebSessionView, resp *http.Response, err error) - DeleteWebSessionCommand(input *DeleteWebSessionCommandInput) (resp *http.Response, err error) - GetWebSessionCommand(input *GetWebSessionCommandInput) (result *WebSessionView, resp *http.Response, err error) - UpdateWebSessionCommand(input *UpdateWebSessionCommandInput) (result *WebSessionView, resp *http.Response, err error) -} - -//GetWebSessionsCommand - Get all WebSessions -//RequestType: GET -//Input: input *GetWebSessionsCommandInput -func (s *WebSessionsService) GetWebSessionsCommand(input *GetWebSessionsCommandInput) (result *WebSessionsView, resp *http.Response, err error) { - path := "/webSessions" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - q := rel.Query() - if input.Page != "" { - q.Set("page", input.Page) - } - if input.NumberPerPage != "" { - q.Set("numberPerPage", input.NumberPerPage) - } - if input.Filter != "" { - q.Set("filter", input.Filter) - } - if input.Name != "" { - q.Set("name", input.Name) - } - if input.SortKey != "" { - q.Set("sortKey", input.SortKey) - } - if input.Order != "" { - q.Set("order", input.Order) - } - rel.RawQuery = q.Encode() - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetWebSessionsCommandInput struct { - Page string - NumberPerPage string - Filter string - Name string - SortKey string - Order string -} - -//AddWebSessionCommand - Create a WebSession -//RequestType: POST -//Input: input *AddWebSessionCommandInput -func (s *WebSessionsService) AddWebSessionCommand(input *AddWebSessionCommandInput) (result *WebSessionView, resp *http.Response, err error) { - path := "/webSessions" - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("POST", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type AddWebSessionCommandInput struct { - Body WebSessionView -} - -//DeleteWebSessionCommand - Delete a WebSession -//RequestType: DELETE -//Input: input *DeleteWebSessionCommandInput -func (s *WebSessionsService) DeleteWebSessionCommand(input *DeleteWebSessionCommandInput) (resp *http.Response, err error) { - path := "/webSessions/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("DELETE", rel, nil) - if err != nil { - return nil, err - } - - resp, err = 
s.client.do(req, nil) - if err != nil { - return resp, err - } - return resp, nil - -} - -type DeleteWebSessionCommandInput struct { - Id string -} - -//GetWebSessionCommand - Get a WebSession -//RequestType: GET -//Input: input *GetWebSessionCommandInput -func (s *WebSessionsService) GetWebSessionCommand(input *GetWebSessionCommandInput) (result *WebSessionView, resp *http.Response, err error) { - path := "/webSessions/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("GET", rel, nil) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type GetWebSessionCommandInput struct { - Id string -} - -//UpdateWebSessionCommand - Update a WebSession -//RequestType: PUT -//Input: input *UpdateWebSessionCommandInput -func (s *WebSessionsService) UpdateWebSessionCommand(input *UpdateWebSessionCommandInput) (result *WebSessionView, resp *http.Response, err error) { - path := "/webSessions/{id}" - path = strings.Replace(path, "{id}", input.Id, -1) - - rel := &url.URL{Path: fmt.Sprintf("%s%s", s.client.Context, path)} - req, err := s.client.newRequest("PUT", rel, input.Body) - if err != nil { - return nil, nil, err - } - - resp, err = s.client.do(req, &result) - if err != nil { - return result, resp, err - } - return result, resp, nil - -} - -type UpdateWebSessionCommandInput struct { - Body WebSessionView - Id string -} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators/accessTokenValidators.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators/accessTokenValidators.go new file mode 100644 index 00000000..c776276f --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators/accessTokenValidators.go @@ -0,0 +1,214 @@ +package accessTokenValidators + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "AccessTokenValidators" +) + +//AccessTokenValidatorsService provides the API operations for making requests to +// AccessTokenValidators endpoint. +type AccessTokenValidatorsService struct { + *client.Client +} + +//New creates a new instance of the AccessTokenValidatorsService client.
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a AccessTokenValidatorsService from the configuration +// svc := accessTokenValidators.New(cfg) +// +func New(cfg *config.Config) *AccessTokenValidatorsService { + + return &AccessTokenValidatorsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a AccessTokenValidators operation +func (s *AccessTokenValidatorsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetAccessTokenValidatorsCommand - Get all Access Token Validators +//RequestType: GET +//Input: input *GetAccessTokenValidatorsCommandInput +func (s *AccessTokenValidatorsService) GetAccessTokenValidatorsCommand(input *GetAccessTokenValidatorsCommandInput) (output *models.AccessTokenValidatorsView, resp *http.Response, err error) { + path := "/accessTokenValidators" + op := &request.Operation{ + Name: "GetAccessTokenValidatorsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.AccessTokenValidatorsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAccessTokenValidatorsCommandInput - Inputs for GetAccessTokenValidatorsCommand +type GetAccessTokenValidatorsCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddAccessTokenValidatorCommand - Create an Access Token Validator +//RequestType: POST +//Input: input *AddAccessTokenValidatorCommandInput +func (s *AccessTokenValidatorsService) AddAccessTokenValidatorCommand(input *AddAccessTokenValidatorCommandInput) (output *models.AccessTokenValidatorView, resp *http.Response, err error) { + path := "/accessTokenValidators" + op := &request.Operation{ + Name: "AddAccessTokenValidatorCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AccessTokenValidatorView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddAccessTokenValidatorCommandInput - Inputs for AddAccessTokenValidatorCommand +type AddAccessTokenValidatorCommandInput struct { + Body models.AccessTokenValidatorView +} + +//GetAccessTokenValidatorDescriptorsCommand - Get descriptors for all Access Token Validators +//RequestType: GET +//Input: +func (s *AccessTokenValidatorsService) GetAccessTokenValidatorDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) { + path := "/accessTokenValidators/descriptors" + op := &request.Operation{ + Name: "GetAccessTokenValidatorDescriptorsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.DescriptorsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//DeleteAccessTokenValidatorCommand - Delete a Access Token Validator +//RequestType: DELETE +//Input: 
input *DeleteAccessTokenValidatorCommandInput +func (s *AccessTokenValidatorsService) DeleteAccessTokenValidatorCommand(input *DeleteAccessTokenValidatorCommandInput) (resp *http.Response, err error) { + path := "/accessTokenValidators/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteAccessTokenValidatorCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteAccessTokenValidatorCommandInput - Inputs for DeleteAccessTokenValidatorCommand +type DeleteAccessTokenValidatorCommandInput struct { + Id string +} + +//GetAccessTokenValidatorCommand - Get an Access Token Validator +//RequestType: GET +//Input: input *GetAccessTokenValidatorCommandInput +func (s *AccessTokenValidatorsService) GetAccessTokenValidatorCommand(input *GetAccessTokenValidatorCommandInput) (output *models.AccessTokenValidatorView, resp *http.Response, err error) { + path := "/accessTokenValidators/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetAccessTokenValidatorCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AccessTokenValidatorView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAccessTokenValidatorCommandInput - Inputs for GetAccessTokenValidatorCommand +type GetAccessTokenValidatorCommandInput struct { + Id string +} + +//UpdateAccessTokenValidatorCommand - Update an Access Token Validator +//RequestType: PUT +//Input: input *UpdateAccessTokenValidatorCommandInput +func (s *AccessTokenValidatorsService) UpdateAccessTokenValidatorCommand(input *UpdateAccessTokenValidatorCommandInput) (output *models.AccessTokenValidatorView, resp *http.Response, err error) { + path := "/accessTokenValidators/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateAccessTokenValidatorCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AccessTokenValidatorView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateAccessTokenValidatorCommandInput - Inputs for UpdateAccessTokenValidatorCommand +type UpdateAccessTokenValidatorCommandInput struct { + Body models.AccessTokenValidatorView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators/interface.go new file mode 100644 index 00000000..9e65b360 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators/interface.go @@ -0,0 +1,16 @@ +package accessTokenValidators + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type AccessTokenValidatorsAPI interface { + GetAccessTokenValidatorsCommand(input *GetAccessTokenValidatorsCommandInput) (output *models.AccessTokenValidatorsView, resp *http.Response, err error) + AddAccessTokenValidatorCommand(input *AddAccessTokenValidatorCommandInput) (output *models.AccessTokenValidatorView, resp *http.Response, err error) + 
GetAccessTokenValidatorDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) + DeleteAccessTokenValidatorCommand(input *DeleteAccessTokenValidatorCommandInput) (resp *http.Response, err error) + GetAccessTokenValidatorCommand(input *GetAccessTokenValidatorCommandInput) (output *models.AccessTokenValidatorView, resp *http.Response, err error) + UpdateAccessTokenValidatorCommand(input *UpdateAccessTokenValidatorCommandInput) (output *models.AccessTokenValidatorView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/acme/acme.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/acme/acme.go new file mode 100644 index 00000000..bec53bde --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/acme/acme.go @@ -0,0 +1,475 @@ +package acme + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "Acme" +) + +//AcmeService provides the API operations for making requests to +// Acme endpoint. +type AcmeService struct { + *client.Client +} + +//New createa a new instance of the AcmeService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a AcmeService from the configuration +// svc := acme.New(cfg) +// +func New(cfg *config.Config) *AcmeService { + + return &AcmeService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Acme operation +func (s *AcmeService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetAcmeServersCommand - Get all ACME Servers +//RequestType: GET +//Input: input *GetAcmeServersCommandInput +func (s *AcmeService) GetAcmeServersCommand(input *GetAcmeServersCommandInput) (output *models.AcmeServersView, resp *http.Response, err error) { + path := "/acme/servers" + op := &request.Operation{ + Name: "GetAcmeServersCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.AcmeServersView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAcmeServersCommandInput - Inputs for GetAcmeServersCommand +type GetAcmeServersCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddAcmeServerCommand - Add an ACME Server +//RequestType: POST +//Input: input *AddAcmeServerCommandInput +func (s *AcmeService) AddAcmeServerCommand(input *AddAcmeServerCommandInput) (output *models.AcmeServerView, resp *http.Response, err error) { + path := "/acme/servers" + op := &request.Operation{ + Name: 
"AddAcmeServerCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AcmeServerView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddAcmeServerCommandInput - Inputs for AddAcmeServerCommand +type AddAcmeServerCommandInput struct { + Body models.AcmeServerView +} + +//GetDefaultAcmeServerCommand - Get the default ACME Server +//RequestType: GET +//Input: +func (s *AcmeService) GetDefaultAcmeServerCommand() (output *models.LinkView, resp *http.Response, err error) { + path := "/acme/servers/default" + op := &request.Operation{ + Name: "GetDefaultAcmeServerCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.LinkView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateDefaultAcmeServerCommand - Update the default ACME Server +//RequestType: PUT +//Input: input *UpdateDefaultAcmeServerCommandInput +func (s *AcmeService) UpdateDefaultAcmeServerCommand(input *UpdateDefaultAcmeServerCommandInput) (output *models.LinkView, resp *http.Response, err error) { + path := "/acme/servers/default" + op := &request.Operation{ + Name: "UpdateDefaultAcmeServerCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.LinkView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateDefaultAcmeServerCommandInput - Inputs for UpdateDefaultAcmeServerCommand +type UpdateDefaultAcmeServerCommandInput struct { + Body models.LinkView +} + +//DeleteAcmeServerCommand - Delete an ACME Server +//RequestType: DELETE +//Input: input *DeleteAcmeServerCommandInput +func (s *AcmeService) DeleteAcmeServerCommand(input *DeleteAcmeServerCommandInput) (output *models.AcmeServerView, resp *http.Response, err error) { + path := "/acme/servers/{acmeServerId}" + path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) + + op := &request.Operation{ + Name: "DeleteAcmeServerCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AcmeServerView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// DeleteAcmeServerCommandInput - Inputs for DeleteAcmeServerCommand +type DeleteAcmeServerCommandInput struct { + AcmeServerId string +} + +//GetAcmeServerCommand - Get an ACME Server +//RequestType: GET +//Input: input *GetAcmeServerCommandInput +func (s *AcmeService) GetAcmeServerCommand(input *GetAcmeServerCommandInput) (output *models.AcmeServerView, resp *http.Response, err error) { + path := "/acme/servers/{acmeServerId}" + path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) + + op := &request.Operation{ + Name: "GetAcmeServerCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AcmeServerView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAcmeServerCommandInput - Inputs for GetAcmeServerCommand +type GetAcmeServerCommandInput struct { + AcmeServerId string +} + +//GetAcmeAccountsCommand - Get all ACME 
Accounts +//RequestType: GET +//Input: input *GetAcmeAccountsCommandInput +func (s *AcmeService) GetAcmeAccountsCommand(input *GetAcmeAccountsCommandInput) (output *models.AcmeAccountView, resp *http.Response, err error) { + path := "/acme/servers/{acmeServerId}/accounts" + path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) + + op := &request.Operation{ + Name: "GetAcmeAccountsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.AcmeAccountView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAcmeAccountsCommandInput - Inputs for GetAcmeAccountsCommand +type GetAcmeAccountsCommandInput struct { + Page string + NumberPerPage string + SortKey string + Order string + + AcmeServerId string +} + +//AddAcmeAccountCommand - Add an ACME Account +//RequestType: POST +//Input: input *AddAcmeAccountCommandInput +func (s *AcmeService) AddAcmeAccountCommand(input *AddAcmeAccountCommandInput) (output *models.AcmeAccountView, resp *http.Response, err error) { + path := "/acme/servers/{acmeServerId}/accounts" + path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) + + op := &request.Operation{ + Name: "AddAcmeAccountCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AcmeAccountView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddAcmeAccountCommandInput - Inputs for AddAcmeAccountCommand +type AddAcmeAccountCommandInput struct { + Body models.AcmeAccountView + AcmeServerId string +} + +//DeleteAcmeAccountCommand - Delete an ACME Account +//RequestType: DELETE +//Input: input *DeleteAcmeAccountCommandInput +func (s *AcmeService) DeleteAcmeAccountCommand(input *DeleteAcmeAccountCommandInput) (output *models.AcmeAccountView, resp *http.Response, err error) { + path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}" + path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) + + path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1) + + op := &request.Operation{ + Name: "DeleteAcmeAccountCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AcmeAccountView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// DeleteAcmeAccountCommandInput - Inputs for DeleteAcmeAccountCommand +type DeleteAcmeAccountCommandInput struct { + AcmeServerId string + AcmeAccountId string +} + +//GetAcmeAccountCommand - Get an ACME Account +//RequestType: GET +//Input: input *GetAcmeAccountCommandInput +func (s *AcmeService) GetAcmeAccountCommand(input *GetAcmeAccountCommandInput) (output *models.AcmeAccountView, resp *http.Response, err error) { + path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}" + path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) + + path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1) + + op := &request.Operation{ + Name: "GetAcmeAccountCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AcmeAccountView{} + 
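	// output is handed to newRequest as the response target; it is returned
	// only when Send() completes without error, otherwise the caller gets the
	// raw HTTP response and req.Error.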
req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAcmeAccountCommandInput - Inputs for GetAcmeAccountCommand +type GetAcmeAccountCommandInput struct { + AcmeServerId string + AcmeAccountId string +} + +//GetAcmeCertificateRequestsCommand - Get all ACME Certificate Requests +//RequestType: GET +//Input: input *GetAcmeCertificateRequestsCommandInput +func (s *AcmeService) GetAcmeCertificateRequestsCommand(input *GetAcmeCertificateRequestsCommandInput) (output *models.AcmeCertificateRequestView, resp *http.Response, err error) { + path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}/certificateRequests" + path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) + + path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1) + + op := &request.Operation{ + Name: "GetAcmeCertificateRequestsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "keyPairId": input.KeyPairId, + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.AcmeCertificateRequestView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAcmeCertificateRequestsCommandInput - Inputs for GetAcmeCertificateRequestsCommand +type GetAcmeCertificateRequestsCommandInput struct { + KeyPairId string + Page string + NumberPerPage string + SortKey string + Order string + + AcmeServerId string + AcmeAccountId string +} + +//AddAcmeCertificateRequestCommand - Initiate the ACME protocol +//RequestType: POST +//Input: input *AddAcmeCertificateRequestCommandInput +func (s *AcmeService) AddAcmeCertificateRequestCommand(input *AddAcmeCertificateRequestCommandInput) (output *models.AcmeCertificateRequestView, resp *http.Response, err error) { + path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}/certificateRequests" + path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) + + path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1) + + op := &request.Operation{ + Name: "AddAcmeCertificateRequestCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AcmeCertificateRequestView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddAcmeCertificateRequestCommandInput - Inputs for AddAcmeCertificateRequestCommand +type AddAcmeCertificateRequestCommandInput struct { + Body models.AcmeCertificateRequestView + AcmeServerId string + AcmeAccountId string +} + +//DeleteAcmeCertificateRequestCommand - Delete an ACME Certificate Request +//RequestType: DELETE +//Input: input *DeleteAcmeCertificateRequestCommandInput +func (s *AcmeService) DeleteAcmeCertificateRequestCommand(input *DeleteAcmeCertificateRequestCommandInput) (output *models.AcmeCertificateRequestView, resp *http.Response, err error) { + path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}/certificateRequests/{acmeCertificateRequestId}" + path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) + + path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1) + + path = strings.Replace(path, "{acmeCertificateRequestId}", input.AcmeCertificateRequestId, -1) + + op := 
&request.Operation{ + Name: "DeleteAcmeCertificateRequestCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AcmeCertificateRequestView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// DeleteAcmeCertificateRequestCommandInput - Inputs for DeleteAcmeCertificateRequestCommand +type DeleteAcmeCertificateRequestCommandInput struct { + AcmeServerId string + AcmeAccountId string + AcmeCertificateRequestId string +} + +//GetAcmeCertificateRequestCommand - Get an ACME Certificate Request +//RequestType: GET +//Input: input *GetAcmeCertificateRequestCommandInput +func (s *AcmeService) GetAcmeCertificateRequestCommand(input *GetAcmeCertificateRequestCommandInput) (output *models.AcmeCertificateRequestView, resp *http.Response, err error) { + path := "/acme/servers/{acmeServerId}/accounts/{acmeAccountId}/certificateRequests/{acmeCertificateRequestId}" + path = strings.Replace(path, "{acmeServerId}", input.AcmeServerId, -1) + + path = strings.Replace(path, "{acmeAccountId}", input.AcmeAccountId, -1) + + path = strings.Replace(path, "{acmeCertificateRequestId}", input.AcmeCertificateRequestId, -1) + + op := &request.Operation{ + Name: "GetAcmeCertificateRequestCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AcmeCertificateRequestView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAcmeCertificateRequestCommandInput - Inputs for GetAcmeCertificateRequestCommand +type GetAcmeCertificateRequestCommandInput struct { + AcmeServerId string + AcmeAccountId string + AcmeCertificateRequestId string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/acme/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/acme/interface.go new file mode 100644 index 00000000..5d0f40c9 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/acme/interface.go @@ -0,0 +1,24 @@ +package acme + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type AcmeAPI interface { + GetAcmeServersCommand(input *GetAcmeServersCommandInput) (output *models.AcmeServersView, resp *http.Response, err error) + AddAcmeServerCommand(input *AddAcmeServerCommandInput) (output *models.AcmeServerView, resp *http.Response, err error) + GetDefaultAcmeServerCommand() (output *models.LinkView, resp *http.Response, err error) + UpdateDefaultAcmeServerCommand(input *UpdateDefaultAcmeServerCommandInput) (output *models.LinkView, resp *http.Response, err error) + DeleteAcmeServerCommand(input *DeleteAcmeServerCommandInput) (output *models.AcmeServerView, resp *http.Response, err error) + GetAcmeServerCommand(input *GetAcmeServerCommandInput) (output *models.AcmeServerView, resp *http.Response, err error) + GetAcmeAccountsCommand(input *GetAcmeAccountsCommandInput) (output *models.AcmeAccountView, resp *http.Response, err error) + AddAcmeAccountCommand(input *AddAcmeAccountCommandInput) (output *models.AcmeAccountView, resp *http.Response, err error) + DeleteAcmeAccountCommand(input *DeleteAcmeAccountCommandInput) (output *models.AcmeAccountView, resp *http.Response, err error) + GetAcmeAccountCommand(input *GetAcmeAccountCommandInput) (output *models.AcmeAccountView, resp *http.Response, err error) + 
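	// The remaining operations act on certificate requests nested under
	// /acme/servers/{acmeServerId}/accounts/{acmeAccountId}/certificateRequests.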
GetAcmeCertificateRequestsCommand(input *GetAcmeCertificateRequestsCommandInput) (output *models.AcmeCertificateRequestView, resp *http.Response, err error) + AddAcmeCertificateRequestCommand(input *AddAcmeCertificateRequestCommandInput) (output *models.AcmeCertificateRequestView, resp *http.Response, err error) + DeleteAcmeCertificateRequestCommand(input *DeleteAcmeCertificateRequestCommandInput) (output *models.AcmeCertificateRequestView, resp *http.Response, err error) + GetAcmeCertificateRequestCommand(input *GetAcmeCertificateRequestCommandInput) (output *models.AcmeCertificateRequestView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminConfig/adminConfig.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminConfig/adminConfig.go new file mode 100644 index 00000000..882202f6 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminConfig/adminConfig.go @@ -0,0 +1,267 @@ +package adminConfig + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "AdminConfig" +) + +//AdminConfigService provides the API operations for making requests to +// AdminConfig endpoint. +type AdminConfigService struct { + *client.Client +} + +//New createa a new instance of the AdminConfigService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a AdminConfigService from the configuration +// svc := adminConfig.New(cfg) +// +func New(cfg *config.Config) *AdminConfigService { + + return &AdminConfigService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a AdminConfig operation +func (s *AdminConfigService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//DeleteAdminConfigurationCommand - Resets the Admin Config to default values +//RequestType: DELETE +//Input: +func (s *AdminConfigService) DeleteAdminConfigurationCommand() (resp *http.Response, err error) { + path := "/adminConfig" + op := &request.Operation{ + Name: "DeleteAdminConfigurationCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetAdminConfigurationCommand - Get the Admin Config +//RequestType: GET +//Input: +func (s *AdminConfigService) GetAdminConfigurationCommand() (output *models.AdminConfigurationView, resp *http.Response, err error) { + path := "/adminConfig" + op := &request.Operation{ + Name: "GetAdminConfigurationCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.AdminConfigurationView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + 
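Because the Admin Config is a singleton resource, a typical call site pairs GetAdminConfigurationCommand (above) with UpdateAdminConfigurationCommand (below) in a get-modify-update round trip. A hedged sketch, assuming a cfg built as in the package Example; the HostPort field is an assumption made for illustration only, so consult models.AdminConfigurationView for the real field set:

	svc := adminConfig.New(cfg)

	conf, _, err := svc.GetAdminConfigurationCommand()
	if err != nil {
		log.Fatal(err)
	}
	hostPort := "pa-admin.example.com:9000" // illustrative value
	conf.HostPort = &hostPort               // assumed *string field, for illustration
	if _, _, err := svc.UpdateAdminConfigurationCommand(&adminConfig.UpdateAdminConfigurationCommandInput{Body: *conf}); err != nil {
		log.Fatal(err)
	}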
+//UpdateAdminConfigurationCommand - Update the Admin Config +//RequestType: PUT +//Input: input *UpdateAdminConfigurationCommandInput +func (s *AdminConfigService) UpdateAdminConfigurationCommand(input *UpdateAdminConfigurationCommandInput) (output *models.AdminConfigurationView, resp *http.Response, err error) { + path := "/adminConfig" + op := &request.Operation{ + Name: "UpdateAdminConfigurationCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AdminConfigurationView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateAdminConfigurationCommandInput - Inputs for UpdateAdminConfigurationCommand +type UpdateAdminConfigurationCommandInput struct { + Body models.AdminConfigurationView +} + +//GetReplicaAdminsCommand - Get list of ReplicaAdmins +//RequestType: GET +//Input: +func (s *AdminConfigService) GetReplicaAdminsCommand() (output *models.ReplicaAdminsView, resp *http.Response, err error) { + path := "/adminConfig/replicaAdmins" + op := &request.Operation{ + Name: "GetReplicaAdminsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.ReplicaAdminsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//AddReplicaAdminCommand - Add a ReplicaAdmin +//RequestType: POST +//Input: input *AddReplicaAdminCommandInput +func (s *AdminConfigService) AddReplicaAdminCommand(input *AddReplicaAdminCommandInput) (output *models.ReplicaAdminView, resp *http.Response, err error) { + path := "/adminConfig/replicaAdmins" + op := &request.Operation{ + Name: "AddReplicaAdminCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ReplicaAdminView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddReplicaAdminCommandInput - Inputs for AddReplicaAdminCommand +type AddReplicaAdminCommandInput struct { + Body models.ReplicaAdminView +} + +//DeleteReplicaAdminCommand - Delete a ReplicaAdmin +//RequestType: DELETE +//Input: input *DeleteReplicaAdminCommandInput +func (s *AdminConfigService) DeleteReplicaAdminCommand(input *DeleteReplicaAdminCommandInput) (resp *http.Response, err error) { + path := "/adminConfig/replicaAdmins/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteReplicaAdminCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteReplicaAdminCommandInput - Inputs for DeleteReplicaAdminCommand +type DeleteReplicaAdminCommandInput struct { + Id string +} + +//GetReplicaAdminCommand - Get a ReplicaAdmin +//RequestType: GET +//Input: input *GetReplicaAdminCommandInput +func (s *AdminConfigService) GetReplicaAdminCommand(input *GetReplicaAdminCommandInput) (output *models.ReplicaAdminView, resp *http.Response, err error) { + path := "/adminConfig/replicaAdmins/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetReplicaAdminCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ReplicaAdminView{} + 
req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetReplicaAdminCommandInput - Inputs for GetReplicaAdminCommand +type GetReplicaAdminCommandInput struct { + Id string +} + +//UpdateAdminReplicaCommand - Update a ReplicaAdmin +//RequestType: PUT +//Input: input *UpdateAdminReplicaCommandInput +func (s *AdminConfigService) UpdateAdminReplicaCommand(input *UpdateAdminReplicaCommandInput) (output *models.ReplicaAdminView, resp *http.Response, err error) { + path := "/adminConfig/replicaAdmins/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateAdminReplicaCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ReplicaAdminView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateAdminReplicaCommandInput - Inputs for UpdateAdminReplicaCommand +type UpdateAdminReplicaCommandInput struct { + Body models.ReplicaAdminView + Id string +} + +//GetAdminReplicaFileCommand - Get configuration file for a given ReplicaAdmin +//RequestType: POST +//Input: input *GetAdminReplicaFileCommandInput +func (s *AdminConfigService) GetAdminReplicaFileCommand(input *GetAdminReplicaFileCommandInput) (resp *http.Response, err error) { + path := "/adminConfig/replicaAdmins/{id}/config" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetAdminReplicaFileCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// GetAdminReplicaFileCommandInput - Inputs for GetAdminReplicaFileCommand +type GetAdminReplicaFileCommandInput struct { + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminConfig/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminConfig/interface.go new file mode 100644 index 00000000..7982302b --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminConfig/interface.go @@ -0,0 +1,19 @@ +package adminConfig + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type AdminConfigAPI interface { + DeleteAdminConfigurationCommand() (resp *http.Response, err error) + GetAdminConfigurationCommand() (output *models.AdminConfigurationView, resp *http.Response, err error) + UpdateAdminConfigurationCommand(input *UpdateAdminConfigurationCommandInput) (output *models.AdminConfigurationView, resp *http.Response, err error) + GetReplicaAdminsCommand() (output *models.ReplicaAdminsView, resp *http.Response, err error) + AddReplicaAdminCommand(input *AddReplicaAdminCommandInput) (output *models.ReplicaAdminView, resp *http.Response, err error) + DeleteReplicaAdminCommand(input *DeleteReplicaAdminCommandInput) (resp *http.Response, err error) + GetReplicaAdminCommand(input *GetReplicaAdminCommandInput) (output *models.ReplicaAdminView, resp *http.Response, err error) + UpdateAdminReplicaCommand(input *UpdateAdminReplicaCommandInput) (output *models.ReplicaAdminView, resp *http.Response, err error) + GetAdminReplicaFileCommand(input *GetAdminReplicaFileCommandInput) (resp *http.Response, err error) +} diff --git 
a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminSessionInfo/adminSessionInfo.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminSessionInfo/adminSessionInfo.go new file mode 100644 index 00000000..81bb78fb --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminSessionInfo/adminSessionInfo.go @@ -0,0 +1,107 @@ +package adminSessionInfo + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "AdminSessionInfo" +) + +//AdminSessionInfoService provides the API operations for making requests to +// AdminSessionInfo endpoint. +type AdminSessionInfoService struct { + *client.Client +} + +//New createa a new instance of the AdminSessionInfoService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a AdminSessionInfoService from the configuration +// svc := adminSessionInfo.New(cfg) +// +func New(cfg *config.Config) *AdminSessionInfoService { + + return &AdminSessionInfoService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a AdminSessionInfo operation +func (s *AdminSessionInfoService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//AdminSessionDeleteCommand - Invalidate the Admin session information +//RequestType: DELETE +//Input: +func (s *AdminSessionInfoService) AdminSessionDeleteCommand() (resp *http.Response, err error) { + path := "/adminSessionInfo" + op := &request.Operation{ + Name: "AdminSessionDeleteCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//AdminSessionInfoCommand - Return the Admin session information +//RequestType: GET +//Input: +func (s *AdminSessionInfoService) AdminSessionInfoCommand() (output *models.SessionInfo, resp *http.Response, err error) { + path := "/adminSessionInfo" + op := &request.Operation{ + Name: "AdminSessionInfoCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.SessionInfo{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//AdminSessionInfoCheckCommand - Return the Admin session information without affecting session expiration +//RequestType: GET +//Input: +func (s *AdminSessionInfoService) AdminSessionInfoCheckCommand() (output *models.SessionInfo, resp *http.Response, err error) { + path := "/adminSessionInfo/checkOnly" + op := &request.Operation{ + Name: "AdminSessionInfoCheckCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.SessionInfo{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} diff --git 
a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminSessionInfo/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminSessionInfo/interface.go new file mode 100644 index 00000000..4063f209 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/adminSessionInfo/interface.go @@ -0,0 +1,13 @@ +package adminSessionInfo + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type AdminSessionInfoAPI interface { + AdminSessionDeleteCommand() (resp *http.Response, err error) + AdminSessionInfoCommand() (output *models.SessionInfo, resp *http.Response, err error) + AdminSessionInfoCheckCommand() (output *models.SessionInfo, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/agents/agents.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/agents/agents.go new file mode 100644 index 00000000..ad02b131 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/agents/agents.go @@ -0,0 +1,289 @@ +package agents + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "Agents" +) + +//AgentsService provides the API operations for making requests to +// Agents endpoint. +type AgentsService struct { + *client.Client +} + +//New createa a new instance of the AgentsService client. 
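The listing commands translate their input struct one-for-one into the QueryParams map, as GetAgentsCommand below shows. A short call-site fragment (cfg built as in the Example that follows; the paging and sort values are placeholders):

	svc := agents.New(cfg)
	out, _, err := svc.GetAgentsCommand(&agents.GetAgentsCommandInput{
		NumberPerPage: "25",
		SortKey:       "name",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out) // *models.AgentsView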
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a AgentsService from the configuration +// svc := agents.New(cfg) +// +func New(cfg *config.Config) *AgentsService { + + return &AgentsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Agents operation +func (s *AgentsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetAgentsCommand - Get all Agents +//RequestType: GET +//Input: input *GetAgentsCommandInput +func (s *AgentsService) GetAgentsCommand(input *GetAgentsCommandInput) (output *models.AgentsView, resp *http.Response, err error) { + path := "/agents" + op := &request.Operation{ + Name: "GetAgentsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.AgentsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAgentsCommandInput - Inputs for GetAgentsCommand +type GetAgentsCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddAgentCommand - Add an Agent +//RequestType: POST +//Input: input *AddAgentCommandInput +func (s *AgentsService) AddAgentCommand(input *AddAgentCommandInput) (output *models.AgentView, resp *http.Response, err error) { + path := "/agents" + op := &request.Operation{ + Name: "AddAgentCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AgentView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddAgentCommandInput - Inputs for AddAgentCommand +type AddAgentCommandInput struct { + Body models.AgentView +} + +//GetAgentCertificatesCommand - Get all Agent Certificates +//RequestType: GET +//Input: input *GetAgentCertificatesCommandInput +func (s *AgentsService) GetAgentCertificatesCommand(input *GetAgentCertificatesCommandInput) (output *models.AgentCertificatesView, resp *http.Response, err error) { + path := "/agents/certificates" + op := &request.Operation{ + Name: "GetAgentCertificatesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "alias": input.Alias, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.AgentCertificatesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAgentCertificatesCommandInput - Inputs for GetAgentCertificatesCommand +type GetAgentCertificatesCommandInput struct { + Page string + NumberPerPage string + Filter string + Alias string + SortKey string + Order string +} + +//GetAgentCertificateCommand - Get an Agent Certificate +//RequestType: GET +//Input: input *GetAgentCertificateCommandInput +func (s *AgentsService) 
GetAgentCertificateCommand(input *GetAgentCertificateCommandInput) (output *models.AgentCertificateView, resp *http.Response, err error) { + path := "/agents/certificates/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetAgentCertificateCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AgentCertificateView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAgentCertificateCommandInput - Inputs for GetAgentCertificateCommand +type GetAgentCertificateCommandInput struct { + Id string +} + +//GetAgentFileCommand - Get a configuration file for an Agent +//RequestType: GET +//Input: input *GetAgentFileCommandInput +func (s *AgentsService) GetAgentFileCommand(input *GetAgentFileCommandInput) (resp *http.Response, err error) { + path := "/agents/{agentId}/config/{sharedSecretId}" + path = strings.Replace(path, "{agentId}", input.AgentId, -1) + + path = strings.Replace(path, "{sharedSecretId}", input.SharedSecretId, -1) + + op := &request.Operation{ + Name: "GetAgentFileCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// GetAgentFileCommandInput - Inputs for GetAgentFileCommand +type GetAgentFileCommandInput struct { + AgentId string + SharedSecretId string +} + +//DeleteAgentCommand - Delete an Agent +//RequestType: DELETE +//Input: input *DeleteAgentCommandInput +func (s *AgentsService) DeleteAgentCommand(input *DeleteAgentCommandInput) (resp *http.Response, err error) { + path := "/agents/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteAgentCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteAgentCommandInput - Inputs for DeleteAgentCommand +type DeleteAgentCommandInput struct { + Id string +} + +//GetAgentCommand - Get an Agent +//RequestType: GET +//Input: input *GetAgentCommandInput +func (s *AgentsService) GetAgentCommand(input *GetAgentCommandInput) (output *models.AgentView, resp *http.Response, err error) { + path := "/agents/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetAgentCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AgentView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAgentCommandInput - Inputs for GetAgentCommand +type GetAgentCommandInput struct { + Id string +} + +//UpdateAgentCommand - Update an Agent +//RequestType: PUT +//Input: input *UpdateAgentCommandInput +func (s *AgentsService) UpdateAgentCommand(input *UpdateAgentCommandInput) (output *models.AgentView, resp *http.Response, err error) { + path := "/agents/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateAgentCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AgentView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() 
== nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateAgentCommandInput - Inputs for UpdateAgentCommand +type UpdateAgentCommandInput struct { + Body models.AgentView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/agents/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/agents/interface.go new file mode 100644 index 00000000..b87f3ff0 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/agents/interface.go @@ -0,0 +1,18 @@ +package agents + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type AgentsAPI interface { + GetAgentsCommand(input *GetAgentsCommandInput) (output *models.AgentsView, resp *http.Response, err error) + AddAgentCommand(input *AddAgentCommandInput) (output *models.AgentView, resp *http.Response, err error) + GetAgentCertificatesCommand(input *GetAgentCertificatesCommandInput) (output *models.AgentCertificatesView, resp *http.Response, err error) + GetAgentCertificateCommand(input *GetAgentCertificateCommandInput) (output *models.AgentCertificateView, resp *http.Response, err error) + GetAgentFileCommand(input *GetAgentFileCommandInput) (resp *http.Response, err error) + DeleteAgentCommand(input *DeleteAgentCommandInput) (resp *http.Response, err error) + GetAgentCommand(input *GetAgentCommandInput) (output *models.AgentView, resp *http.Response, err error) + UpdateAgentCommand(input *UpdateAgentCommandInput) (output *models.AgentView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/applications/applications.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/applications/applications.go new file mode 100644 index 00000000..18662cde --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/applications/applications.go @@ -0,0 +1,542 @@ +package applications + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "Applications" +) + +//ApplicationsService provides the API operations for making requests to +// Applications endpoint. +type ApplicationsService struct { + *client.Client +} + +//New createa a new instance of the ApplicationsService client. 
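GetApplicationsCommand below carries a wider filter set than the services above (siteId, agentId, virtualHostId, ruleId and rulesetId in addition to the usual paging fields). A call-site fragment (cfg built as in the Example that follows; the SiteId value is a placeholder):

	svc := applications.New(cfg)
	out, _, err := svc.GetApplicationsCommand(&applications.GetApplicationsCommandInput{
		SiteId: "1", // only applications bound to this site
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out) // *models.ApplicationsView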
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a ApplicationsService from the configuration +// svc := applications.New(cfg) +// +func New(cfg *config.Config) *ApplicationsService { + + return &ApplicationsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Applications operation +func (s *ApplicationsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetApplicationsCommand - Get all Applications +//RequestType: GET +//Input: input *GetApplicationsCommandInput +func (s *ApplicationsService) GetApplicationsCommand(input *GetApplicationsCommandInput) (output *models.ApplicationsView, resp *http.Response, err error) { + path := "/applications" + op := &request.Operation{ + Name: "GetApplicationsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "siteId": input.SiteId, + "numberPerPage": input.NumberPerPage, + "agentId": input.AgentId, + "virtualHostId": input.VirtualHostId, + "ruleId": input.RuleId, + "rulesetId": input.RulesetId, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.ApplicationsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetApplicationsCommandInput - Inputs for GetApplicationsCommand +type GetApplicationsCommandInput struct { + Page string + SiteId string + NumberPerPage string + AgentId string + VirtualHostId string + RuleId string + RulesetId string + Filter string + Name string + SortKey string + Order string +} + +//AddApplicationCommand - Add an Application +//RequestType: POST +//Input: input *AddApplicationCommandInput +func (s *ApplicationsService) AddApplicationCommand(input *AddApplicationCommandInput) (output *models.ApplicationView, resp *http.Response, err error) { + path := "/applications" + op := &request.Operation{ + Name: "AddApplicationCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ApplicationView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddApplicationCommandInput - Inputs for AddApplicationCommand +type AddApplicationCommandInput struct { + Body models.ApplicationView +} + +//DeleteReservedApplicationCommand - Resets the Reserved Application configuration to default values +//RequestType: DELETE +//Input: +func (s *ApplicationsService) DeleteReservedApplicationCommand() (resp *http.Response, err error) { + path := "/applications/reserved" + op := &request.Operation{ + Name: "DeleteReservedApplicationCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetReservedApplicationCommand - Get Reserved Application configuration +//RequestType: GET +//Input: +func (s *ApplicationsService) GetReservedApplicationCommand() (output *models.ReservedApplicationView, resp *http.Response, err error) { + path := "/applications/reserved" 
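	// /applications/reserved is a singleton resource, so unlike the
	// {applicationId}/{resourceId} commands further down there is no
	// path-parameter substitution here.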
+ op := &request.Operation{ + Name: "GetReservedApplicationCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.ReservedApplicationView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateReservedApplicationCommand - Update Reserved Application configuration +//RequestType: PUT +//Input: input *UpdateReservedApplicationCommandInput +func (s *ApplicationsService) UpdateReservedApplicationCommand(input *UpdateReservedApplicationCommandInput) (output *models.ReservedApplicationView, resp *http.Response, err error) { + path := "/applications/reserved" + op := &request.Operation{ + Name: "UpdateReservedApplicationCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ReservedApplicationView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateReservedApplicationCommandInput - Inputs for UpdateReservedApplicationCommand +type UpdateReservedApplicationCommandInput struct { + Body models.ReservedApplicationView +} + +//GetResourcesCommand - Get all Resources +//RequestType: GET +//Input: input *GetResourcesCommandInput +func (s *ApplicationsService) GetResourcesCommand(input *GetResourcesCommandInput) (output *models.ResourcesView, resp *http.Response, err error) { + path := "/applications/resources" + op := &request.Operation{ + Name: "GetResourcesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "ruleId": input.RuleId, + "rulesetId": input.RulesetId, + "name": input.Name, + "filter": input.Filter, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.ResourcesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetResourcesCommandInput - Inputs for GetResourcesCommand +type GetResourcesCommandInput struct { + Page string + NumberPerPage string + RuleId string + RulesetId string + Name string + Filter string + SortKey string + Order string +} + +//GetApplicationsResourcesMethodsCommand - Get Application Resource Methods +//RequestType: GET +//Input: +func (s *ApplicationsService) GetApplicationsResourcesMethodsCommand() (output *models.MethodsView, resp *http.Response, err error) { + path := "/applications/resources/methods" + op := &request.Operation{ + Name: "GetApplicationsResourcesMethodsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.MethodsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//DeleteApplicationResourceCommand - Delete an Application Resource +//RequestType: DELETE +//Input: input *DeleteApplicationResourceCommandInput +func (s *ApplicationsService) DeleteApplicationResourceCommand(input *DeleteApplicationResourceCommandInput) (resp *http.Response, err error) { + path := "/applications/{applicationId}/resources/{resourceId}" + path = strings.Replace(path, "{applicationId}", input.ApplicationId, -1) + + path = strings.Replace(path, "{resourceId}", input.ResourceId, -1) + + op := &request.Operation{ + Name: "DeleteApplicationResourceCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: 
map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteApplicationResourceCommandInput - Inputs for DeleteApplicationResourceCommand +type DeleteApplicationResourceCommandInput struct { + ApplicationId string + ResourceId string +} + +//GetApplicationResourceCommand - Get an Application Resource +//RequestType: GET +//Input: input *GetApplicationResourceCommandInput +func (s *ApplicationsService) GetApplicationResourceCommand(input *GetApplicationResourceCommandInput) (output *models.ResourceView, resp *http.Response, err error) { + path := "/applications/{applicationId}/resources/{resourceId}" + path = strings.Replace(path, "{applicationId}", input.ApplicationId, -1) + + path = strings.Replace(path, "{resourceId}", input.ResourceId, -1) + + op := &request.Operation{ + Name: "GetApplicationResourceCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ResourceView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetApplicationResourceCommandInput - Inputs for GetApplicationResourceCommand +type GetApplicationResourceCommandInput struct { + ApplicationId string + ResourceId string +} + +//UpdateApplicationResourceCommand - Update an Application Resource +//RequestType: PUT +//Input: input *UpdateApplicationResourceCommandInput +func (s *ApplicationsService) UpdateApplicationResourceCommand(input *UpdateApplicationResourceCommandInput) (output *models.ResourceView, resp *http.Response, err error) { + path := "/applications/{applicationId}/resources/{resourceId}" + path = strings.Replace(path, "{applicationId}", input.ApplicationId, -1) + + path = strings.Replace(path, "{resourceId}", input.ResourceId, -1) + + op := &request.Operation{ + Name: "UpdateApplicationResourceCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ResourceView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateApplicationResourceCommandInput - Inputs for UpdateApplicationResourceCommand +type UpdateApplicationResourceCommandInput struct { + Body models.ResourceView + ApplicationId string + ResourceId string +} + +//DeleteApplicationCommand - Delete an Application +//RequestType: DELETE +//Input: input *DeleteApplicationCommandInput +func (s *ApplicationsService) DeleteApplicationCommand(input *DeleteApplicationCommandInput) (output *models.ApplicationView, resp *http.Response, err error) { + path := "/applications/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteApplicationCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ApplicationView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// DeleteApplicationCommandInput - Inputs for DeleteApplicationCommand +type DeleteApplicationCommandInput struct { + Id string +} + +//GetApplicationCommand - Get an Application +//RequestType: GET +//Input: input *GetApplicationCommandInput +func (s *ApplicationsService) GetApplicationCommand(input *GetApplicationCommandInput) (output 
*models.ApplicationView, resp *http.Response, err error) { + path := "/applications/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetApplicationCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ApplicationView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetApplicationCommandInput - Inputs for GetApplicationCommand +type GetApplicationCommandInput struct { + Id string +} + +//UpdateApplicationCommand - Update an Application +//RequestType: PUT +//Input: input *UpdateApplicationCommandInput +func (s *ApplicationsService) UpdateApplicationCommand(input *UpdateApplicationCommandInput) (output *models.ApplicationView, resp *http.Response, err error) { + path := "/applications/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateApplicationCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ApplicationView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateApplicationCommandInput - Inputs for UpdateApplicationCommand +type UpdateApplicationCommandInput struct { + Body models.ApplicationView + Id string +} + +//GetResourceMatchingEvaluationOrderCommand - Get the resource path ordering for an Application +//RequestType: GET +//Input: input *GetResourceMatchingEvaluationOrderCommandInput +func (s *ApplicationsService) GetResourceMatchingEvaluationOrderCommand(input *GetResourceMatchingEvaluationOrderCommandInput) (output *models.ResourceMatchingEvaluationOrderView, resp *http.Response, err error) { + path := "/applications/{id}/resourceMatchingEvaluationOrder" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetResourceMatchingEvaluationOrderCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ResourceMatchingEvaluationOrderView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetResourceMatchingEvaluationOrderCommandInput - Inputs for GetResourceMatchingEvaluationOrderCommand +type GetResourceMatchingEvaluationOrderCommandInput struct { + Id string +} + +//GetApplicationResourcesCommand - Get Resources for an Application +//RequestType: GET +//Input: input *GetApplicationResourcesCommandInput +func (s *ApplicationsService) GetApplicationResourcesCommand(input *GetApplicationResourcesCommandInput) (output *models.ResourcesView, resp *http.Response, err error) { + path := "/applications/{id}/resources" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetApplicationResourcesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "name": input.Name, + "filter": input.Filter, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.ResourcesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetApplicationResourcesCommandInput - Inputs for GetApplicationResourcesCommand +type 
GetApplicationResourcesCommandInput struct { + Page string + NumberPerPage string + Name string + Filter string + SortKey string + Order string + + Id string +} + +//AddApplicationResourceCommand - Add Resource to an Application +//RequestType: POST +//Input: input *AddApplicationResourceCommandInput +func (s *ApplicationsService) AddApplicationResourceCommand(input *AddApplicationResourceCommandInput) (output *models.ResourceView, resp *http.Response, err error) { + path := "/applications/{id}/resources" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "AddApplicationResourceCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ResourceView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddApplicationResourceCommandInput - Inputs for AddApplicationResourceCommand +type AddApplicationResourceCommandInput struct { + Body models.ResourceView + Id string +} + +//GetResourceAutoOrderCommand - Computes an automatic, intelligent resource ordering for an Application. +//RequestType: GET +//Input: input *GetResourceAutoOrderCommandInput +func (s *ApplicationsService) GetResourceAutoOrderCommand(input *GetResourceAutoOrderCommandInput) (output *models.ResourceOrderView, resp *http.Response, err error) { + path := "/applications/{id}/resources/autoOrder" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetResourceAutoOrderCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ResourceOrderView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetResourceAutoOrderCommandInput - Inputs for GetResourceAutoOrderCommand +type GetResourceAutoOrderCommandInput struct { + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/applications/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/applications/interface.go new file mode 100644 index 00000000..30f83662 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/applications/interface.go @@ -0,0 +1,27 @@ +package applications + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type ApplicationsAPI interface { + GetApplicationsCommand(input *GetApplicationsCommandInput) (output *models.ApplicationsView, resp *http.Response, err error) + AddApplicationCommand(input *AddApplicationCommandInput) (output *models.ApplicationView, resp *http.Response, err error) + DeleteReservedApplicationCommand() (resp *http.Response, err error) + GetReservedApplicationCommand() (output *models.ReservedApplicationView, resp *http.Response, err error) + UpdateReservedApplicationCommand(input *UpdateReservedApplicationCommandInput) (output *models.ReservedApplicationView, resp *http.Response, err error) + GetResourcesCommand(input *GetResourcesCommandInput) (output *models.ResourcesView, resp *http.Response, err error) + GetApplicationsResourcesMethodsCommand() (output *models.MethodsView, resp *http.Response, err error) + DeleteApplicationResourceCommand(input *DeleteApplicationResourceCommandInput) (resp *http.Response, err error) + GetApplicationResourceCommand(input *GetApplicationResourceCommandInput) (output *models.ResourceView, 
resp *http.Response, err error)
+	UpdateApplicationResourceCommand(input *UpdateApplicationResourceCommandInput) (output *models.ResourceView, resp *http.Response, err error)
+	DeleteApplicationCommand(input *DeleteApplicationCommandInput) (output *models.ApplicationView, resp *http.Response, err error)
+	GetApplicationCommand(input *GetApplicationCommandInput) (output *models.ApplicationView, resp *http.Response, err error)
+	UpdateApplicationCommand(input *UpdateApplicationCommandInput) (output *models.ApplicationView, resp *http.Response, err error)
+	GetResourceMatchingEvaluationOrderCommand(input *GetResourceMatchingEvaluationOrderCommandInput) (output *models.ResourceMatchingEvaluationOrderView, resp *http.Response, err error)
+	GetApplicationResourcesCommand(input *GetApplicationResourcesCommandInput) (output *models.ResourcesView, resp *http.Response, err error)
+	AddApplicationResourceCommand(input *AddApplicationResourceCommandInput) (output *models.ResourceView, resp *http.Response, err error)
+	GetResourceAutoOrderCommand(input *GetResourceAutoOrderCommandInput) (output *models.ResourceOrderView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/auth/auth.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/auth/auth.go
new file mode 100644
index 00000000..ec9f898d
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/auth/auth.go
@@ -0,0 +1,302 @@
+package auth
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "Auth"
+)
+
+//AuthService provides the API operations for making requests to
+// Auth endpoint.
+type AuthService struct {
+	*client.Client
+}
+
+//New creates a new instance of the AuthService client.
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a AuthService from the configuration +// svc := auth.New(cfg) +// +func New(cfg *config.Config) *AuthService { + + return &AuthService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Auth operation +func (s *AuthService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//DeleteBasicAuthCommand - Resets the HTTP Basic Authentication configuration to default values +//RequestType: DELETE +//Input: +func (s *AuthService) DeleteBasicAuthCommand() (resp *http.Response, err error) { + path := "/auth/basic" + op := &request.Operation{ + Name: "DeleteBasicAuthCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetBasicAuthCommand - Get the HTTP Basic Authentication configuration +//RequestType: GET +//Input: +func (s *AuthService) GetBasicAuthCommand() (output *models.BasicConfig, resp *http.Response, err error) { + path := "/auth/basic" + op := &request.Operation{ + Name: "GetBasicAuthCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.BasicConfig{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateBasicAuthCommand - Update the Basic Authentication configuration +//RequestType: PUT +//Input: input *UpdateBasicAuthCommandInput +func (s *AuthService) UpdateBasicAuthCommand(input *UpdateBasicAuthCommandInput) (output *models.BasicAuthConfigView, resp *http.Response, err error) { + path := "/auth/basic" + op := &request.Operation{ + Name: "UpdateBasicAuthCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.BasicAuthConfigView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateBasicAuthCommandInput - Inputs for UpdateBasicAuthCommand +type UpdateBasicAuthCommandInput struct { + Body models.BasicAuthConfigView +} + +//DeleteOAuthAuthCommand - Resets the OAuth Authentication configuration to default values +//RequestType: DELETE +//Input: +func (s *AuthService) DeleteOAuthAuthCommand() (resp *http.Response, err error) { + path := "/auth/oauth" + op := &request.Operation{ + Name: "DeleteOAuthAuthCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetOAuthAuthCommand - Get the OAuth Authentication configuration +//RequestType: GET +//Input: +func (s *AuthService) GetOAuthAuthCommand() (output *models.OAuthConfigView, resp *http.Response, err error) { + path := "/auth/oauth" + op := &request.Operation{ + Name: "GetOAuthAuthCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.OAuthConfigView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateOAuthAuthCommand - Update the OAuth 
Authentication configuration +//RequestType: PUT +//Input: input *UpdateOAuthAuthCommandInput +func (s *AuthService) UpdateOAuthAuthCommand(input *UpdateOAuthAuthCommandInput) (output *models.OAuthConfigView, resp *http.Response, err error) { + path := "/auth/oauth" + op := &request.Operation{ + Name: "UpdateOAuthAuthCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.OAuthConfigView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateOAuthAuthCommandInput - Inputs for UpdateOAuthAuthCommand +type UpdateOAuthAuthCommandInput struct { + Body models.OAuthConfigView +} + +//DeleteOidcAuthCommand - Resets the OIDC Authentication configuration to default values +//RequestType: DELETE +//Input: +func (s *AuthService) DeleteOidcAuthCommand() (resp *http.Response, err error) { + path := "/auth/oidc" + op := &request.Operation{ + Name: "DeleteOidcAuthCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetOidcAuthCommand - Get the OIDC Authentication configuration +//RequestType: GET +//Input: +func (s *AuthService) GetOidcAuthCommand() (output *models.OidcConfigView, resp *http.Response, err error) { + path := "/auth/oidc" + op := &request.Operation{ + Name: "GetOidcAuthCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.OidcConfigView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateOidcAuthCommand - Update the OIDC Authentication configuration +//RequestType: PUT +//Input: input *UpdateOidcAuthCommandInput +func (s *AuthService) UpdateOidcAuthCommand(input *UpdateOidcAuthCommandInput) (output *models.OidcConfigView, resp *http.Response, err error) { + path := "/auth/oidc" + op := &request.Operation{ + Name: "UpdateOidcAuthCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.OidcConfigView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateOidcAuthCommandInput - Inputs for UpdateOidcAuthCommand +type UpdateOidcAuthCommandInput struct { + Body models.OidcConfigView +} + +//DeleteAdminBasicWebSessionCommand - Resets the Admin Web Session configuration to default values +//RequestType: DELETE +//Input: +func (s *AuthService) DeleteAdminBasicWebSessionCommand() (resp *http.Response, err error) { + path := "/auth/webSession" + op := &request.Operation{ + Name: "DeleteAdminBasicWebSessionCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetAdminBasicWebSessionCommand - Get the admin web session configuration +//RequestType: GET +//Input: +func (s *AuthService) GetAdminBasicWebSessionCommand() (output *models.AdminBasicWebSessionView, resp *http.Response, err error) { + path := "/auth/webSession" + op := &request.Operation{ + Name: "GetAdminBasicWebSessionCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.AdminBasicWebSessionView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + 
return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateAdminBasicWebSessionCommand - Update the admin web session configuration +//RequestType: PUT +//Input: input *UpdateAdminBasicWebSessionCommandInput +func (s *AuthService) UpdateAdminBasicWebSessionCommand(input *UpdateAdminBasicWebSessionCommandInput) (output *models.AdminBasicWebSessionView, resp *http.Response, err error) { + path := "/auth/webSession" + op := &request.Operation{ + Name: "UpdateAdminBasicWebSessionCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AdminBasicWebSessionView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateAdminBasicWebSessionCommandInput - Inputs for UpdateAdminBasicWebSessionCommand +type UpdateAdminBasicWebSessionCommandInput struct { + Body models.AdminBasicWebSessionView +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/auth/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/auth/interface.go new file mode 100644 index 00000000..2638b22b --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/auth/interface.go @@ -0,0 +1,22 @@ +package auth + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type AuthAPI interface { + DeleteBasicAuthCommand() (resp *http.Response, err error) + GetBasicAuthCommand() (output *models.BasicConfig, resp *http.Response, err error) + UpdateBasicAuthCommand(input *UpdateBasicAuthCommandInput) (output *models.BasicAuthConfigView, resp *http.Response, err error) + DeleteOAuthAuthCommand() (resp *http.Response, err error) + GetOAuthAuthCommand() (output *models.OAuthConfigView, resp *http.Response, err error) + UpdateOAuthAuthCommand(input *UpdateOAuthAuthCommandInput) (output *models.OAuthConfigView, resp *http.Response, err error) + DeleteOidcAuthCommand() (resp *http.Response, err error) + GetOidcAuthCommand() (output *models.OidcConfigView, resp *http.Response, err error) + UpdateOidcAuthCommand(input *UpdateOidcAuthCommandInput) (output *models.OidcConfigView, resp *http.Response, err error) + DeleteAdminBasicWebSessionCommand() (resp *http.Response, err error) + GetAdminBasicWebSessionCommand() (output *models.AdminBasicWebSessionView, resp *http.Response, err error) + UpdateAdminBasicWebSessionCommand(input *UpdateAdminBasicWebSessionCommandInput) (output *models.AdminBasicWebSessionView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement/authTokenManagement.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement/authTokenManagement.go new file mode 100644 index 00000000..d8f51363 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement/authTokenManagement.go @@ -0,0 +1,157 @@ +package authTokenManagement + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. 
+ ServiceName = "AuthTokenManagement" +) + +//AuthTokenManagementService provides the API operations for making requests to +// AuthTokenManagement endpoint. +type AuthTokenManagementService struct { + *client.Client +} + +//New createa a new instance of the AuthTokenManagementService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a AuthTokenManagementService from the configuration +// svc := authTokenManagement.New(cfg) +// +func New(cfg *config.Config) *AuthTokenManagementService { + + return &AuthTokenManagementService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a AuthTokenManagement operation +func (s *AuthTokenManagementService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//DeleteAuthTokenManagementCommand - Resets the Auth Token Management configuration to default values +//RequestType: DELETE +//Input: +func (s *AuthTokenManagementService) DeleteAuthTokenManagementCommand() (resp *http.Response, err error) { + path := "/authTokenManagement" + op := &request.Operation{ + Name: "DeleteAuthTokenManagementCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetAuthTokenManagementCommand - Get the Auth Token Management configuration +//RequestType: GET +//Input: +func (s *AuthTokenManagementService) GetAuthTokenManagementCommand() (output *models.AuthTokenManagementView, resp *http.Response, err error) { + path := "/authTokenManagement" + op := &request.Operation{ + Name: "GetAuthTokenManagementCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.AuthTokenManagementView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateAuthTokenManagementCommand - Update the Auth Token Management configuration +//RequestType: PUT +//Input: input *UpdateAuthTokenManagementCommandInput +func (s *AuthTokenManagementService) UpdateAuthTokenManagementCommand(input *UpdateAuthTokenManagementCommandInput) (output *models.AuthTokenManagementView, resp *http.Response, err error) { + path := "/authTokenManagement" + op := &request.Operation{ + Name: "UpdateAuthTokenManagementCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AuthTokenManagementView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateAuthTokenManagementCommandInput - Inputs for UpdateAuthTokenManagementCommand +type UpdateAuthTokenManagementCommandInput struct { + Body models.AuthTokenManagementView +} + +//GetAuthTokenKeySetCommand - Get the Auth Token key set configuration +//RequestType: GET +//Input: +func (s *AuthTokenManagementService) GetAuthTokenKeySetCommand() (output *models.KeySetView, resp *http.Response, err error) { + path := "/authTokenManagement/keySet" + op := &request.Operation{ + Name: "GetAuthTokenKeySetCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.KeySetView{} + req := s.newRequest(op, 
nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateAuthTokenKeySetCommand - Update the AuthToken key set configuration +//RequestType: PUT +//Input: input *UpdateAuthTokenKeySetCommandInput +func (s *AuthTokenManagementService) UpdateAuthTokenKeySetCommand(input *UpdateAuthTokenKeySetCommandInput) (output *models.KeySetView, resp *http.Response, err error) { + path := "/authTokenManagement/keySet" + op := &request.Operation{ + Name: "UpdateAuthTokenKeySetCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.KeySetView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateAuthTokenKeySetCommandInput - Inputs for UpdateAuthTokenKeySetCommand +type UpdateAuthTokenKeySetCommandInput struct { + Body models.KeySetView +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement/interface.go new file mode 100644 index 00000000..053d1584 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement/interface.go @@ -0,0 +1,15 @@ +package authTokenManagement + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type AuthTokenManagementAPI interface { + DeleteAuthTokenManagementCommand() (resp *http.Response, err error) + GetAuthTokenManagementCommand() (output *models.AuthTokenManagementView, resp *http.Response, err error) + UpdateAuthTokenManagementCommand(input *UpdateAuthTokenManagementCommandInput) (output *models.AuthTokenManagementView, resp *http.Response, err error) + GetAuthTokenKeySetCommand() (output *models.KeySetView, resp *http.Response, err error) + UpdateAuthTokenKeySetCommand(input *UpdateAuthTokenKeySetCommandInput) (output *models.KeySetView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists/authnReqLists.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists/authnReqLists.go new file mode 100644 index 00000000..7ea9d1df --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists/authnReqLists.go @@ -0,0 +1,195 @@ +package authnReqLists + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "AuthnReqLists" +) + +//AuthnReqListsService provides the API operations for making requests to +// AuthnReqLists endpoint. +type AuthnReqListsService struct { + *client.Client +} + +//New createa a new instance of the AuthnReqListsService client. 
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a AuthnReqListsService from the configuration +// svc := authnReqLists.New(cfg) +// +func New(cfg *config.Config) *AuthnReqListsService { + + return &AuthnReqListsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a AuthnReqLists operation +func (s *AuthnReqListsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetAuthnReqListsCommand - Get all Authentication Requirement Lists +//RequestType: GET +//Input: input *GetAuthnReqListsCommandInput +func (s *AuthnReqListsService) GetAuthnReqListsCommand(input *GetAuthnReqListsCommandInput) (output *models.AuthnReqListsView, resp *http.Response, err error) { + path := "/authnReqLists" + op := &request.Operation{ + Name: "GetAuthnReqListsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.AuthnReqListsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAuthnReqListsCommandInput - Inputs for GetAuthnReqListsCommand +type GetAuthnReqListsCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddAuthnReqListCommand - Add an Authentication Requirement List +//RequestType: POST +//Input: input *AddAuthnReqListCommandInput +func (s *AuthnReqListsService) AddAuthnReqListCommand(input *AddAuthnReqListCommandInput) (output *models.AuthnReqListView, resp *http.Response, err error) { + path := "/authnReqLists" + op := &request.Operation{ + Name: "AddAuthnReqListCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AuthnReqListView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddAuthnReqListCommandInput - Inputs for AddAuthnReqListCommand +type AddAuthnReqListCommandInput struct { + Body models.AuthnReqListView +} + +//DeleteAuthnReqListCommand - Delete an Authentication Requirement List +//RequestType: DELETE +//Input: input *DeleteAuthnReqListCommandInput +func (s *AuthnReqListsService) DeleteAuthnReqListCommand(input *DeleteAuthnReqListCommandInput) (resp *http.Response, err error) { + path := "/authnReqLists/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteAuthnReqListCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteAuthnReqListCommandInput - Inputs for DeleteAuthnReqListCommand +type DeleteAuthnReqListCommandInput struct { + Id string +} + +//GetAuthnReqListCommand - Get an Authentication Requirement List +//RequestType: GET +//Input: input *GetAuthnReqListCommandInput +func (s *AuthnReqListsService) 
GetAuthnReqListCommand(input *GetAuthnReqListCommandInput) (output *models.AuthnReqListView, resp *http.Response, err error) { + path := "/authnReqLists/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetAuthnReqListCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AuthnReqListView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAuthnReqListCommandInput - Inputs for GetAuthnReqListCommand +type GetAuthnReqListCommandInput struct { + Id string +} + +//UpdateAuthnReqListCommand - Update an Authentication Requirement List +//RequestType: PUT +//Input: input *UpdateAuthnReqListCommandInput +func (s *AuthnReqListsService) UpdateAuthnReqListCommand(input *UpdateAuthnReqListCommandInput) (output *models.AuthnReqListView, resp *http.Response, err error) { + path := "/authnReqLists/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateAuthnReqListCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AuthnReqListView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateAuthnReqListCommandInput - Inputs for UpdateAuthnReqListCommand +type UpdateAuthnReqListCommandInput struct { + Body models.AuthnReqListView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists/interface.go new file mode 100644 index 00000000..7cb04a76 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists/interface.go @@ -0,0 +1,15 @@ +package authnReqLists + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type AuthnReqListsAPI interface { + GetAuthnReqListsCommand(input *GetAuthnReqListsCommandInput) (output *models.AuthnReqListsView, resp *http.Response, err error) + AddAuthnReqListCommand(input *AddAuthnReqListCommandInput) (output *models.AuthnReqListView, resp *http.Response, err error) + DeleteAuthnReqListCommand(input *DeleteAuthnReqListCommandInput) (resp *http.Response, err error) + GetAuthnReqListCommand(input *GetAuthnReqListCommandInput) (output *models.AuthnReqListView, resp *http.Response, err error) + UpdateAuthnReqListCommand(input *UpdateAuthnReqListCommandInput) (output *models.AuthnReqListView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/backup/backup.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/backup/backup.go new file mode 100644 index 00000000..4d039816 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/backup/backup.go @@ -0,0 +1,68 @@ +package backup + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. 
+ ServiceName = "Backup" +) + +//BackupService provides the API operations for making requests to +// Backup endpoint. +type BackupService struct { + *client.Client +} + +//New createa a new instance of the BackupService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a BackupService from the configuration +// svc := backup.New(cfg) +// +func New(cfg *config.Config) *BackupService { + + return &BackupService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Backup operation +func (s *BackupService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//BackupCommand - Create a local database backup +//RequestType: GET +//Input: +func (s *BackupService) BackupCommand() (resp *http.Response, err error) { + path := "/backup" + op := &request.Operation{ + Name: "BackupCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/backup/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/backup/interface.go new file mode 100644 index 00000000..0edce903 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/backup/interface.go @@ -0,0 +1,7 @@ +package backup + +import "net/http" + +type BackupAPI interface { + BackupCommand() (resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/certificates/certificates.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/certificates/certificates.go new file mode 100644 index 00000000..83fbda7d --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/certificates/certificates.go @@ -0,0 +1,222 @@ +package certificates + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "Certificates" +) + +//CertificatesService provides the API operations for making requests to +// Certificates endpoint. +type CertificatesService struct { + *client.Client +} + +//New createa a new instance of the CertificatesService client. 
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a CertificatesService from the configuration +// svc := certificates.New(cfg) +// +func New(cfg *config.Config) *CertificatesService { + + return &CertificatesService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Certificates operation +func (s *CertificatesService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetTrustedCerts - Get all Certificates +//RequestType: GET +//Input: input *GetTrustedCertsInput +func (s *CertificatesService) GetTrustedCerts(input *GetTrustedCertsInput) (output *models.TrustedCertsView, resp *http.Response, err error) { + path := "/certificates" + op := &request.Operation{ + Name: "GetTrustedCerts", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "alias": input.Alias, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.TrustedCertsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetTrustedCertsInput - Inputs for GetTrustedCerts +type GetTrustedCertsInput struct { + Page string + NumberPerPage string + Filter string + Alias string + SortKey string + Order string +} + +//ImportTrustedCert - Import a Certificate +//RequestType: POST +//Input: input *ImportTrustedCertInput +func (s *CertificatesService) ImportTrustedCert(input *ImportTrustedCertInput) (output *models.TrustedCertView, resp *http.Response, err error) { + path := "/certificates" + op := &request.Operation{ + Name: "ImportTrustedCert", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.TrustedCertView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// ImportTrustedCertInput - Inputs for ImportTrustedCert +type ImportTrustedCertInput struct { + Body models.X509FileImportDocView +} + +//DeleteTrustedCertCommand - Delete a Certificate +//RequestType: DELETE +//Input: input *DeleteTrustedCertCommandInput +func (s *CertificatesService) DeleteTrustedCertCommand(input *DeleteTrustedCertCommandInput) (resp *http.Response, err error) { + path := "/certificates/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteTrustedCertCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteTrustedCertCommandInput - Inputs for DeleteTrustedCertCommand +type DeleteTrustedCertCommandInput struct { + Id string +} + +//GetTrustedCert - Get a Certificate +//RequestType: GET +//Input: input *GetTrustedCertInput +func (s *CertificatesService) GetTrustedCert(input *GetTrustedCertInput) (output *models.TrustedCertView, resp *http.Response, err error) { + path := "/certificates/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: 
"GetTrustedCert", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.TrustedCertView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetTrustedCertInput - Inputs for GetTrustedCert +type GetTrustedCertInput struct { + Id string +} + +//UpdateTrustedCert - Update a Certificate +//RequestType: PUT +//Input: input *UpdateTrustedCertInput +func (s *CertificatesService) UpdateTrustedCert(input *UpdateTrustedCertInput) (output *models.TrustedCertView, resp *http.Response, err error) { + path := "/certificates/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateTrustedCert", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.TrustedCertView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateTrustedCertInput - Inputs for UpdateTrustedCert +type UpdateTrustedCertInput struct { + Body models.X509FileImportDocView + Id string +} + +//ExportTrustedCert - Export a Certificate +//RequestType: GET +//Input: input *ExportTrustedCertInput +func (s *CertificatesService) ExportTrustedCert(input *ExportTrustedCertInput) (resp *http.Response, err error) { + path := "/certificates/{id}/file" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "ExportTrustedCert", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// ExportTrustedCertInput - Inputs for ExportTrustedCert +type ExportTrustedCertInput struct { + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/certificates/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/certificates/interface.go new file mode 100644 index 00000000..f9b6349a --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/certificates/interface.go @@ -0,0 +1,16 @@ +package certificates + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type CertificatesAPI interface { + GetTrustedCerts(input *GetTrustedCertsInput) (output *models.TrustedCertsView, resp *http.Response, err error) + ImportTrustedCert(input *ImportTrustedCertInput) (output *models.TrustedCertView, resp *http.Response, err error) + DeleteTrustedCertCommand(input *DeleteTrustedCertCommandInput) (resp *http.Response, err error) + GetTrustedCert(input *GetTrustedCertInput) (output *models.TrustedCertView, resp *http.Response, err error) + UpdateTrustedCert(input *UpdateTrustedCertInput) (output *models.TrustedCertView, resp *http.Response, err error) + ExportTrustedCert(input *ExportTrustedCertInput) (resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/config/config.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/config/config.go new file mode 100644 index 00000000..e4794db0 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/config/config.go @@ -0,0 +1,258 @@ +package config + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + 
"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "Config" +) + +//ConfigService provides the API operations for making requests to +// Config endpoint. +type ConfigService struct { + *client.Client +} + +//New createa a new instance of the ConfigService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a ConfigService from the configuration +// svc := config.New(cfg) +// +func New(cfg *config.Config) *ConfigService { + + return &ConfigService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Config operation +func (s *ConfigService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//ConfigExportCommand - [Attention: The endpoint "/config/export" is deprecated. The "config/export/workflows" endpoint should be used instead.] Export a JSON backup of the entire system +//RequestType: GET +//Input: +func (s *ConfigService) ConfigExportCommand() (output *models.ExportData, resp *http.Response, err error) { + path := "/config/export" + op := &request.Operation{ + Name: "ConfigExportCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.ExportData{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetConfigExportWorkflowsCommand - Get the status of pending Exports +//RequestType: GET +//Input: +func (s *ConfigService) GetConfigExportWorkflowsCommand() (output *models.ConfigStatusesView, resp *http.Response, err error) { + path := "/config/export/workflows" + op := &request.Operation{ + Name: "GetConfigExportWorkflowsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.ConfigStatusesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//AddConfigExportWorkflowCommand - Start a JSON backup of the entire system for export +//RequestType: POST +//Input: +func (s *ConfigService) AddConfigExportWorkflowCommand() (output *models.ConfigStatusView, resp *http.Response, err error) { + path := "/config/export/workflows" + op := &request.Operation{ + Name: "AddConfigExportWorkflowCommand", + HTTPMethod: "POST", + HTTPPath: path, + } + output = &models.ConfigStatusView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetConfigExportWorkflowCommand - Check the status of an Export +//RequestType: GET +//Input: input *GetConfigExportWorkflowCommandInput +func (s *ConfigService) GetConfigExportWorkflowCommand(input *GetConfigExportWorkflowCommandInput) (output *models.ConfigStatusView, resp *http.Response, err error) { + path := "/config/export/workflows/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: 
"GetConfigExportWorkflowCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ConfigStatusView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetConfigExportWorkflowCommandInput - Inputs for GetConfigExportWorkflowCommand +type GetConfigExportWorkflowCommandInput struct { + Id string +} + +//GetConfigExportWorkflowDataCommand - Export a JSON backup of the entire system +//RequestType: GET +//Input: input *GetConfigExportWorkflowDataCommandInput +func (s *ConfigService) GetConfigExportWorkflowDataCommand(input *GetConfigExportWorkflowDataCommandInput) (output *models.ExportData, resp *http.Response, err error) { + path := "/config/export/workflows/{id}/data" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetConfigExportWorkflowDataCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ExportData{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetConfigExportWorkflowDataCommandInput - Inputs for GetConfigExportWorkflowDataCommand +type GetConfigExportWorkflowDataCommandInput struct { + Id string +} + +//ConfigImportCommand - [Attention: The endpoint "/config/import" is deprecated. The "config/import/workflows" endpoint should be used instead.] Import JSON backup. +//RequestType: POST +//Input: input *ConfigImportCommandInput +func (s *ConfigService) ConfigImportCommand(input *ConfigImportCommandInput) (resp *http.Response, err error) { + path := "/config/import" + op := &request.Operation{ + Name: "ConfigImportCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, input.Body, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// ConfigImportCommandInput - Inputs for ConfigImportCommand +type ConfigImportCommandInput struct { + Body string +} + +//GetConfigImportWorkflowsCommand - Get the status of pending imports +//RequestType: GET +//Input: +func (s *ConfigService) GetConfigImportWorkflowsCommand() (output *models.ConfigStatusesView, resp *http.Response, err error) { + path := "/config/import/workflows" + op := &request.Operation{ + Name: "GetConfigImportWorkflowsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.ConfigStatusesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//AddConfigImportWorkflowCommand - Start an Import of a JSON backup +//RequestType: POST +//Input: input *AddConfigImportWorkflowCommandInput +func (s *ConfigService) AddConfigImportWorkflowCommand(input *AddConfigImportWorkflowCommandInput) (resp *http.Response, err error) { + path := "/config/import/workflows" + op := &request.Operation{ + Name: "AddConfigImportWorkflowCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, input.Body, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// AddConfigImportWorkflowCommandInput - Inputs for AddConfigImportWorkflowCommand +type AddConfigImportWorkflowCommandInput struct { + Body models.ExportData +} + 
+//GetConfigImportWorkflowCommand - Check the status of an Import
+//RequestType: GET
+//Input: input *GetConfigImportWorkflowCommandInput
+func (s *ConfigService) GetConfigImportWorkflowCommand(input *GetConfigImportWorkflowCommandInput) (output *models.ConfigStatusView, resp *http.Response, err error) {
+	path := "/config/import/workflows/{id}"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
+	op := &request.Operation{
+		Name:        "GetConfigImportWorkflowCommand",
+		HTTPMethod:  "GET",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.ConfigStatusView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// GetConfigImportWorkflowCommandInput - Inputs for GetConfigImportWorkflowCommand
+type GetConfigImportWorkflowCommandInput struct {
+	Id string
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/config/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/config/interface.go
new file mode 100644
index 00000000..9ebea8ce
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/config/interface.go
@@ -0,0 +1,19 @@
+package config
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+)
+
+type ConfigAPI interface {
+	ConfigExportCommand() (output *models.ExportData, resp *http.Response, err error)
+	GetConfigExportWorkflowsCommand() (output *models.ConfigStatusesView, resp *http.Response, err error)
+	AddConfigExportWorkflowCommand() (output *models.ConfigStatusView, resp *http.Response, err error)
+	GetConfigExportWorkflowCommand(input *GetConfigExportWorkflowCommandInput) (output *models.ConfigStatusView, resp *http.Response, err error)
+	GetConfigExportWorkflowDataCommand(input *GetConfigExportWorkflowDataCommandInput) (output *models.ExportData, resp *http.Response, err error)
+	ConfigImportCommand(input *ConfigImportCommandInput) (resp *http.Response, err error)
+	GetConfigImportWorkflowsCommand() (output *models.ConfigStatusesView, resp *http.Response, err error)
+	AddConfigImportWorkflowCommand(input *AddConfigImportWorkflowCommandInput) (resp *http.Response, err error)
+	GetConfigImportWorkflowCommand(input *GetConfigImportWorkflowCommandInput) (output *models.ConfigStatusView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engineListeners/engineListeners.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engineListeners/engineListeners.go
new file mode 100644
index 00000000..aff0340f
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engineListeners/engineListeners.go
@@ -0,0 +1,195 @@
+package engineListeners
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "EngineListeners"
+)
+
+//EngineListenersService provides the API operations for making requests to
+// EngineListeners endpoint.
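+//
+// Each command returns the raw *http.Response alongside any decoded model, so
+// callers can inspect the HTTP status even when an operation fails.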
+type EngineListenersService struct {
+	*client.Client
+}
+
+//New creates a new instance of the EngineListenersService client.
+//
+// Example:
+//  cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+//  //Create an EngineListenersService from the configuration
+//  svc := engineListeners.New(cfg)
+//
+func New(cfg *config.Config) *EngineListenersService {
+
+	return &EngineListenersService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for an EngineListeners operation
+func (s *EngineListenersService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//GetEngineListenersCommand - Get all Engine Listeners
+//RequestType: GET
+//Input: input *GetEngineListenersCommandInput
+func (s *EngineListenersService) GetEngineListenersCommand(input *GetEngineListenersCommandInput) (output *models.EngineListenersView, resp *http.Response, err error) {
+	path := "/engineListeners"
+	op := &request.Operation{
+		Name:       "GetEngineListenersCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+		QueryParams: map[string]string{
+			"page":          input.Page,
+			"numberPerPage": input.NumberPerPage,
+			"filter":        input.Filter,
+			"name":          input.Name,
+			"sortKey":       input.SortKey,
+			"order":         input.Order,
+		},
+	}
+	output = &models.EngineListenersView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// GetEngineListenersCommandInput - Inputs for GetEngineListenersCommand
+type GetEngineListenersCommandInput struct {
+	Page          string
+	NumberPerPage string
+	Filter        string
+	Name          string
+	SortKey       string
+	Order         string
+}
+
+//AddEngineListenerCommand - Create an Engine Listener
+//RequestType: POST
+//Input: input *AddEngineListenerCommandInput
+func (s *EngineListenersService) AddEngineListenerCommand(input *AddEngineListenerCommandInput) (output *models.EngineListenerView, resp *http.Response, err error) {
+	path := "/engineListeners"
+	op := &request.Operation{
+		Name:        "AddEngineListenerCommand",
+		HTTPMethod:  "POST",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.EngineListenerView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// AddEngineListenerCommandInput - Inputs for AddEngineListenerCommand
+type AddEngineListenerCommandInput struct {
+	Body models.EngineListenerView
+}
+
+//DeleteEngineListenerCommand - Delete an Engine Listener
+//RequestType: DELETE
+//Input: input *DeleteEngineListenerCommandInput
+func (s *EngineListenersService) DeleteEngineListenerCommand(input *DeleteEngineListenerCommandInput) (resp *http.Response, err error) {
+	path := "/engineListeners/{id}"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
+	op := &request.Operation{
+		Name:        "DeleteEngineListenerCommand",
+		HTTPMethod:  "DELETE",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+
+	req := s.newRequest(op, nil, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+// DeleteEngineListenerCommandInput - Inputs for DeleteEngineListenerCommand
+type DeleteEngineListenerCommandInput struct {
+	Id string
+}
+
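The listener operations above illustrate the input convention used by every generated command: pagination, filtering and sort options are plain string fields that become query parameters, and path parameters such as {id} are substituted from the input struct. A short sketch of listing and then deleting a listener, using only the signatures shown in this file (the endpoint, credentials and id are placeholders):

package main

import (
	"log"

	paconfig "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
	"github.com/iwarapter/pingaccess-sdk-go/services/engineListeners"
)

func main() {
	cfg := paconfig.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint("https://localhost:9000/pa-admin-api/v3")
	svc := engineListeners.New(cfg)

	// All query parameters are strings; they are passed through verbatim.
	listeners, _, err := svc.GetEngineListenersCommand(&engineListeners.GetEngineListenersCommandInput{
		Page:          "1",
		NumberPerPage: "10",
		SortKey:       "name",
		Order:         "ASC",
	})
	if err != nil {
		log.Fatalf("failed to list engine listeners: %v", err)
	}
	log.Printf("listeners: %+v", listeners)

	// Delete by id; the {id} path parameter is substituted from the input.
	if _, err := svc.DeleteEngineListenerCommand(&engineListeners.DeleteEngineListenerCommandInput{Id: "2"}); err != nil {
		log.Fatalf("failed to delete listener: %v", err)
	}
}
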
+//GetEngineListenerCommand - Get an Engine Listener +//RequestType: GET +//Input: input *GetEngineListenerCommandInput +func (s *EngineListenersService) GetEngineListenerCommand(input *GetEngineListenerCommandInput) (output *models.EngineListenerView, resp *http.Response, err error) { + path := "/engineListeners/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetEngineListenerCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.EngineListenerView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetEngineListenerCommandInput - Inputs for GetEngineListenerCommand +type GetEngineListenerCommandInput struct { + Id string +} + +//UpdateEngineListenerCommand - Update an Engine Listener +//RequestType: PUT +//Input: input *UpdateEngineListenerCommandInput +func (s *EngineListenersService) UpdateEngineListenerCommand(input *UpdateEngineListenerCommandInput) (output *models.EngineListenerView, resp *http.Response, err error) { + path := "/engineListeners/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateEngineListenerCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.EngineListenerView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateEngineListenerCommandInput - Inputs for UpdateEngineListenerCommand +type UpdateEngineListenerCommandInput struct { + Body models.EngineListenerView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engineListeners/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engineListeners/interface.go new file mode 100644 index 00000000..78c60209 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engineListeners/interface.go @@ -0,0 +1,15 @@ +package engineListeners + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type EngineListenersAPI interface { + GetEngineListenersCommand(input *GetEngineListenersCommandInput) (output *models.EngineListenersView, resp *http.Response, err error) + AddEngineListenerCommand(input *AddEngineListenerCommandInput) (output *models.EngineListenerView, resp *http.Response, err error) + DeleteEngineListenerCommand(input *DeleteEngineListenerCommandInput) (resp *http.Response, err error) + GetEngineListenerCommand(input *GetEngineListenerCommandInput) (output *models.EngineListenerView, resp *http.Response, err error) + UpdateEngineListenerCommand(input *UpdateEngineListenerCommandInput) (output *models.EngineListenerView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engines/engines.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engines/engines.go new file mode 100644 index 00000000..8ee2c2e4 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engines/engines.go @@ -0,0 +1,305 @@ +package engines + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + 
"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "Engines" +) + +//EnginesService provides the API operations for making requests to +// Engines endpoint. +type EnginesService struct { + *client.Client +} + +//New createa a new instance of the EnginesService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a EnginesService from the configuration +// svc := engines.New(cfg) +// +func New(cfg *config.Config) *EnginesService { + + return &EnginesService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Engines operation +func (s *EnginesService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetEnginesCommand - Get all Engines +//RequestType: GET +//Input: input *GetEnginesCommandInput +func (s *EnginesService) GetEnginesCommand(input *GetEnginesCommandInput) (output *models.EnginesView, resp *http.Response, err error) { + path := "/engines" + op := &request.Operation{ + Name: "GetEnginesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.EnginesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetEnginesCommandInput - Inputs for GetEnginesCommand +type GetEnginesCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddEngineCommand - Add an Engine +//RequestType: POST +//Input: input *AddEngineCommandInput +func (s *EnginesService) AddEngineCommand(input *AddEngineCommandInput) (output *models.EngineView, resp *http.Response, err error) { + path := "/engines" + op := &request.Operation{ + Name: "AddEngineCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.EngineView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddEngineCommandInput - Inputs for AddEngineCommand +type AddEngineCommandInput struct { + Body models.EngineView +} + +//GetEngineCertificatesCommand - Get all Engine Certificates +//RequestType: GET +//Input: input *GetEngineCertificatesCommandInput +func (s *EnginesService) GetEngineCertificatesCommand(input *GetEngineCertificatesCommandInput) (output *models.EngineCertificateView, resp *http.Response, err error) { + path := "/engines/certificates" + op := &request.Operation{ + Name: "GetEngineCertificatesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "alias": input.Alias, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.EngineCertificateView{} + req := s.newRequest(op, nil, 
output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetEngineCertificatesCommandInput - Inputs for GetEngineCertificatesCommand +type GetEngineCertificatesCommandInput struct { + Page string + NumberPerPage string + Filter string + Alias string + SortKey string + Order string +} + +//GetEngineCertificateCommand - Get an Engine Certificate +//RequestType: GET +//Input: input *GetEngineCertificateCommandInput +func (s *EnginesService) GetEngineCertificateCommand(input *GetEngineCertificateCommandInput) (output *models.EngineCertificateView, resp *http.Response, err error) { + path := "/engines/certificates/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetEngineCertificateCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.EngineCertificateView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetEngineCertificateCommandInput - Inputs for GetEngineCertificateCommand +type GetEngineCertificateCommandInput struct { + Id string +} + +//GetEngineStatusCommand - Get health status of all Engines +//RequestType: GET +//Input: +func (s *EnginesService) GetEngineStatusCommand() (output *models.EngineHealthStatusView, resp *http.Response, err error) { + path := "/engines/status" + op := &request.Operation{ + Name: "GetEngineStatusCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.EngineHealthStatusView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//DeleteEngineCommand - Delete an Engine +//RequestType: DELETE +//Input: input *DeleteEngineCommandInput +func (s *EnginesService) DeleteEngineCommand(input *DeleteEngineCommandInput) (resp *http.Response, err error) { + path := "/engines/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteEngineCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteEngineCommandInput - Inputs for DeleteEngineCommand +type DeleteEngineCommandInput struct { + Id string +} + +//GetEngineCommand - Get an Engine +//RequestType: GET +//Input: input *GetEngineCommandInput +func (s *EnginesService) GetEngineCommand(input *GetEngineCommandInput) (output *models.EngineView, resp *http.Response, err error) { + path := "/engines/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetEngineCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.EngineView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetEngineCommandInput - Inputs for GetEngineCommand +type GetEngineCommandInput struct { + Id string +} + +//UpdateEngineCommand - Update an Engine +//RequestType: PUT +//Input: input *UpdateEngineCommandInput +func (s *EnginesService) UpdateEngineCommand(input *UpdateEngineCommandInput) (output *models.EngineView, resp *http.Response, err error) { + path := "/engines/{id}" + path = strings.Replace(path, "{id}", input.Id, 
-1) + + op := &request.Operation{ + Name: "UpdateEngineCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.EngineView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateEngineCommandInput - Inputs for UpdateEngineCommand +type UpdateEngineCommandInput struct { + Body models.EngineView + Id string +} + +//GetEngineConfigFileCommand - Get configuration file for an Engine +//RequestType: POST +//Input: input *GetEngineConfigFileCommandInput +func (s *EnginesService) GetEngineConfigFileCommand(input *GetEngineConfigFileCommandInput) (resp *http.Response, err error) { + path := "/engines/{id}/config" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetEngineConfigFileCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// GetEngineConfigFileCommandInput - Inputs for GetEngineConfigFileCommand +type GetEngineConfigFileCommandInput struct { + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engines/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engines/interface.go new file mode 100644 index 00000000..50c037a6 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/engines/interface.go @@ -0,0 +1,19 @@ +package engines + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type EnginesAPI interface { + GetEnginesCommand(input *GetEnginesCommandInput) (output *models.EnginesView, resp *http.Response, err error) + AddEngineCommand(input *AddEngineCommandInput) (output *models.EngineView, resp *http.Response, err error) + GetEngineCertificatesCommand(input *GetEngineCertificatesCommandInput) (output *models.EngineCertificateView, resp *http.Response, err error) + GetEngineCertificateCommand(input *GetEngineCertificateCommandInput) (output *models.EngineCertificateView, resp *http.Response, err error) + GetEngineStatusCommand() (output *models.EngineHealthStatusView, resp *http.Response, err error) + DeleteEngineCommand(input *DeleteEngineCommandInput) (resp *http.Response, err error) + GetEngineCommand(input *GetEngineCommandInput) (output *models.EngineView, resp *http.Response, err error) + UpdateEngineCommand(input *UpdateEngineCommandInput) (output *models.EngineView, resp *http.Response, err error) + GetEngineConfigFileCommand(input *GetEngineConfigFileCommandInput) (resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/globalUnprotectedResources/globalUnprotectedResources.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/globalUnprotectedResources/globalUnprotectedResources.go new file mode 100644 index 00000000..db66efea --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/globalUnprotectedResources/globalUnprotectedResources.go @@ -0,0 +1,195 @@ +package globalUnprotectedResources + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + 
"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "GlobalUnprotectedResources" +) + +//GlobalUnprotectedResourcesService provides the API operations for making requests to +// GlobalUnprotectedResources endpoint. +type GlobalUnprotectedResourcesService struct { + *client.Client +} + +//New createa a new instance of the GlobalUnprotectedResourcesService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a GlobalUnprotectedResourcesService from the configuration +// svc := globalUnprotectedResources.New(cfg) +// +func New(cfg *config.Config) *GlobalUnprotectedResourcesService { + + return &GlobalUnprotectedResourcesService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a GlobalUnprotectedResources operation +func (s *GlobalUnprotectedResourcesService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetGlobalUnprotectedResourcesCommand - Get all Global Unprotected Resources +//RequestType: GET +//Input: input *GetGlobalUnprotectedResourcesCommandInput +func (s *GlobalUnprotectedResourcesService) GetGlobalUnprotectedResourcesCommand(input *GetGlobalUnprotectedResourcesCommandInput) (output *models.GlobalUnprotectedResourcesView, resp *http.Response, err error) { + path := "/globalUnprotectedResources" + op := &request.Operation{ + Name: "GetGlobalUnprotectedResourcesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.GlobalUnprotectedResourcesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetGlobalUnprotectedResourcesCommandInput - Inputs for GetGlobalUnprotectedResourcesCommand +type GetGlobalUnprotectedResourcesCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddGlobalUnprotectedResourceCommand - Add a Global Unprotected Resource +//RequestType: POST +//Input: input *AddGlobalUnprotectedResourceCommandInput +func (s *GlobalUnprotectedResourcesService) AddGlobalUnprotectedResourceCommand(input *AddGlobalUnprotectedResourceCommandInput) (output *models.GlobalUnprotectedResourceView, resp *http.Response, err error) { + path := "/globalUnprotectedResources" + op := &request.Operation{ + Name: "AddGlobalUnprotectedResourceCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.GlobalUnprotectedResourceView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddGlobalUnprotectedResourceCommandInput - Inputs for AddGlobalUnprotectedResourceCommand +type AddGlobalUnprotectedResourceCommandInput struct { + Body models.GlobalUnprotectedResourceView +} + 
+//DeleteGlobalUnprotectedResourceCommand - Delete a Global Unprotected Resource +//RequestType: DELETE +//Input: input *DeleteGlobalUnprotectedResourceCommandInput +func (s *GlobalUnprotectedResourcesService) DeleteGlobalUnprotectedResourceCommand(input *DeleteGlobalUnprotectedResourceCommandInput) (resp *http.Response, err error) { + path := "/globalUnprotectedResources/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteGlobalUnprotectedResourceCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteGlobalUnprotectedResourceCommandInput - Inputs for DeleteGlobalUnprotectedResourceCommand +type DeleteGlobalUnprotectedResourceCommandInput struct { + Id string +} + +//GetGlobalUnprotectedResourceCommand - Get a Global Unprotected Resource +//RequestType: GET +//Input: input *GetGlobalUnprotectedResourceCommandInput +func (s *GlobalUnprotectedResourcesService) GetGlobalUnprotectedResourceCommand(input *GetGlobalUnprotectedResourceCommandInput) (output *models.GlobalUnprotectedResourceView, resp *http.Response, err error) { + path := "/globalUnprotectedResources/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetGlobalUnprotectedResourceCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.GlobalUnprotectedResourceView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetGlobalUnprotectedResourceCommandInput - Inputs for GetGlobalUnprotectedResourceCommand +type GetGlobalUnprotectedResourceCommandInput struct { + Id string +} + +//UpdateGlobalUnprotectedResourceCommand - Update a Global Unprotected Resource +//RequestType: PUT +//Input: input *UpdateGlobalUnprotectedResourceCommandInput +func (s *GlobalUnprotectedResourcesService) UpdateGlobalUnprotectedResourceCommand(input *UpdateGlobalUnprotectedResourceCommandInput) (output *models.GlobalUnprotectedResourceView, resp *http.Response, err error) { + path := "/globalUnprotectedResources/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateGlobalUnprotectedResourceCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.GlobalUnprotectedResourceView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateGlobalUnprotectedResourceCommandInput - Inputs for UpdateGlobalUnprotectedResourceCommand +type UpdateGlobalUnprotectedResourceCommandInput struct { + Body models.GlobalUnprotectedResourceView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/globalUnprotectedResources/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/globalUnprotectedResources/interface.go new file mode 100644 index 00000000..59bbf4a9 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/globalUnprotectedResources/interface.go @@ -0,0 +1,15 @@ +package globalUnprotectedResources + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type GlobalUnprotectedResourcesAPI interface { 
+	GetGlobalUnprotectedResourcesCommand(input *GetGlobalUnprotectedResourcesCommandInput) (output *models.GlobalUnprotectedResourcesView, resp *http.Response, err error)
+	AddGlobalUnprotectedResourceCommand(input *AddGlobalUnprotectedResourceCommandInput) (output *models.GlobalUnprotectedResourceView, resp *http.Response, err error)
+	DeleteGlobalUnprotectedResourceCommand(input *DeleteGlobalUnprotectedResourceCommandInput) (resp *http.Response, err error)
+	GetGlobalUnprotectedResourceCommand(input *GetGlobalUnprotectedResourceCommandInput) (output *models.GlobalUnprotectedResourceView, resp *http.Response, err error)
+	UpdateGlobalUnprotectedResourceCommand(input *UpdateGlobalUnprotectedResourceCommandInput) (output *models.GlobalUnprotectedResourceView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/highAvailability/highAvailability.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/highAvailability/highAvailability.go
new file mode 100644
index 00000000..29f36c84
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/highAvailability/highAvailability.go
@@ -0,0 +1,431 @@
+package highAvailability
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "HighAvailability"
+)
+
+//HighAvailabilityService provides the API operations for making requests to
+// HighAvailability endpoint.
+type HighAvailabilityService struct {
+	*client.Client
+}
+
+//New creates a new instance of the HighAvailabilityService client.
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a HighAvailabilityService from the configuration +// svc := highAvailability.New(cfg) +// +func New(cfg *config.Config) *HighAvailabilityService { + + return &HighAvailabilityService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a HighAvailability operation +func (s *HighAvailabilityService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetAvailabilityProfilesCommand - Get all Availability Profiles +//RequestType: GET +//Input: input *GetAvailabilityProfilesCommandInput +func (s *HighAvailabilityService) GetAvailabilityProfilesCommand(input *GetAvailabilityProfilesCommandInput) (output *models.AvailabilityProfilesView, resp *http.Response, err error) { + path := "/highAvailability/availabilityProfiles" + op := &request.Operation{ + Name: "GetAvailabilityProfilesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.AvailabilityProfilesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAvailabilityProfilesCommandInput - Inputs for GetAvailabilityProfilesCommand +type GetAvailabilityProfilesCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddAvailabilityProfileCommand - Create an Availability Profile +//RequestType: POST +//Input: input *AddAvailabilityProfileCommandInput +func (s *HighAvailabilityService) AddAvailabilityProfileCommand(input *AddAvailabilityProfileCommandInput) (output *models.AvailabilityProfileView, resp *http.Response, err error) { + path := "/highAvailability/availabilityProfiles" + op := &request.Operation{ + Name: "AddAvailabilityProfileCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AvailabilityProfileView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddAvailabilityProfileCommandInput - Inputs for AddAvailabilityProfileCommand +type AddAvailabilityProfileCommandInput struct { + Body models.AvailabilityProfileView +} + +//GetAvailabilityProfileDescriptorsCommand - Get descriptors for all Availability Profiles +//RequestType: GET +//Input: +func (s *HighAvailabilityService) GetAvailabilityProfileDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) { + path := "/highAvailability/availabilityProfiles/descriptors" + op := &request.Operation{ + Name: "GetAvailabilityProfileDescriptorsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.DescriptorsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetAvailabilityProfileDescriptorCommand - Get a descriptor for an Availability Profile +//RequestType: GET +//Input: input 
*GetAvailabilityProfileDescriptorCommandInput +func (s *HighAvailabilityService) GetAvailabilityProfileDescriptorCommand(input *GetAvailabilityProfileDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error) { + path := "/highAvailability/availabilityProfiles/descriptors/{availabilityProfileType}" + path = strings.Replace(path, "{availabilityProfileType}", input.AvailabilityProfileType, -1) + + op := &request.Operation{ + Name: "GetAvailabilityProfileDescriptorCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.DescriptorView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAvailabilityProfileDescriptorCommandInput - Inputs for GetAvailabilityProfileDescriptorCommand +type GetAvailabilityProfileDescriptorCommandInput struct { + AvailabilityProfileType string +} + +//DeleteAvailabilityProfileCommand - Delete an Availability Profile +//RequestType: DELETE +//Input: input *DeleteAvailabilityProfileCommandInput +func (s *HighAvailabilityService) DeleteAvailabilityProfileCommand(input *DeleteAvailabilityProfileCommandInput) (resp *http.Response, err error) { + path := "/highAvailability/availabilityProfiles/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteAvailabilityProfileCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteAvailabilityProfileCommandInput - Inputs for DeleteAvailabilityProfileCommand +type DeleteAvailabilityProfileCommandInput struct { + Id string +} + +//GetAvailabilityProfileCommand - Get an Availability Profile +//RequestType: GET +//Input: input *GetAvailabilityProfileCommandInput +func (s *HighAvailabilityService) GetAvailabilityProfileCommand(input *GetAvailabilityProfileCommandInput) (output *models.AvailabilityProfileView, resp *http.Response, err error) { + path := "/highAvailability/availabilityProfiles/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetAvailabilityProfileCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AvailabilityProfileView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetAvailabilityProfileCommandInput - Inputs for GetAvailabilityProfileCommand +type GetAvailabilityProfileCommandInput struct { + Id string +} + +//UpdateAvailabilityProfileCommand - Update an Availability Profile +//RequestType: PUT +//Input: input *UpdateAvailabilityProfileCommandInput +func (s *HighAvailabilityService) UpdateAvailabilityProfileCommand(input *UpdateAvailabilityProfileCommandInput) (output *models.AvailabilityProfileView, resp *http.Response, err error) { + path := "/highAvailability/availabilityProfiles/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateAvailabilityProfileCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.AvailabilityProfileView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, 
req.Error +} + +// UpdateAvailabilityProfileCommandInput - Inputs for UpdateAvailabilityProfileCommand +type UpdateAvailabilityProfileCommandInput struct { + Body models.AvailabilityProfileView + Id string +} + +//GetLoadBalancingStrategiesCommand - Get all Load Balancing Strategies +//RequestType: GET +//Input: input *GetLoadBalancingStrategiesCommandInput +func (s *HighAvailabilityService) GetLoadBalancingStrategiesCommand(input *GetLoadBalancingStrategiesCommandInput) (output *models.LoadBalancingStrategiesView, resp *http.Response, err error) { + path := "/highAvailability/loadBalancingStrategies" + op := &request.Operation{ + Name: "GetLoadBalancingStrategiesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.LoadBalancingStrategiesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetLoadBalancingStrategiesCommandInput - Inputs for GetLoadBalancingStrategiesCommand +type GetLoadBalancingStrategiesCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddLoadBalancingStrategyCommand - Create a Load Balancing Strategy +//RequestType: POST +//Input: input *AddLoadBalancingStrategyCommandInput +func (s *HighAvailabilityService) AddLoadBalancingStrategyCommand(input *AddLoadBalancingStrategyCommandInput) (output *models.LoadBalancingStrategyView, resp *http.Response, err error) { + path := "/highAvailability/loadBalancingStrategies" + op := &request.Operation{ + Name: "AddLoadBalancingStrategyCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.LoadBalancingStrategyView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddLoadBalancingStrategyCommandInput - Inputs for AddLoadBalancingStrategyCommand +type AddLoadBalancingStrategyCommandInput struct { + Body models.LoadBalancingStrategyView +} + +//GetLoadBalancingStrategyDescriptorsCommand - Get descriptors for all Load Balancing Strategies +//RequestType: GET +//Input: +func (s *HighAvailabilityService) GetLoadBalancingStrategyDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) { + path := "/highAvailability/loadBalancingStrategies/descriptors" + op := &request.Operation{ + Name: "GetLoadBalancingStrategyDescriptorsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.DescriptorsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetLoadBalancingStrategyDescriptorCommand - Get a descriptor for a Load Balancing Strategy +//RequestType: GET +//Input: input *GetLoadBalancingStrategyDescriptorCommandInput +func (s *HighAvailabilityService) GetLoadBalancingStrategyDescriptorCommand(input *GetLoadBalancingStrategyDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error) { + path := "/highAvailability/loadBalancingStrategies/descriptors/{loadBalancingStrategyType}" + path = strings.Replace(path, "{loadBalancingStrategyType}", input.LoadBalancingStrategyType, -1) + + op := 
&request.Operation{ + Name: "GetLoadBalancingStrategyDescriptorCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.DescriptorView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetLoadBalancingStrategyDescriptorCommandInput - Inputs for GetLoadBalancingStrategyDescriptorCommand +type GetLoadBalancingStrategyDescriptorCommandInput struct { + LoadBalancingStrategyType string +} + +//DeleteLoadBalancingStrategyCommand - Delete a Load Balancing Strategy +//RequestType: DELETE +//Input: input *DeleteLoadBalancingStrategyCommandInput +func (s *HighAvailabilityService) DeleteLoadBalancingStrategyCommand(input *DeleteLoadBalancingStrategyCommandInput) (resp *http.Response, err error) { + path := "/highAvailability/loadBalancingStrategies/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteLoadBalancingStrategyCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteLoadBalancingStrategyCommandInput - Inputs for DeleteLoadBalancingStrategyCommand +type DeleteLoadBalancingStrategyCommandInput struct { + Id string +} + +//GetLoadBalancingStrategyCommand - Get a Load Balancing Strategy +//RequestType: GET +//Input: input *GetLoadBalancingStrategyCommandInput +func (s *HighAvailabilityService) GetLoadBalancingStrategyCommand(input *GetLoadBalancingStrategyCommandInput) (output *models.LoadBalancingStrategyView, resp *http.Response, err error) { + path := "/highAvailability/loadBalancingStrategies/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetLoadBalancingStrategyCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.LoadBalancingStrategyView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetLoadBalancingStrategyCommandInput - Inputs for GetLoadBalancingStrategyCommand +type GetLoadBalancingStrategyCommandInput struct { + Id string +} + +//UpdateLoadBalancingStrategyCommand - Update a Load Balancing Strategy +//RequestType: PUT +//Input: input *UpdateLoadBalancingStrategyCommandInput +func (s *HighAvailabilityService) UpdateLoadBalancingStrategyCommand(input *UpdateLoadBalancingStrategyCommandInput) (output *models.LoadBalancingStrategyView, resp *http.Response, err error) { + path := "/highAvailability/loadBalancingStrategies/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateLoadBalancingStrategyCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.LoadBalancingStrategyView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateLoadBalancingStrategyCommandInput - Inputs for UpdateLoadBalancingStrategyCommand +type UpdateLoadBalancingStrategyCommandInput struct { + Body models.LoadBalancingStrategyView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/highAvailability/interface.go 
b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/highAvailability/interface.go new file mode 100644 index 00000000..b8b63b85 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/highAvailability/interface.go @@ -0,0 +1,24 @@ +package highAvailability + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type HighAvailabilityAPI interface { + GetAvailabilityProfilesCommand(input *GetAvailabilityProfilesCommandInput) (output *models.AvailabilityProfilesView, resp *http.Response, err error) + AddAvailabilityProfileCommand(input *AddAvailabilityProfileCommandInput) (output *models.AvailabilityProfileView, resp *http.Response, err error) + GetAvailabilityProfileDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) + GetAvailabilityProfileDescriptorCommand(input *GetAvailabilityProfileDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error) + DeleteAvailabilityProfileCommand(input *DeleteAvailabilityProfileCommandInput) (resp *http.Response, err error) + GetAvailabilityProfileCommand(input *GetAvailabilityProfileCommandInput) (output *models.AvailabilityProfileView, resp *http.Response, err error) + UpdateAvailabilityProfileCommand(input *UpdateAvailabilityProfileCommandInput) (output *models.AvailabilityProfileView, resp *http.Response, err error) + GetLoadBalancingStrategiesCommand(input *GetLoadBalancingStrategiesCommandInput) (output *models.LoadBalancingStrategiesView, resp *http.Response, err error) + AddLoadBalancingStrategyCommand(input *AddLoadBalancingStrategyCommandInput) (output *models.LoadBalancingStrategyView, resp *http.Response, err error) + GetLoadBalancingStrategyDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) + GetLoadBalancingStrategyDescriptorCommand(input *GetLoadBalancingStrategyDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error) + DeleteLoadBalancingStrategyCommand(input *DeleteLoadBalancingStrategyCommandInput) (resp *http.Response, err error) + GetLoadBalancingStrategyCommand(input *GetLoadBalancingStrategyCommandInput) (output *models.LoadBalancingStrategyView, resp *http.Response, err error) + UpdateLoadBalancingStrategyCommand(input *UpdateLoadBalancingStrategyCommandInput) (output *models.LoadBalancingStrategyView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders/hsmProviders.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders/hsmProviders.go new file mode 100644 index 00000000..fd6e1bff --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders/hsmProviders.go @@ -0,0 +1,214 @@ +package hsmProviders + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "HsmProviders" +) + +//HsmProvidersService provides the API operations for making requests to +// HsmProviders endpoint. 
+type HsmProvidersService struct {
+	*client.Client
+}
+
+//New creates a new instance of the HsmProvidersService client.
+//
+// Example:
+//  cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+//  //Create a HsmProvidersService from the configuration
+//  svc := hsmProviders.New(cfg)
+//
+func New(cfg *config.Config) *HsmProvidersService {
+
+	return &HsmProvidersService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for a HsmProviders operation
+func (s *HsmProvidersService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//GetHsmProvidersCommand - Get all HSM Providers
+//RequestType: GET
+//Input: input *GetHsmProvidersCommandInput
+func (s *HsmProvidersService) GetHsmProvidersCommand(input *GetHsmProvidersCommandInput) (output *models.HsmProviderView, resp *http.Response, err error) {
+	path := "/hsmProviders"
+	op := &request.Operation{
+		Name:       "GetHsmProvidersCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+		QueryParams: map[string]string{
+			"page":          input.Page,
+			"numberPerPage": input.NumberPerPage,
+			"filter":        input.Filter,
+			"name":          input.Name,
+			"sortKey":       input.SortKey,
+			"order":         input.Order,
+		},
+	}
+	output = &models.HsmProviderView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// GetHsmProvidersCommandInput - Inputs for GetHsmProvidersCommand
+type GetHsmProvidersCommandInput struct {
+	Page          string
+	NumberPerPage string
+	Filter        string
+	Name          string
+	SortKey       string
+	Order         string
+}
+
+//AddHsmProviderCommand - Create an HSM Provider
+//RequestType: POST
+//Input: input *AddHsmProviderCommandInput
+func (s *HsmProvidersService) AddHsmProviderCommand(input *AddHsmProviderCommandInput) (output *models.HsmProviderView, resp *http.Response, err error) {
+	path := "/hsmProviders"
+	op := &request.Operation{
+		Name:        "AddHsmProviderCommand",
+		HTTPMethod:  "POST",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.HsmProviderView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// AddHsmProviderCommandInput - Inputs for AddHsmProviderCommand
+type AddHsmProviderCommandInput struct {
+	Body models.HsmProviderView
+}
+
+//GetHsmProviderDescriptorsCommand - Get descriptors for all HSM Providers
+//RequestType: GET
+//Input:
+func (s *HsmProvidersService) GetHsmProviderDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) {
+	path := "/hsmProviders/descriptors"
+	op := &request.Operation{
+		Name:       "GetHsmProviderDescriptorsCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.DescriptorsView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//DeleteHsmProviderCommand - Delete an HSM Provider
+//RequestType: DELETE
+//Input: input *DeleteHsmProviderCommandInput
+func (s *HsmProvidersService) DeleteHsmProviderCommand(input *DeleteHsmProviderCommandInput) (resp *http.Response, err error) {
+	path := "/hsmProviders/{id}"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
op := &request.Operation{ + Name: "DeleteHsmProviderCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteHsmProviderCommandInput - Inputs for DeleteHsmProviderCommand +type DeleteHsmProviderCommandInput struct { + Id string +} + +//GetHsmProviderCommand - Get an HSM Provider +//RequestType: GET +//Input: input *GetHsmProviderCommandInput +func (s *HsmProvidersService) GetHsmProviderCommand(input *GetHsmProviderCommandInput) (output *models.HsmProviderView, resp *http.Response, err error) { + path := "/hsmProviders/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetHsmProviderCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.HsmProviderView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetHsmProviderCommandInput - Inputs for GetHsmProviderCommand +type GetHsmProviderCommandInput struct { + Id string +} + +//UpdateHsmProviderCommand - Update an HSM Provider +//RequestType: PUT +//Input: input *UpdateHsmProviderCommandInput +func (s *HsmProvidersService) UpdateHsmProviderCommand(input *UpdateHsmProviderCommandInput) (output *models.HsmProviderView, resp *http.Response, err error) { + path := "/hsmProviders/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateHsmProviderCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.HsmProviderView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateHsmProviderCommandInput - Inputs for UpdateHsmProviderCommand +type UpdateHsmProviderCommandInput struct { + Body models.HsmProviderView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders/interface.go new file mode 100644 index 00000000..ae1289e3 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders/interface.go @@ -0,0 +1,16 @@ +package hsmProviders + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type HsmProvidersAPI interface { + GetHsmProvidersCommand(input *GetHsmProvidersCommandInput) (output *models.HsmProviderView, resp *http.Response, err error) + AddHsmProviderCommand(input *AddHsmProviderCommandInput) (output *models.HsmProviderView, resp *http.Response, err error) + GetHsmProviderDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) + DeleteHsmProviderCommand(input *DeleteHsmProviderCommandInput) (resp *http.Response, err error) + GetHsmProviderCommand(input *GetHsmProviderCommandInput) (output *models.HsmProviderView, resp *http.Response, err error) + UpdateHsmProviderCommand(input *UpdateHsmProviderCommandInput) (output *models.HsmProviderView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpConfig/httpConfig.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpConfig/httpConfig.go new file mode 100644 index 00000000..f9b70eda --- 
/dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpConfig/httpConfig.go
@@ -0,0 +1,302 @@
+package httpConfig
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "HttpConfig"
+)
+
+//HttpConfigService provides the API operations for making requests to
+// HttpConfig endpoint.
+type HttpConfigService struct {
+	*client.Client
+}
+
+//New creates a new instance of the HttpConfigService client.
+//
+// Example:
+//  cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+//  //Create a HttpConfigService from the configuration
+//  svc := httpConfig.New(cfg)
+//
+func New(cfg *config.Config) *HttpConfigService {
+
+	return &HttpConfigService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for a HttpConfig operation
+func (s *HttpConfigService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//DeleteHttpMonitoringCommand - Resets the HTTP monitoring auditLevel to default value
+//RequestType: DELETE
+//Input:
+func (s *HttpConfigService) DeleteHttpMonitoringCommand() (resp *http.Response, err error) {
+	path := "/httpConfig/monitoring"
+	op := &request.Operation{
+		Name:       "DeleteHttpMonitoringCommand",
+		HTTPMethod: "DELETE",
+		HTTPPath:   path,
+	}
+
+	req := s.newRequest(op, nil, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+//GetHttpMonitoringCommand - Get the HTTP monitoring auditLevel
+//RequestType: GET
+//Input:
+func (s *HttpConfigService) GetHttpMonitoringCommand() (output *models.HttpMonitoringView, resp *http.Response, err error) {
+	path := "/httpConfig/monitoring"
+	op := &request.Operation{
+		Name:       "GetHttpMonitoringCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.HttpMonitoringView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//UpdateHttpMonitoringCommand - Update the HTTP monitoring auditLevel
+//RequestType: PUT
+//Input: input *UpdateHttpMonitoringCommandInput
+func (s *HttpConfigService) UpdateHttpMonitoringCommand(input *UpdateHttpMonitoringCommandInput) (output *models.HttpMonitoringView, resp *http.Response, err error) {
+	path := "/httpConfig/monitoring"
+	op := &request.Operation{
+		Name:        "UpdateHttpMonitoringCommand",
+		HTTPMethod:  "PUT",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.HttpMonitoringView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// UpdateHttpMonitoringCommandInput - Inputs for UpdateHttpMonitoringCommand
+type UpdateHttpMonitoringCommandInput struct {
+	Body models.HttpMonitoringView
+}
+
+//DeleteHostSourceCommand - Resets the HTTP request Host
Source type to default values +//RequestType: DELETE +//Input: +func (s *HttpConfigService) DeleteHostSourceCommand() (resp *http.Response, err error) { + path := "/httpConfig/request/hostSource" + op := &request.Operation{ + Name: "DeleteHostSourceCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetHostSourceCommand - Get the HTTP request Host Source type +//RequestType: GET +//Input: +func (s *HttpConfigService) GetHostSourceCommand() (output *models.HostMultiValueSourceView, resp *http.Response, err error) { + path := "/httpConfig/request/hostSource" + op := &request.Operation{ + Name: "GetHostSourceCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.HostMultiValueSourceView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateHostSourceCommand - Update the HTTP request Host Source type +//RequestType: PUT +//Input: input *UpdateHostSourceCommandInput +func (s *HttpConfigService) UpdateHostSourceCommand(input *UpdateHostSourceCommandInput) (output *models.HostMultiValueSourceView, resp *http.Response, err error) { + path := "/httpConfig/request/hostSource" + op := &request.Operation{ + Name: "UpdateHostSourceCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.HostMultiValueSourceView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateHostSourceCommandInput - Inputs for UpdateHostSourceCommand +type UpdateHostSourceCommandInput struct { + Body models.HostMultiValueSourceView +} + +//DeleteIpSourceCommand - Resets the HTTP request IP Source type to default values +//RequestType: DELETE +//Input: +func (s *HttpConfigService) DeleteIpSourceCommand() (resp *http.Response, err error) { + path := "/httpConfig/request/ipSource" + op := &request.Operation{ + Name: "DeleteIpSourceCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetIpSourceCommand - Get the HTTP request IP Source type +//RequestType: GET +//Input: +func (s *HttpConfigService) GetIpSourceCommand() (output *models.IpMultiValueSourceView, resp *http.Response, err error) { + path := "/httpConfig/request/ipSource" + op := &request.Operation{ + Name: "GetIpSourceCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.IpMultiValueSourceView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateIpSourceCommand - Update the HTTP request IP Source type +//RequestType: PUT +//Input: input *UpdateIpSourceCommandInput +func (s *HttpConfigService) UpdateIpSourceCommand(input *UpdateIpSourceCommandInput) (output *models.IpMultiValueSourceView, resp *http.Response, err error) { + path := "/httpConfig/request/ipSource" + op := &request.Operation{ + Name: "UpdateIpSourceCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.IpMultiValueSourceView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + 
return nil, req.HTTPResponse, req.Error +} + +// UpdateIpSourceCommandInput - Inputs for UpdateIpSourceCommand +type UpdateIpSourceCommandInput struct { + Body models.IpMultiValueSourceView +} + +//DeleteProtoSourceCommand - Resets the HTTP request Protocol Source type to default values +//RequestType: DELETE +//Input: +func (s *HttpConfigService) DeleteProtoSourceCommand() (resp *http.Response, err error) { + path := "/httpConfig/request/protocolSource" + op := &request.Operation{ + Name: "DeleteProtoSourceCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetProtoSourceCommand - Get the HTTP request Protocol Source type +//RequestType: GET +//Input: +func (s *HttpConfigService) GetProtoSourceCommand() (output *models.ProtocolSourceView, resp *http.Response, err error) { + path := "/httpConfig/request/protocolSource" + op := &request.Operation{ + Name: "GetProtoSourceCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.ProtocolSourceView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateProtocolSourceCommand - Update the HTTP request Protocol Source type +//RequestType: PUT +//Input: input *UpdateProtocolSourceCommandInput +func (s *HttpConfigService) UpdateProtocolSourceCommand(input *UpdateProtocolSourceCommandInput) (output *models.ProtocolSourceView, resp *http.Response, err error) { + path := "/httpConfig/request/protocolSource" + op := &request.Operation{ + Name: "UpdateProtocolSourceCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ProtocolSourceView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateProtocolSourceCommandInput - Inputs for UpdateProtocolSourceCommand +type UpdateProtocolSourceCommandInput struct { + Body models.ProtocolSourceView +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpConfig/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpConfig/interface.go new file mode 100644 index 00000000..15ddc90e --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpConfig/interface.go @@ -0,0 +1,22 @@ +package httpConfig + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type HttpConfigAPI interface { + DeleteHttpMonitoringCommand() (resp *http.Response, err error) + GetHttpMonitoringCommand() (output *models.HttpMonitoringView, resp *http.Response, err error) + UpdateHttpMonitoringCommand(input *UpdateHttpMonitoringCommandInput) (output *models.HttpMonitoringView, resp *http.Response, err error) + DeleteHostSourceCommand() (resp *http.Response, err error) + GetHostSourceCommand() (output *models.HostMultiValueSourceView, resp *http.Response, err error) + UpdateHostSourceCommand(input *UpdateHostSourceCommandInput) (output *models.HostMultiValueSourceView, resp *http.Response, err error) + DeleteIpSourceCommand() (resp *http.Response, err error) + GetIpSourceCommand() (output *models.IpMultiValueSourceView, resp *http.Response, err error) + UpdateIpSourceCommand(input *UpdateIpSourceCommandInput) (output *models.IpMultiValueSourceView, resp *http.Response, err error) + 
DeleteProtoSourceCommand() (resp *http.Response, err error)
+	GetProtoSourceCommand() (output *models.ProtocolSourceView, resp *http.Response, err error)
+	UpdateProtocolSourceCommand(input *UpdateProtocolSourceCommandInput) (output *models.ProtocolSourceView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners/httpsListeners.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners/httpsListeners.go
new file mode 100644
index 00000000..e6734cb3
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners/httpsListeners.go
@@ -0,0 +1,135 @@
+package httpsListeners
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "HttpsListeners"
+)
+
+//HttpsListenersService provides the API operations for making requests to
+// the HttpsListeners endpoint.
+type HttpsListenersService struct {
+	*client.Client
+}
+
+//New creates a new instance of the HttpsListenersService client.
+//
+// Example:
+//	cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+//	//Create a HttpsListenersService from the configuration
+//	svc := httpsListeners.New(cfg)
+//
+func New(cfg *config.Config) *HttpsListenersService {
+
+	return &HttpsListenersService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for a HttpsListeners operation
+func (s *HttpsListenersService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//GetHttpsListenersCommand - Get all HTTPS Listeners
+//RequestType: GET
+//Input: input *GetHttpsListenersCommandInput
+func (s *HttpsListenersService) GetHttpsListenersCommand(input *GetHttpsListenersCommandInput) (output *models.HttpsListenersView, resp *http.Response, err error) {
+	path := "/httpsListeners"
+	op := &request.Operation{
+		Name:       "GetHttpsListenersCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+		QueryParams: map[string]string{
+			"sortKey": input.SortKey,
+			"order":   input.Order,
+		},
+	}
+	output = &models.HttpsListenersView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// GetHttpsListenersCommandInput - Inputs for GetHttpsListenersCommand
+type GetHttpsListenersCommandInput struct {
+	SortKey string
+	Order   string
+}
+
+//GetHttpsListenerCommand - Get an HTTPS Listener
+//RequestType: GET
+//Input: input *GetHttpsListenerCommandInput
+func (s *HttpsListenersService) GetHttpsListenerCommand(input *GetHttpsListenerCommandInput) (output *models.HttpsListenerView, resp *http.Response, err error) {
+	path := "/httpsListeners/{id}"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
+	op := &request.Operation{
+		Name:        "GetHttpsListenerCommand",
+		HTTPMethod:  "GET",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.HttpsListenerView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// GetHttpsListenerCommandInput - Inputs for GetHttpsListenerCommand
+type GetHttpsListenerCommandInput struct {
+	Id string
+}
+
+//UpdateHttpsListener - Update an HTTPS Listener
+//RequestType: PUT
+//Input: input *UpdateHttpsListenerInput
+func (s *HttpsListenersService) UpdateHttpsListener(input *UpdateHttpsListenerInput) (output *models.HttpsListenerView, resp *http.Response, err error) {
+	path := "/httpsListeners/{id}"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
+	op := &request.Operation{
+		Name:        "UpdateHttpsListener",
+		HTTPMethod:  "PUT",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.HttpsListenerView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// UpdateHttpsListenerInput - Inputs for UpdateHttpsListener
+type UpdateHttpsListenerInput struct {
+	Body models.HttpsListenerView
+	Id   string
+}
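To make the generated client pattern above concrete, here is a minimal usage sketch for the new httpsListeners service. It is illustrative only and not part of the vendored files; the endpoint URL and credentials are placeholder assumptions, and the Items/Name fields on the returned view follow the SDK's usual generated-model shape rather than anything shown in this hunk.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
    	"github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners"
    )

    func main() {
    	// Assumed local PingAccess admin API endpoint and default credentials.
    	cfg := config.NewConfig().
    		WithUsername("Administrator").
    		WithPassword("2FederateM0re").
    		WithEndpoint("https://localhost:9000/pa-admin-api/v3")

    	svc := httpsListeners.New(cfg)

    	// Query parameters are plain strings, mirroring GetHttpsListenersCommandInput above.
    	out, _, err := svc.GetHttpsListenersCommand(&httpsListeners.GetHttpsListenersCommandInput{SortKey: "name"})
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, listener := range out.Items {
    		fmt.Println(*listener.Name)
    	}
    }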
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners/interface.go
new file mode 100644
index 00000000..63a0abe0
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners/interface.go
@@ -0,0 +1,13 @@
+package httpsListeners
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+)
+
+type HttpsListenersAPI interface {
+	GetHttpsListenersCommand(input *GetHttpsListenersCommandInput) (output *models.HttpsListenersView, resp *http.Response, err error)
+	GetHttpsListenerCommand(input *GetHttpsListenerCommandInput) (output *models.HttpsListenerView, resp *http.Response, err error)
+	UpdateHttpsListener(input *UpdateHttpsListenerInput) (output *models.HttpsListenerView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/identityMappings/identityMappings.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/identityMappings/identityMappings.go
new file mode 100644
index 00000000..6b938e68
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/identityMappings/identityMappings.go
@@ -0,0 +1,241 @@
+package identityMappings
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "IdentityMappings"
+)
+
+//IdentityMappingsService provides the API operations for making requests to
+// the IdentityMappings endpoint.
+type IdentityMappingsService struct {
+	*client.Client
+}
+
+//New creates a new instance of the IdentityMappingsService client.
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a IdentityMappingsService from the configuration +// svc := identityMappings.New(cfg) +// +func New(cfg *config.Config) *IdentityMappingsService { + + return &IdentityMappingsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a IdentityMappings operation +func (s *IdentityMappingsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetIdentityMappingsCommand - Get all Identity Mappings +//RequestType: GET +//Input: input *GetIdentityMappingsCommandInput +func (s *IdentityMappingsService) GetIdentityMappingsCommand(input *GetIdentityMappingsCommandInput) (output *models.IdentityMappingsView, resp *http.Response, err error) { + path := "/identityMappings" + op := &request.Operation{ + Name: "GetIdentityMappingsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.IdentityMappingsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetIdentityMappingsCommandInput - Inputs for GetIdentityMappingsCommand +type GetIdentityMappingsCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddIdentityMappingCommand - Create an Identity Mapping +//RequestType: POST +//Input: input *AddIdentityMappingCommandInput +func (s *IdentityMappingsService) AddIdentityMappingCommand(input *AddIdentityMappingCommandInput) (output *models.IdentityMappingView, resp *http.Response, err error) { + path := "/identityMappings" + op := &request.Operation{ + Name: "AddIdentityMappingCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.IdentityMappingView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddIdentityMappingCommandInput - Inputs for AddIdentityMappingCommand +type AddIdentityMappingCommandInput struct { + Body models.IdentityMappingView +} + +//GetIdentityMappingDescriptorsCommand - Get descriptors for all supported Identity Mapping types +//RequestType: GET +//Input: +func (s *IdentityMappingsService) GetIdentityMappingDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) { + path := "/identityMappings/descriptors" + op := &request.Operation{ + Name: "GetIdentityMappingDescriptorsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.DescriptorsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetIdentityMappingDescriptorCommand - Get descriptor for an Identity Mapping type +//RequestType: GET +//Input: input *GetIdentityMappingDescriptorCommandInput +func (s *IdentityMappingsService) GetIdentityMappingDescriptorCommand(input *GetIdentityMappingDescriptorCommandInput) (output 
*models.DescriptorView, resp *http.Response, err error) { + path := "/identityMappings/descriptors/{identityMappingType}" + path = strings.Replace(path, "{identityMappingType}", input.IdentityMappingType, -1) + + op := &request.Operation{ + Name: "GetIdentityMappingDescriptorCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.DescriptorView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetIdentityMappingDescriptorCommandInput - Inputs for GetIdentityMappingDescriptorCommand +type GetIdentityMappingDescriptorCommandInput struct { + IdentityMappingType string +} + +//DeleteIdentityMappingCommand - Delete an Identity Mapping +//RequestType: DELETE +//Input: input *DeleteIdentityMappingCommandInput +func (s *IdentityMappingsService) DeleteIdentityMappingCommand(input *DeleteIdentityMappingCommandInput) (resp *http.Response, err error) { + path := "/identityMappings/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteIdentityMappingCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteIdentityMappingCommandInput - Inputs for DeleteIdentityMappingCommand +type DeleteIdentityMappingCommandInput struct { + Id string +} + +//GetIdentityMappingCommand - Get an Identity Mapping +//RequestType: GET +//Input: input *GetIdentityMappingCommandInput +func (s *IdentityMappingsService) GetIdentityMappingCommand(input *GetIdentityMappingCommandInput) (output *models.IdentityMappingView, resp *http.Response, err error) { + path := "/identityMappings/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetIdentityMappingCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.IdentityMappingView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetIdentityMappingCommandInput - Inputs for GetIdentityMappingCommand +type GetIdentityMappingCommandInput struct { + Id string +} + +//UpdateIdentityMappingCommand - Update an Identity Mapping +//RequestType: PUT +//Input: input *UpdateIdentityMappingCommandInput +func (s *IdentityMappingsService) UpdateIdentityMappingCommand(input *UpdateIdentityMappingCommandInput) (output *models.IdentityMappingView, resp *http.Response, err error) { + path := "/identityMappings/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateIdentityMappingCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.IdentityMappingView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateIdentityMappingCommandInput - Inputs for UpdateIdentityMappingCommand +type UpdateIdentityMappingCommandInput struct { + Body models.IdentityMappingView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/identityMappings/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/identityMappings/interface.go new file mode 100644 
index 00000000..692bff2c --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/identityMappings/interface.go @@ -0,0 +1,17 @@ +package identityMappings + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type IdentityMappingsAPI interface { + GetIdentityMappingsCommand(input *GetIdentityMappingsCommandInput) (output *models.IdentityMappingsView, resp *http.Response, err error) + AddIdentityMappingCommand(input *AddIdentityMappingCommandInput) (output *models.IdentityMappingView, resp *http.Response, err error) + GetIdentityMappingDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) + GetIdentityMappingDescriptorCommand(input *GetIdentityMappingDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error) + DeleteIdentityMappingCommand(input *DeleteIdentityMappingCommandInput) (resp *http.Response, err error) + GetIdentityMappingCommand(input *GetIdentityMappingCommandInput) (output *models.IdentityMappingView, resp *http.Response, err error) + UpdateIdentityMappingCommand(input *UpdateIdentityMappingCommandInput) (output *models.IdentityMappingView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/keyPairs/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/keyPairs/interface.go new file mode 100644 index 00000000..8838c439 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/keyPairs/interface.go @@ -0,0 +1,24 @@ +package keyPairs + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type KeyPairsAPI interface { + GetKeyPairsCommand(input *GetKeyPairsCommandInput) (output *models.KeyPairsView, resp *http.Response, err error) + GenerateKeyPairCommand(input *GenerateKeyPairCommandInput) (output *models.KeyPairView, resp *http.Response, err error) + ImportKeyPairCommand(input *ImportKeyPairCommandInput) (output *models.KeyPairView, resp *http.Response, err error) + KeyAlgorithms() (output *models.KeyAlgorithmsView, resp *http.Response, err error) + GetKeypairsCreatableGeneralNamesCommand() (output *models.SanTypes, resp *http.Response, err error) + DeleteKeyPairCommand(input *DeleteKeyPairCommandInput) (resp *http.Response, err error) + GetKeyPairCommand(input *GetKeyPairCommandInput) (output *models.KeyPairView, resp *http.Response, err error) + PatchKeyPairCommand(input *PatchKeyPairCommandInput) (output *models.KeyPairView, resp *http.Response, err error) + UpdateKeyPairCommand(input *UpdateKeyPairCommandInput) (output *models.KeyPairView, resp *http.Response, err error) + ExportKeyPairCert(input *ExportKeyPairCertInput) (resp *http.Response, err error) + GenerateCsrCommand(input *GenerateCsrCommandInput) (resp *http.Response, err error) + ImportCSRResponseCommand(input *ImportCSRResponseCommandInput) (output *models.KeyPairView, resp *http.Response, err error) + ExportKeyPair(input *ExportKeyPairInput) (resp *http.Response, err error) + DeleteChainCertificateCommand(input *DeleteChainCertificateCommandInput) (resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/keyPairs/keyPairs.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/keyPairs/keyPairs.go new file mode 100644 index 00000000..dee1142d --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/keyPairs/keyPairs.go @@ -0,0 +1,426 @@ +package keyPairs + +import 
( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "KeyPairs" +) + +//KeyPairsService provides the API operations for making requests to +// KeyPairs endpoint. +type KeyPairsService struct { + *client.Client +} + +//New createa a new instance of the KeyPairsService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a KeyPairsService from the configuration +// svc := keyPairs.New(cfg) +// +func New(cfg *config.Config) *KeyPairsService { + + return &KeyPairsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a KeyPairs operation +func (s *KeyPairsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetKeyPairsCommand - Get all Key Pairs +//RequestType: GET +//Input: input *GetKeyPairsCommandInput +func (s *KeyPairsService) GetKeyPairsCommand(input *GetKeyPairsCommandInput) (output *models.KeyPairsView, resp *http.Response, err error) { + path := "/keyPairs" + op := &request.Operation{ + Name: "GetKeyPairsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "alias": input.Alias, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.KeyPairsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetKeyPairsCommandInput - Inputs for GetKeyPairsCommand +type GetKeyPairsCommandInput struct { + Page string + NumberPerPage string + Filter string + Alias string + SortKey string + Order string +} + +//GenerateKeyPairCommand - Generate a Key Pair +//RequestType: POST +//Input: input *GenerateKeyPairCommandInput +func (s *KeyPairsService) GenerateKeyPairCommand(input *GenerateKeyPairCommandInput) (output *models.KeyPairView, resp *http.Response, err error) { + path := "/keyPairs/generate" + op := &request.Operation{ + Name: "GenerateKeyPairCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.KeyPairView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GenerateKeyPairCommandInput - Inputs for GenerateKeyPairCommand +type GenerateKeyPairCommandInput struct { + Body models.NewKeyPairConfigView +} + +//ImportKeyPairCommand - Import a Key Pair from a PKCS12 file +//RequestType: POST +//Input: input *ImportKeyPairCommandInput +func (s *KeyPairsService) ImportKeyPairCommand(input *ImportKeyPairCommandInput) (output *models.KeyPairView, resp *http.Response, err error) { + path := "/keyPairs/import" + op := &request.Operation{ + Name: "ImportKeyPairCommand", + HTTPMethod: "POST", + HTTPPath: 
path, + QueryParams: map[string]string{}, + } + output = &models.KeyPairView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// ImportKeyPairCommandInput - Inputs for ImportKeyPairCommand +type ImportKeyPairCommandInput struct { + Body models.PKCS12FileImportDocView +} + +//KeyAlgorithms - Get the key algorithms supported by Key Pair generation +//RequestType: GET +//Input: +func (s *KeyPairsService) KeyAlgorithms() (output *models.KeyAlgorithmsView, resp *http.Response, err error) { + path := "/keyPairs/keyAlgorithms" + op := &request.Operation{ + Name: "KeyAlgorithms", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.KeyAlgorithmsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetKeypairsCreatableGeneralNamesCommand - Get the valid General Names for creating Subject Alternative Names +//RequestType: GET +//Input: +func (s *KeyPairsService) GetKeypairsCreatableGeneralNamesCommand() (output *models.SanTypes, resp *http.Response, err error) { + path := "/keyPairs/subjectAlternativeTypes" + op := &request.Operation{ + Name: "GetKeypairsCreatableGeneralNamesCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.SanTypes{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//DeleteKeyPairCommand - Delete a Key Pair +//RequestType: DELETE +//Input: input *DeleteKeyPairCommandInput +func (s *KeyPairsService) DeleteKeyPairCommand(input *DeleteKeyPairCommandInput) (resp *http.Response, err error) { + path := "/keyPairs/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteKeyPairCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteKeyPairCommandInput - Inputs for DeleteKeyPairCommand +type DeleteKeyPairCommandInput struct { + Id string +} + +//GetKeyPairCommand - Get a Key Pair +//RequestType: GET +//Input: input *GetKeyPairCommandInput +func (s *KeyPairsService) GetKeyPairCommand(input *GetKeyPairCommandInput) (output *models.KeyPairView, resp *http.Response, err error) { + path := "/keyPairs/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetKeyPairCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.KeyPairView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetKeyPairCommandInput - Inputs for GetKeyPairCommand +type GetKeyPairCommandInput struct { + Id string +} + +//PatchKeyPairCommand - Update the chainCertificates of a Key Pair +//RequestType: PATCH +//Input: input *PatchKeyPairCommandInput +func (s *KeyPairsService) PatchKeyPairCommand(input *PatchKeyPairCommandInput) (output *models.KeyPairView, resp *http.Response, err error) { + path := "/keyPairs/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "PatchKeyPairCommand", + HTTPMethod: "PATCH", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = 
&models.KeyPairView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// PatchKeyPairCommandInput - Inputs for PatchKeyPairCommand +type PatchKeyPairCommandInput struct { + Body models.ChainCertificatesDocView + Id string +} + +//UpdateKeyPairCommand - Update a Key Pair +//RequestType: PUT +//Input: input *UpdateKeyPairCommandInput +func (s *KeyPairsService) UpdateKeyPairCommand(input *UpdateKeyPairCommandInput) (output *models.KeyPairView, resp *http.Response, err error) { + path := "/keyPairs/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateKeyPairCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.KeyPairView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateKeyPairCommandInput - Inputs for UpdateKeyPairCommand +type UpdateKeyPairCommandInput struct { + Body models.PKCS12FileImportDocView + Id string +} + +//ExportKeyPairCert - Export only the Certificate from a Key Pair +//RequestType: GET +//Input: input *ExportKeyPairCertInput +func (s *KeyPairsService) ExportKeyPairCert(input *ExportKeyPairCertInput) (resp *http.Response, err error) { + path := "/keyPairs/{id}/certificate" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "ExportKeyPairCert", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// ExportKeyPairCertInput - Inputs for ExportKeyPairCert +type ExportKeyPairCertInput struct { + Id string +} + +//GenerateCsrCommand - Generate a Certificate Signing Request for a Key Pair +//RequestType: GET +//Input: input *GenerateCsrCommandInput +func (s *KeyPairsService) GenerateCsrCommand(input *GenerateCsrCommandInput) (resp *http.Response, err error) { + path := "/keyPairs/{id}/csr" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GenerateCsrCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// GenerateCsrCommandInput - Inputs for GenerateCsrCommand +type GenerateCsrCommandInput struct { + Id string +} + +//ImportCSRResponseCommand - Import a Certificate Signing Request response +//RequestType: POST +//Input: input *ImportCSRResponseCommandInput +func (s *KeyPairsService) ImportCSRResponseCommand(input *ImportCSRResponseCommandInput) (output *models.KeyPairView, resp *http.Response, err error) { + path := "/keyPairs/{id}/csr" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "ImportCSRResponseCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.KeyPairView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// ImportCSRResponseCommandInput - Inputs for ImportCSRResponseCommand +type ImportCSRResponseCommandInput struct { + Body models.CSRResponseImportDocView + Id string +} + +//ExportKeyPair - Export a 
Key Pair in the PKCS12 file format
+//RequestType: POST
+//Input: input *ExportKeyPairInput
+func (s *KeyPairsService) ExportKeyPair(input *ExportKeyPairInput) (resp *http.Response, err error) {
+	path := "/keyPairs/{id}/pkcs12"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
+	op := &request.Operation{
+		Name:        "ExportKeyPair",
+		HTTPMethod:  "POST",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+
+	req := s.newRequest(op, input.Body, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+// ExportKeyPairInput - Inputs for ExportKeyPair
+type ExportKeyPairInput struct {
+	Body models.ExportParameters
+	Id   string
+}
+
+//DeleteChainCertificateCommand - Delete a Chain Certificate
+//RequestType: DELETE
+//Input: input *DeleteChainCertificateCommandInput
+func (s *KeyPairsService) DeleteChainCertificateCommand(input *DeleteChainCertificateCommandInput) (resp *http.Response, err error) {
+	path := "/keyPairs/{keyPairId}/chainCertificates/{chainCertificateId}"
+	path = strings.Replace(path, "{keyPairId}", input.KeyPairId, -1)
+
+	path = strings.Replace(path, "{chainCertificateId}", input.ChainCertificateId, -1)
+
+	op := &request.Operation{
+		Name:        "DeleteChainCertificateCommand",
+		HTTPMethod:  "DELETE",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+
+	req := s.newRequest(op, nil, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+// DeleteChainCertificateCommandInput - Inputs for DeleteChainCertificateCommand
+type DeleteChainCertificateCommandInput struct {
+	KeyPairId          string
+	ChainCertificateId string
+}
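As a usage sketch for the keyPairs service above (illustrative only; the endpoint, credentials, and the key pair ID "1" are assumptions), note that ExportKeyPairCert returns no model output, so the certificate comes back on the raw HTTP response body:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"log"

    	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
    	"github.com/iwarapter/pingaccess-sdk-go/services/keyPairs"
    )

    func main() {
    	// Assumed local PingAccess admin API endpoint and default credentials.
    	cfg := config.NewConfig().
    		WithUsername("Administrator").
    		WithPassword("2FederateM0re").
    		WithEndpoint("https://localhost:9000/pa-admin-api/v3")

    	svc := keyPairs.New(cfg)

    	// ExportKeyPairCert only returns (*http.Response, error), so the caller
    	// reads the certificate straight off the response body.
    	resp, err := svc.ExportKeyPairCert(&keyPairs.ExportKeyPairCertInput{Id: "1"})
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer resp.Body.Close()

    	pem, err := ioutil.ReadAll(resp.Body)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("%s\n", pem)
    }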
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/license/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/license/interface.go
new file mode 100644
index 00000000..18c7d6e9
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/license/interface.go
@@ -0,0 +1,12 @@
+package license
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+)
+
+type LicenseAPI interface {
+	GetLicenseCommand() (output *models.LicenseView, resp *http.Response, err error)
+	ImportLicenseCommand(input *ImportLicenseCommandInput) (output *models.LicenseView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/license/license.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/license/license.go
new file mode 100644
index 00000000..225d55e3
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/license/license.go
@@ -0,0 +1,94 @@
+package license
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "License"
+)
+
+//LicenseService provides the API operations for making requests to
+// the License endpoint.
+type LicenseService struct {
+	*client.Client
+}
+
+//New creates a new instance of the LicenseService client.
+//
+// Example:
+//	cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+//	//Create a LicenseService from the configuration
+//	svc := license.New(cfg)
+//
+func New(cfg *config.Config) *LicenseService {
+
+	return &LicenseService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for a License operation
+func (s *LicenseService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//GetLicenseCommand - Get the License File
+//RequestType: GET
+//Input:
+func (s *LicenseService) GetLicenseCommand() (output *models.LicenseView, resp *http.Response, err error) {
+	path := "/license"
+	op := &request.Operation{
+		Name:       "GetLicenseCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.LicenseView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//ImportLicenseCommand - Import a License
+//RequestType: POST
+//Input: input *ImportLicenseCommandInput
+func (s *LicenseService) ImportLicenseCommand(input *ImportLicenseCommandInput) (output *models.LicenseView, resp *http.Response, err error) {
+	path := "/license"
+	op := &request.Operation{
+		Name:        "ImportLicenseCommand",
+		HTTPMethod:  "POST",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.LicenseView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// ImportLicenseCommandInput - Inputs for ImportLicenseCommand
+type ImportLicenseCommandInput struct {
+	Body models.LicenseImportDocView
+}
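A short sketch of the license service above (illustrative; the endpoint and credentials are assumed). Printing the view with %+v avoids assuming any particular field names on the generated LicenseView model:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
    	"github.com/iwarapter/pingaccess-sdk-go/services/license"
    )

    func main() {
    	cfg := config.NewConfig().
    		WithUsername("Administrator").
    		WithPassword("2FederateM0re").
    		WithEndpoint("https://localhost:9000/pa-admin-api/v3")

    	svc := license.New(cfg)

    	// On success the populated LicenseView is returned alongside the raw response.
    	out, resp, err := svc.GetLicenseCommand()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(resp.Status)
    	fmt.Printf("%+v\n", out)
    }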
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauth/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauth/interface.go
new file mode 100644
index 00000000..4cb24140
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauth/interface.go
@@ -0,0 +1,13 @@
+package oauth
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+)
+
+type OauthAPI interface {
+	DeleteAuthorizationServerCommand() (resp *http.Response, err error)
+	GetAuthorizationServerCommand() (output *models.AuthorizationServerView, resp *http.Response, err error)
+	UpdateAuthorizationServerCommand(input *UpdateAuthorizationServerCommandInput) (output *models.AuthorizationServerView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauth/oauth.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauth/oauth.go
new file mode 100644
index 00000000..a9e656ba
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauth/oauth.go
@@ -0,0 +1,113 @@
+package oauth
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "Oauth"
+)
+
+//OauthService provides the API operations for making requests to
+// the Oauth endpoint.
+type OauthService struct {
+	*client.Client
+}
+
+//New creates a new instance of the OauthService client.
+//
+// Example:
+//	cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+//	//Create an OauthService from the configuration
+//	svc := oauth.New(cfg)
+//
+func New(cfg *config.Config) *OauthService {
+
+	return &OauthService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for an Oauth operation
+func (s *OauthService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//DeleteAuthorizationServerCommand - Resets the Authorization Server configuration to default values
+//RequestType: DELETE
+//Input:
+func (s *OauthService) DeleteAuthorizationServerCommand() (resp *http.Response, err error) {
+	path := "/oauth/authServer"
+	op := &request.Operation{
+		Name:       "DeleteAuthorizationServerCommand",
+		HTTPMethod: "DELETE",
+		HTTPPath:   path,
+	}
+
+	req := s.newRequest(op, nil, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+//GetAuthorizationServerCommand - Get Authorization Server configuration
+//RequestType: GET
+//Input:
+func (s *OauthService) GetAuthorizationServerCommand() (output *models.AuthorizationServerView, resp *http.Response, err error) {
+	path := "/oauth/authServer"
+	op := &request.Operation{
+		Name:       "GetAuthorizationServerCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.AuthorizationServerView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//UpdateAuthorizationServerCommand - Update OAuth 2.0 Authorization Server configuration
+//RequestType: PUT
+//Input: input *UpdateAuthorizationServerCommandInput
+func (s *OauthService) UpdateAuthorizationServerCommand(input *UpdateAuthorizationServerCommandInput) (output *models.AuthorizationServerView, resp *http.Response, err error) {
+	path := "/oauth/authServer"
+	op := &request.Operation{
+		Name:        "UpdateAuthorizationServerCommand",
+		HTTPMethod:  "PUT",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.AuthorizationServerView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// UpdateAuthorizationServerCommandInput - Inputs for UpdateAuthorizationServerCommand
+type UpdateAuthorizationServerCommandInput struct {
+	Body models.AuthorizationServerView
+}
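A sketch of the read-modify-write flow the oauth service supports (illustrative; endpoint and credentials are assumed). Writing the fetched view straight back stays within the signatures shown above without assuming any AuthorizationServerView field names:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
    	"github.com/iwarapter/pingaccess-sdk-go/services/oauth"
    )

    func main() {
    	cfg := config.NewConfig().
    		WithUsername("Administrator").
    		WithPassword("2FederateM0re").
    		WithEndpoint("https://localhost:9000/pa-admin-api/v3")

    	svc := oauth.New(cfg)

    	// Read the current Authorization Server configuration...
    	current, _, err := svc.GetAuthorizationServerCommand()
    	if err != nil {
    		log.Fatal(err)
    	}

    	// ...and write it straight back: a minimal read-modify-write round trip.
    	// Real callers would mutate fields on `current` before the update.
    	updated, _, err := svc.UpdateAuthorizationServerCommand(&oauth.UpdateAuthorizationServerCommandInput{Body: *current})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("%+v\n", updated)
    }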
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauthKeyManagement/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauthKeyManagement/interface.go
new file mode 100644
index 00000000..7c2a6899
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauthKeyManagement/interface.go
@@ -0,0 +1,15 @@
+package oauthKeyManagement
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+)
+
+type OauthKeyManagementAPI interface {
+	DeleteOAuthKeyManagementCommand() (resp *http.Response, err error)
+	GetOAuthKeyManagementCommand() (output *models.OAuthKeyManagementView, resp *http.Response, err error)
+	UpdateOAuthKeyManagementCommand(input *UpdateOAuthKeyManagementCommandInput) (output *models.OAuthKeyManagementView, resp *http.Response, err error)
+	GetOAuthKeySetCommand() (output *models.KeySetView, resp *http.Response, err error)
+	UpdateOAuthKeySetCommand(input *UpdateOAuthKeySetCommandInput) (output *models.KeySetView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauthKeyManagement/oauthKeyManagement.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauthKeyManagement/oauthKeyManagement.go
new file mode 100644
index 00000000..c7b1b2a3
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oauthKeyManagement/oauthKeyManagement.go
@@ -0,0 +1,157 @@
+package oauthKeyManagement
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "OauthKeyManagement"
+)
+
+//OauthKeyManagementService provides the API operations for making requests to
+// the OauthKeyManagement endpoint.
+type OauthKeyManagementService struct {
+	*client.Client
+}
+
+//New creates a new instance of the OauthKeyManagementService client.
+//
+// Example:
+//	cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+//	//Create an OauthKeyManagementService from the configuration
+//	svc := oauthKeyManagement.New(cfg)
+//
+func New(cfg *config.Config) *OauthKeyManagementService {
+
+	return &OauthKeyManagementService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for an OauthKeyManagement operation
+func (s *OauthKeyManagementService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//DeleteOAuthKeyManagementCommand - Resets the OAuth Key Management configuration to default values
+//RequestType: DELETE
+//Input:
+func (s *OauthKeyManagementService) DeleteOAuthKeyManagementCommand() (resp *http.Response, err error) {
+	path := "/oauthKeyManagement"
+	op := &request.Operation{
+		Name:       "DeleteOAuthKeyManagementCommand",
+		HTTPMethod: "DELETE",
+		HTTPPath:   path,
+	}
+
+	req := s.newRequest(op, nil, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+//GetOAuthKeyManagementCommand - Get the OAuth Key Management configuration
+//RequestType: GET
+//Input:
+func (s *OauthKeyManagementService) GetOAuthKeyManagementCommand() (output *models.OAuthKeyManagementView, resp *http.Response, err error) {
+	path := "/oauthKeyManagement"
+	op := &request.Operation{
+		Name:       "GetOAuthKeyManagementCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.OAuthKeyManagementView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//UpdateOAuthKeyManagementCommand - Update the OAuth Key Management configuration +//RequestType: PUT +//Input: input *UpdateOAuthKeyManagementCommandInput +func (s *OauthKeyManagementService) UpdateOAuthKeyManagementCommand(input *UpdateOAuthKeyManagementCommandInput) (output *models.OAuthKeyManagementView, resp *http.Response, err error) { + path := "/oauthKeyManagement" + op := &request.Operation{ + Name: "UpdateOAuthKeyManagementCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.OAuthKeyManagementView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateOAuthKeyManagementCommandInput - Inputs for UpdateOAuthKeyManagementCommand +type UpdateOAuthKeyManagementCommandInput struct { + Body models.OAuthKeyManagementView +} + +//GetOAuthKeySetCommand - Get the OAuth key set configuration +//RequestType: GET +//Input: +func (s *OauthKeyManagementService) GetOAuthKeySetCommand() (output *models.KeySetView, resp *http.Response, err error) { + path := "/oauthKeyManagement/keySet" + op := &request.Operation{ + Name: "GetOAuthKeySetCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.KeySetView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdateOAuthKeySetCommand - Update the OAuth key set configuration +//RequestType: PUT +//Input: input *UpdateOAuthKeySetCommandInput +func (s *OauthKeyManagementService) UpdateOAuthKeySetCommand(input *UpdateOAuthKeySetCommandInput) (output *models.KeySetView, resp *http.Response, err error) { + path := "/oauthKeyManagement/keySet" + op := &request.Operation{ + Name: "UpdateOAuthKeySetCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.KeySetView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateOAuthKeySetCommandInput - Inputs for UpdateOAuthKeySetCommand +type UpdateOAuthKeySetCommandInput struct { + Body models.KeySetView +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oidc/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oidc/interface.go new file mode 100644 index 00000000..f3028bed --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oidc/interface.go @@ -0,0 +1,16 @@ +package oidc + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type OidcAPI interface { + DeleteOIDCProviderCommand() (resp *http.Response, err error) + GetOIDCProviderCommand() (output *models.OIDCProviderView, resp *http.Response, err error) + UpdateOIDCProviderCommand(input *UpdateOIDCProviderCommandInput) (output *models.OIDCProviderView, resp *http.Response, err error) + GetOIDCProviderPluginDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) + GetOIDCProviderPluginDescriptorCommand(input *GetOIDCProviderPluginDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error) + GetOIDCProviderMetadataCommand() (output *models.OIDCProviderMetadata, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oidc/oidc.go 
b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oidc/oidc.go
new file mode 100644
index 00000000..5d98d232
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/oidc/oidc.go
@@ -0,0 +1,179 @@
+package oidc
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "Oidc"
+)
+
+//OidcService provides the API operations for making requests to
+// the Oidc endpoint.
+type OidcService struct {
+	*client.Client
+}
+
+//New creates a new instance of the OidcService client.
+//
+// Example:
+//	cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+//	//Create an OidcService from the configuration
+//	svc := oidc.New(cfg)
+//
+func New(cfg *config.Config) *OidcService {
+
+	return &OidcService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for an Oidc operation
+func (s *OidcService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//DeleteOIDCProviderCommand - Resets the OpenID Connect Provider configuration to default values
+//RequestType: DELETE
+//Input:
+func (s *OidcService) DeleteOIDCProviderCommand() (resp *http.Response, err error) {
+	path := "/oidc/provider"
+	op := &request.Operation{
+		Name:       "DeleteOIDCProviderCommand",
+		HTTPMethod: "DELETE",
+		HTTPPath:   path,
+	}
+
+	req := s.newRequest(op, nil, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+//GetOIDCProviderCommand - Get the OpenID Connect Provider configuration
+//RequestType: GET
+//Input:
+func (s *OidcService) GetOIDCProviderCommand() (output *models.OIDCProviderView, resp *http.Response, err error) {
+	path := "/oidc/provider"
+	op := &request.Operation{
+		Name:       "GetOIDCProviderCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.OIDCProviderView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//UpdateOIDCProviderCommand - Update the OpenID Connect Provider configuration
+//RequestType: PUT
+//Input: input *UpdateOIDCProviderCommandInput
+func (s *OidcService) UpdateOIDCProviderCommand(input *UpdateOIDCProviderCommandInput) (output *models.OIDCProviderView, resp *http.Response, err error) {
+	path := "/oidc/provider"
+	op := &request.Operation{
+		Name:        "UpdateOIDCProviderCommand",
+		HTTPMethod:  "PUT",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.OIDCProviderView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// UpdateOIDCProviderCommandInput - Inputs for UpdateOIDCProviderCommand
+type UpdateOIDCProviderCommandInput struct {
+	Body models.OIDCProviderView
+}
+
+//GetOIDCProviderPluginDescriptorsCommand - Get
descriptors for all OIDC Provider plugins +//RequestType: GET +//Input: +func (s *OidcService) GetOIDCProviderPluginDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) { + path := "/oidc/provider/descriptors" + op := &request.Operation{ + Name: "GetOIDCProviderPluginDescriptorsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.DescriptorsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetOIDCProviderPluginDescriptorCommand - Get a descriptor for a OIDC Provider plugin +//RequestType: GET +//Input: input *GetOIDCProviderPluginDescriptorCommandInput +func (s *OidcService) GetOIDCProviderPluginDescriptorCommand(input *GetOIDCProviderPluginDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error) { + path := "/oidc/provider/descriptors/{pluginType}" + path = strings.Replace(path, "{pluginType}", input.PluginType, -1) + + op := &request.Operation{ + Name: "GetOIDCProviderPluginDescriptorCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.DescriptorView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetOIDCProviderPluginDescriptorCommandInput - Inputs for GetOIDCProviderPluginDescriptorCommand +type GetOIDCProviderPluginDescriptorCommandInput struct { + PluginType string +} + +//GetOIDCProviderMetadataCommand - Get the OpenID Connect Provider's metadata +//RequestType: GET +//Input: +func (s *OidcService) GetOIDCProviderMetadataCommand() (output *models.OIDCProviderMetadata, resp *http.Response, err error) { + path := "/oidc/provider/metadata" + op := &request.Operation{ + Name: "GetOIDCProviderMetadataCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.OIDCProviderMetadata{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingfederate/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingfederate/interface.go new file mode 100644 index 00000000..fd305217 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingfederate/interface.go @@ -0,0 +1,24 @@ +package pingfederate + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type PingfederateAPI interface { + DeletePingFederateCommand() (resp *http.Response, err error) + GetPingFederateCommand() (output *models.PingFederateRuntimeView, resp *http.Response, err error) + UpdatePingFederateCommand(input *UpdatePingFederateCommandInput) (output *models.PingFederateRuntimeView, resp *http.Response, err error) + DeletePingFederateAccessTokensCommand() (resp *http.Response, err error) + GetPingFederateAccessTokensCommand() (output *models.PingFederateAccessTokenView, resp *http.Response, err error) + UpdatePingFederateAccessTokensCommand(input *UpdatePingFederateAccessTokensCommandInput) (output *models.PingFederateAccessTokenView, resp *http.Response, err error) + DeletePingFederateAdminCommand() (resp *http.Response, err error) + GetPingFederateAdminCommand() (output *models.PingFederateAdminView, resp *http.Response, err error) + UpdatePingFederateAdminCommand(input 
*UpdatePingFederateAdminCommandInput) (output *models.PingFederateAdminView, resp *http.Response, err error) + GetLegacyPingFederateMetadataCommand() (output *models.OIDCProviderMetadata, resp *http.Response, err error) + DeletePingFederateRuntimeCommand() (resp *http.Response, err error) + GetPingFederateRuntimeCommand() (output *models.PingFederateMetadataRuntimeView, resp *http.Response, err error) + UpdatePingFederateRuntimeCommand(input *UpdatePingFederateRuntimeCommandInput) (output *models.PingFederateMetadataRuntimeView, resp *http.Response, err error) + GetPingFederateMetadataCommand() (output *models.OIDCProviderMetadata, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingfederate/pingfederate.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingfederate/pingfederate.go new file mode 100644 index 00000000..ac6ced4d --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingfederate/pingfederate.go @@ -0,0 +1,340 @@ +package pingfederate + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "Pingfederate" +) + +//PingfederateService provides the API operations for making requests to +// Pingfederate endpoint. +type PingfederateService struct { + *client.Client +} + +//New createa a new instance of the PingfederateService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a PingfederateService from the configuration +// svc := pingfederate.New(cfg) +// +func New(cfg *config.Config) *PingfederateService { + + return &PingfederateService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Pingfederate operation +func (s *PingfederateService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//DeletePingFederateCommand - [Attention: This endpoint "/pingfederate" is deprecated. Please use /pingfederate/runtime to configure PingFederate instead] Resets the PingFederate configuration to default values +//RequestType: DELETE +//Input: +func (s *PingfederateService) DeletePingFederateCommand() (resp *http.Response, err error) { + path := "/pingfederate" + op := &request.Operation{ + Name: "DeletePingFederateCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetPingFederateCommand - [Attention: This endpoint "/pingfederate" is deprecated. 
Please use /pingfederate/runtime to configure PingFederate instead] Get the PingFederate configuration +//RequestType: GET +//Input: +func (s *PingfederateService) GetPingFederateCommand() (output *models.PingFederateRuntimeView, resp *http.Response, err error) { + path := "/pingfederate" + op := &request.Operation{ + Name: "GetPingFederateCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.PingFederateRuntimeView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdatePingFederateCommand - [Attention: This endpoint "/pingfederate" is deprecated. Please use /pingfederate/runtime to configure PingFederate instead] Update the PingFederate configuration +//RequestType: PUT +//Input: input *UpdatePingFederateCommandInput +func (s *PingfederateService) UpdatePingFederateCommand(input *UpdatePingFederateCommandInput) (output *models.PingFederateRuntimeView, resp *http.Response, err error) { + path := "/pingfederate" + op := &request.Operation{ + Name: "UpdatePingFederateCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.PingFederateRuntimeView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdatePingFederateCommandInput - Inputs for UpdatePingFederateCommand +type UpdatePingFederateCommandInput struct { + Body models.PingFederateRuntimeView +} + +//DeletePingFederateAccessTokensCommand - Resets the PingAccess OAuth Client configuration to default values +//RequestType: DELETE +//Input: +func (s *PingfederateService) DeletePingFederateAccessTokensCommand() (resp *http.Response, err error) { + path := "/pingfederate/accessTokens" + op := &request.Operation{ + Name: "DeletePingFederateAccessTokensCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetPingFederateAccessTokensCommand - Get the PingAccess OAuth Client configuration +//RequestType: GET +//Input: +func (s *PingfederateService) GetPingFederateAccessTokensCommand() (output *models.PingFederateAccessTokenView, resp *http.Response, err error) { + path := "/pingfederate/accessTokens" + op := &request.Operation{ + Name: "GetPingFederateAccessTokensCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.PingFederateAccessTokenView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdatePingFederateAccessTokensCommand - Update the PingFederate OAuth Client configuration +//RequestType: PUT +//Input: input *UpdatePingFederateAccessTokensCommandInput +func (s *PingfederateService) UpdatePingFederateAccessTokensCommand(input *UpdatePingFederateAccessTokensCommandInput) (output *models.PingFederateAccessTokenView, resp *http.Response, err error) { + path := "/pingfederate/accessTokens" + op := &request.Operation{ + Name: "UpdatePingFederateAccessTokensCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.PingFederateAccessTokenView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// 
UpdatePingFederateAccessTokensCommandInput - Inputs for UpdatePingFederateAccessTokensCommand +type UpdatePingFederateAccessTokensCommandInput struct { + Body models.PingFederateAccessTokenView +} + +//DeletePingFederateAdminCommand - Resets the PingFederate Admin configuration to default values +//RequestType: DELETE +//Input: +func (s *PingfederateService) DeletePingFederateAdminCommand() (resp *http.Response, err error) { + path := "/pingfederate/admin" + op := &request.Operation{ + Name: "DeletePingFederateAdminCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetPingFederateAdminCommand - Get the PingFederate Admin configuration +//RequestType: GET +//Input: +func (s *PingfederateService) GetPingFederateAdminCommand() (output *models.PingFederateAdminView, resp *http.Response, err error) { + path := "/pingfederate/admin" + op := &request.Operation{ + Name: "GetPingFederateAdminCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.PingFederateAdminView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdatePingFederateAdminCommand - Update the PingFederate Admin configuration +//RequestType: PUT +//Input: input *UpdatePingFederateAdminCommandInput +func (s *PingfederateService) UpdatePingFederateAdminCommand(input *UpdatePingFederateAdminCommandInput) (output *models.PingFederateAdminView, resp *http.Response, err error) { + path := "/pingfederate/admin" + op := &request.Operation{ + Name: "UpdatePingFederateAdminCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.PingFederateAdminView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdatePingFederateAdminCommandInput - Inputs for UpdatePingFederateAdminCommand +type UpdatePingFederateAdminCommandInput struct { + Body models.PingFederateAdminView +} + +//GetLegacyPingFederateMetadataCommand - [Attention: The endpoint "/pingfederate" is deprecated. This metadata corresponds to that configuration. "/pingfederate/runtime" and "/pingfederate/runtime/metadata" should be used instead.] 
Get the PingFederate metadata +//RequestType: GET +//Input: +func (s *PingfederateService) GetLegacyPingFederateMetadataCommand() (output *models.OIDCProviderMetadata, resp *http.Response, err error) { + path := "/pingfederate/metadata" + op := &request.Operation{ + Name: "GetLegacyPingFederateMetadataCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.OIDCProviderMetadata{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//DeletePingFederateRuntimeCommand - Resets the PingFederate configuration +//RequestType: DELETE +//Input: +func (s *PingfederateService) DeletePingFederateRuntimeCommand() (resp *http.Response, err error) { + path := "/pingfederate/runtime" + op := &request.Operation{ + Name: "DeletePingFederateRuntimeCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetPingFederateRuntimeCommand - Get the PingFederate configuration +//RequestType: GET +//Input: +func (s *PingfederateService) GetPingFederateRuntimeCommand() (output *models.PingFederateMetadataRuntimeView, resp *http.Response, err error) { + path := "/pingfederate/runtime" + op := &request.Operation{ + Name: "GetPingFederateRuntimeCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.PingFederateMetadataRuntimeView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdatePingFederateRuntimeCommand - Update the PingFederate configuration +//RequestType: PUT +//Input: input *UpdatePingFederateRuntimeCommandInput +func (s *PingfederateService) UpdatePingFederateRuntimeCommand(input *UpdatePingFederateRuntimeCommandInput) (output *models.PingFederateMetadataRuntimeView, resp *http.Response, err error) { + path := "/pingfederate/runtime" + op := &request.Operation{ + Name: "UpdatePingFederateRuntimeCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.PingFederateMetadataRuntimeView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdatePingFederateRuntimeCommandInput - Inputs for UpdatePingFederateRuntimeCommand +type UpdatePingFederateRuntimeCommandInput struct { + Body models.PingFederateMetadataRuntimeView +} + +//GetPingFederateMetadataCommand - Get the PingFederate Runtime metadata +//RequestType: GET +//Input: +func (s *PingfederateService) GetPingFederateMetadataCommand() (output *models.OIDCProviderMetadata, resp *http.Response, err error) { + path := "/pingfederate/runtime/metadata" + op := &request.Operation{ + Name: "GetPingFederateMetadataCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.OIDCProviderMetadata{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingone/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingone/interface.go new file mode 100644 index 00000000..7a49941e --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingone/interface.go @@ -0,0 +1,14 @@ +package pingone + 
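+// PingoneAPI captures the methods implemented by PingoneService (defined in
+// pingone.go), so callers can depend on the interface and swap in a stub
+// under test. A hedged sketch (checkMetadata is illustrative, not part of
+// this SDK):
+//
+//	func checkMetadata(api PingoneAPI) error {
+//		_, _, err := api.GetPingOne4CMetadataCommand()
+//		return err
+//	}
+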
+import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type PingoneAPI interface { + DeletePingOne4CCommand() (resp *http.Response, err error) + GetPingOne4CCommand() (output *models.PingOne4CView, resp *http.Response, err error) + UpdatePingOne4CCommand(input *UpdatePingOne4CCommandInput) (output *models.PingOne4CView, resp *http.Response, err error) + GetPingOne4CMetadataCommand() (output *models.OIDCProviderMetadata, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingone/pingone.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingone/pingone.go new file mode 100644 index 00000000..e4d7e099 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/pingone/pingone.go @@ -0,0 +1,132 @@ +package pingone + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of the service. + ServiceName = "Pingone" +) + +//PingoneService provides the API operations for making requests to +// the Pingone endpoint. +type PingoneService struct { + *client.Client +} + +//New creates a new instance of the PingoneService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a PingoneService from the configuration +// svc := pingone.New(cfg) +// +func New(cfg *config.Config) *PingoneService { + + return &PingoneService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Pingone operation +func (s *PingoneService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//DeletePingOne4CCommand - Resets the PingOne For Customers configuration to default values +//RequestType: DELETE +//Input: +func (s *PingoneService) DeletePingOne4CCommand() (resp *http.Response, err error) { + path := "/pingone/customers" + op := &request.Operation{ + Name: "DeletePingOne4CCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//GetPingOne4CCommand - Get the PingOne For Customers configuration +//RequestType: GET +//Input: +func (s *PingoneService) GetPingOne4CCommand() (output *models.PingOne4CView, resp *http.Response, err error) { + path := "/pingone/customers" + op := &request.Operation{ + Name: "GetPingOne4CCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.PingOne4CView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//UpdatePingOne4CCommand - Update the PingOne For Customers configuration +//RequestType: PUT +//Input: input *UpdatePingOne4CCommandInput +func (s *PingoneService) UpdatePingOne4CCommand(input *UpdatePingOne4CCommandInput) (output *models.PingOne4CView, resp 
*http.Response, err error) { + path := "/pingone/customers" + op := &request.Operation{ + Name: "UpdatePingOne4CCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.PingOne4CView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdatePingOne4CCommandInput - Inputs for UpdatePingOne4CCommand +type UpdatePingOne4CCommandInput struct { + Body models.PingOne4CView +} + +//GetPingOne4CMetadataCommand - Get the PingOne for Customers metadata +//RequestType: GET +//Input: +func (s *PingoneService) GetPingOne4CMetadataCommand() (output *models.OIDCProviderMetadata, resp *http.Response, err error) { + path := "/pingone/customers/metadata" + op := &request.Operation{ + Name: "GetPingOne4CMetadataCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.OIDCProviderMetadata{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/proxies/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/proxies/interface.go new file mode 100644 index 00000000..63e8dc06 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/proxies/interface.go @@ -0,0 +1,15 @@ +package proxies + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type ProxiesAPI interface { + GetProxiesCommand(input *GetProxiesCommandInput) (output *models.HttpClientProxyView, resp *http.Response, err error) + AddProxyCommand(input *AddProxyCommandInput) (output *models.HttpClientProxyView, resp *http.Response, err error) + DeleteProxyCommand(input *DeleteProxyCommandInput) (resp *http.Response, err error) + GetProxyCommand(input *GetProxyCommandInput) (output *models.HttpClientProxyView, resp *http.Response, err error) + UpdateProxyCommand(input *UpdateProxyCommandInput) (output *models.HttpClientProxyView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/proxies/proxies.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/proxies/proxies.go new file mode 100644 index 00000000..80782b7c --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/proxies/proxies.go @@ -0,0 +1,195 @@ +package proxies + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of the service. + ServiceName = "Proxies" +) + +//ProxiesService provides the API operations for making requests to +// the Proxies endpoint. +type ProxiesService struct { + *client.Client +} + +//New creates a new instance of the ProxiesService client. 
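+//
+// A hedged usage sketch with the svc value from the example below (the page
+// size shown is illustrative; all query parameters are plain strings):
+//
+//	out, resp, err := svc.GetProxiesCommand(&GetProxiesCommandInput{NumberPerPage: "10"})
+//	if err != nil {
+//		log.Fatalf("listing proxies failed: %v", err)
+//	}
+//	fmt.Println(resp.StatusCode, out)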
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a ProxiesService from the configuration +// svc := proxies.New(cfg) +// +func New(cfg *config.Config) *ProxiesService { + + return &ProxiesService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Proxies operation +func (s *ProxiesService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetProxiesCommand - Get all Proxies +//RequestType: GET +//Input: input *GetProxiesCommandInput +func (s *ProxiesService) GetProxiesCommand(input *GetProxiesCommandInput) (output *models.HttpClientProxyView, resp *http.Response, err error) { + path := "/proxies" + op := &request.Operation{ + Name: "GetProxiesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.HttpClientProxyView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetProxiesCommandInput - Inputs for GetProxiesCommand +type GetProxiesCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddProxyCommand - Create a Proxy +//RequestType: POST +//Input: input *AddProxyCommandInput +func (s *ProxiesService) AddProxyCommand(input *AddProxyCommandInput) (output *models.HttpClientProxyView, resp *http.Response, err error) { + path := "/proxies" + op := &request.Operation{ + Name: "AddProxyCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.HttpClientProxyView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddProxyCommandInput - Inputs for AddProxyCommand +type AddProxyCommandInput struct { + Body models.HttpClientProxyView +} + +//DeleteProxyCommand - Delete a Proxy +//RequestType: DELETE +//Input: input *DeleteProxyCommandInput +func (s *ProxiesService) DeleteProxyCommand(input *DeleteProxyCommandInput) (resp *http.Response, err error) { + path := "/proxies/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteProxyCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteProxyCommandInput - Inputs for DeleteProxyCommand +type DeleteProxyCommandInput struct { + Id string +} + +//GetProxyCommand - Get a Proxy +//RequestType: GET +//Input: input *GetProxyCommandInput +func (s *ProxiesService) GetProxyCommand(input *GetProxyCommandInput) (output *models.HttpClientProxyView, resp *http.Response, err error) { + path := "/proxies/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetProxyCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = 
&models.HttpClientProxyView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetProxyCommandInput - Inputs for GetProxyCommand +type GetProxyCommandInput struct { + Id string +} + +//UpdateProxyCommand - Update a Proxy +//RequestType: PUT +//Input: input *UpdateProxyCommandInput +func (s *ProxiesService) UpdateProxyCommand(input *UpdateProxyCommandInput) (output *models.HttpClientProxyView, resp *http.Response, err error) { + path := "/proxies/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateProxyCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.HttpClientProxyView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateProxyCommandInput - Inputs for UpdateProxyCommand +type UpdateProxyCommandInput struct { + Body models.HttpClientProxyView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/redirects/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/redirects/interface.go new file mode 100644 index 00000000..172914fe --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/redirects/interface.go @@ -0,0 +1,15 @@ +package redirects + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type RedirectsAPI interface { + GetRedirectsCommand(input *GetRedirectsCommandInput) (output *models.RedirectsView, resp *http.Response, err error) + AddRedirectCommand(input *AddRedirectCommandInput) (output *models.RedirectView, resp *http.Response, err error) + DeleteRedirectCommand(input *DeleteRedirectCommandInput) (resp *http.Response, err error) + GetRedirectCommand(input *GetRedirectCommandInput) (output *models.RedirectView, resp *http.Response, err error) + UpdateRedirectCommand(input *UpdateRedirectCommandInput) (output *models.RedirectView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/redirects/redirects.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/redirects/redirects.go new file mode 100644 index 00000000..0cc70684 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/redirects/redirects.go @@ -0,0 +1,197 @@ +package redirects + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of the service. + ServiceName = "Redirects" +) + +//RedirectsService provides the API operations for making requests to +// the Redirects endpoint. +type RedirectsService struct { + *client.Client +} + +//New creates a new instance of the RedirectsService client. 
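+//
+// A hedged sketch of creating a Redirect with the svc value from the example
+// below; the RedirectView body is left zero-valued here and would be
+// populated per models.RedirectView before use:
+//
+//	created, _, err := svc.AddRedirectCommand(&AddRedirectCommandInput{
+//		Body: models.RedirectView{},
+//	})
+//	if err == nil {
+//		fmt.Printf("created redirect: %+v\n", created)
+//	}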
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a RedirectsService from the configuration +// svc := redirects.New(cfg) +// +func New(cfg *config.Config) *RedirectsService { + + return &RedirectsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Redirects operation +func (s *RedirectsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetRedirectsCommand - Get all Redirects +//RequestType: GET +//Input: input *GetRedirectsCommandInput +func (s *RedirectsService) GetRedirectsCommand(input *GetRedirectsCommandInput) (output *models.RedirectsView, resp *http.Response, err error) { + path := "/redirects" + op := &request.Operation{ + Name: "GetRedirectsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "source": input.Source, + "target": input.Target, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.RedirectsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetRedirectsCommandInput - Inputs for GetRedirectsCommand +type GetRedirectsCommandInput struct { + Page string + NumberPerPage string + Filter string + Source string + Target string + SortKey string + Order string +} + +//AddRedirectCommand - Add a Redirect +//RequestType: POST +//Input: input *AddRedirectCommandInput +func (s *RedirectsService) AddRedirectCommand(input *AddRedirectCommandInput) (output *models.RedirectView, resp *http.Response, err error) { + path := "/redirects" + op := &request.Operation{ + Name: "AddRedirectCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RedirectView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddRedirectCommandInput - Inputs for AddRedirectCommand +type AddRedirectCommandInput struct { + Body models.RedirectView +} + +//DeleteRedirectCommand - Delete a Redirect +//RequestType: DELETE +//Input: input *DeleteRedirectCommandInput +func (s *RedirectsService) DeleteRedirectCommand(input *DeleteRedirectCommandInput) (resp *http.Response, err error) { + path := "/redirects/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteRedirectCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteRedirectCommandInput - Inputs for DeleteRedirectCommand +type DeleteRedirectCommandInput struct { + Id string +} + +//GetRedirectCommand - Get a Redirect +//RequestType: GET +//Input: input *GetRedirectCommandInput +func (s *RedirectsService) GetRedirectCommand(input *GetRedirectCommandInput) (output *models.RedirectView, resp *http.Response, err error) { + path := "/redirects/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: 
"GetRedirectCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RedirectView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetRedirectCommandInput - Inputs for GetRedirectCommand +type GetRedirectCommandInput struct { + Id string +} + +//UpdateRedirectCommand - Update a Redirect +//RequestType: PUT +//Input: input *UpdateRedirectCommandInput +func (s *RedirectsService) UpdateRedirectCommand(input *UpdateRedirectCommandInput) (output *models.RedirectView, resp *http.Response, err error) { + path := "/redirects/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateRedirectCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RedirectView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateRedirectCommandInput - Inputs for UpdateRedirectCommand +type UpdateRedirectCommandInput struct { + Body models.RedirectView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rejectionHandlers/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rejectionHandlers/interface.go new file mode 100644 index 00000000..d0bf5c73 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rejectionHandlers/interface.go @@ -0,0 +1,17 @@ +package rejectionHandlers + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type RejectionHandlersAPI interface { + GetRejectionHandlersCommand(input *GetRejectionHandlersCommandInput) (output *models.RejectionHandlersView, resp *http.Response, err error) + AddRejectionHandlerCommand(input *AddRejectionHandlerCommandInput) (output *models.RejectionHandlerView, resp *http.Response, err error) + GetRejectionHandlerDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) + GetRejecitonHandlerDescriptorCommand(input *GetRejecitonHandlerDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error) + DeleteRejectionHandlerCommand(input *DeleteRejectionHandlerCommandInput) (resp *http.Response, err error) + GetRejectionHandlerCommand(input *GetRejectionHandlerCommandInput) (output *models.RejectionHandlerView, resp *http.Response, err error) + UpdateRejectionHandlerCommand(input *UpdateRejectionHandlerCommandInput) (output *models.RejectionHandlerView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rejectionHandlers/rejectionHandlers.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rejectionHandlers/rejectionHandlers.go new file mode 100644 index 00000000..084b49de --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rejectionHandlers/rejectionHandlers.go @@ -0,0 +1,241 @@ +package rejectionHandlers + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + 
"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "RejectionHandlers" +) + +//RejectionHandlersService provides the API operations for making requests to +// RejectionHandlers endpoint. +type RejectionHandlersService struct { + *client.Client +} + +//New createa a new instance of the RejectionHandlersService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a RejectionHandlersService from the configuration +// svc := rejectionHandlers.New(cfg) +// +func New(cfg *config.Config) *RejectionHandlersService { + + return &RejectionHandlersService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a RejectionHandlers operation +func (s *RejectionHandlersService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetRejectionHandlersCommand - Get all Rejection Handlers +//RequestType: GET +//Input: input *GetRejectionHandlersCommandInput +func (s *RejectionHandlersService) GetRejectionHandlersCommand(input *GetRejectionHandlersCommandInput) (output *models.RejectionHandlersView, resp *http.Response, err error) { + path := "/rejectionHandlers" + op := &request.Operation{ + Name: "GetRejectionHandlersCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.RejectionHandlersView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetRejectionHandlersCommandInput - Inputs for GetRejectionHandlersCommand +type GetRejectionHandlersCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddRejectionHandlerCommand - Create a Rejection Handler +//RequestType: POST +//Input: input *AddRejectionHandlerCommandInput +func (s *RejectionHandlersService) AddRejectionHandlerCommand(input *AddRejectionHandlerCommandInput) (output *models.RejectionHandlerView, resp *http.Response, err error) { + path := "/rejectionHandlers" + op := &request.Operation{ + Name: "AddRejectionHandlerCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RejectionHandlerView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddRejectionHandlerCommandInput - Inputs for AddRejectionHandlerCommand +type AddRejectionHandlerCommandInput struct { + Body models.RejectionHandlerView +} + +//GetRejectionHandlerDescriptorsCommand - Get descriptors for all supported Rejection Handler types +//RequestType: GET +//Input: +func (s *RejectionHandlersService) GetRejectionHandlerDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) { + path := "/rejectionHandlers/descriptors" + op := &request.Operation{ + Name: "GetRejectionHandlerDescriptorsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.DescriptorsView{} + req 
:= s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetRejecitonHandlerDescriptorCommand - Get descriptor for a Rejection Handler type +//RequestType: GET +//Input: input *GetRejecitonHandlerDescriptorCommandInput +func (s *RejectionHandlersService) GetRejecitonHandlerDescriptorCommand(input *GetRejecitonHandlerDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error) { + path := "/rejectionHandlers/descriptors/{rejectionHandlerType}" + path = strings.Replace(path, "{rejectionHandlerType}", input.RejectionHandlerType, -1) + + op := &request.Operation{ + Name: "GetRejecitonHandlerDescriptorCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.DescriptorView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetRejecitonHandlerDescriptorCommandInput - Inputs for GetRejecitonHandlerDescriptorCommand +type GetRejecitonHandlerDescriptorCommandInput struct { + RejectionHandlerType string +} + +//DeleteRejectionHandlerCommand - Delete a Rejection Handler +//RequestType: DELETE +//Input: input *DeleteRejectionHandlerCommandInput +func (s *RejectionHandlersService) DeleteRejectionHandlerCommand(input *DeleteRejectionHandlerCommandInput) (resp *http.Response, err error) { + path := "/rejectionHandlers/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteRejectionHandlerCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteRejectionHandlerCommandInput - Inputs for DeleteRejectionHandlerCommand +type DeleteRejectionHandlerCommandInput struct { + Id string +} + +//GetRejectionHandlerCommand - Get a Rejection Handler +//RequestType: GET +//Input: input *GetRejectionHandlerCommandInput +func (s *RejectionHandlersService) GetRejectionHandlerCommand(input *GetRejectionHandlerCommandInput) (output *models.RejectionHandlerView, resp *http.Response, err error) { + path := "/rejectionHandlers/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetRejectionHandlerCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RejectionHandlerView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetRejectionHandlerCommandInput - Inputs for GetRejectionHandlerCommand +type GetRejectionHandlerCommandInput struct { + Id string +} + +//UpdateRejectionHandlerCommand - Update a Rejection Handler +//RequestType: PUT +//Input: input *UpdateRejectionHandlerCommandInput +func (s *RejectionHandlersService) UpdateRejectionHandlerCommand(input *UpdateRejectionHandlerCommandInput) (output *models.RejectionHandlerView, resp *http.Response, err error) { + path := "/rejectionHandlers/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateRejectionHandlerCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RejectionHandlerView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + 
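+		// req.Send() returning nil signals success; by the convention used
+		// throughout these services the response body has already been
+		// decoded into output by the underlying request handling.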
return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateRejectionHandlerCommandInput - Inputs for UpdateRejectionHandlerCommand +type UpdateRejectionHandlerCommandInput struct { + Body models.RejectionHandlerView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rules/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rules/interface.go new file mode 100644 index 00000000..8c571206 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rules/interface.go @@ -0,0 +1,17 @@ +package rules + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type RulesAPI interface { + GetRulesCommand(input *GetRulesCommandInput) (output *models.RulesView, resp *http.Response, err error) + AddRuleCommand(input *AddRuleCommandInput) (output *models.RuleView, resp *http.Response, err error) + GetRuleDescriptorsCommand() (output *models.RuleDescriptorsView, resp *http.Response, err error) + GetRuleDescriptorCommand(input *GetRuleDescriptorCommandInput) (output *models.RuleDescriptorView, resp *http.Response, err error) + DeleteRuleCommand(input *DeleteRuleCommandInput) (resp *http.Response, err error) + GetRuleCommand(input *GetRuleCommandInput) (output *models.RuleView, resp *http.Response, err error) + UpdateRuleCommand(input *UpdateRuleCommandInput) (output *models.RuleView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rules/rules.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rules/rules.go new file mode 100644 index 00000000..f55e494b --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rules/rules.go @@ -0,0 +1,241 @@ +package rules + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of the service. + ServiceName = "Rules" +) + +//RulesService provides the API operations for making requests to +// the Rules endpoint. +type RulesService struct { + *client.Client +} + +//New creates a new instance of the RulesService client. 
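+//
+// A hedged sketch, with the svc value from the example below, of discovering
+// the Rule types the server supports before adding a Rule:
+//
+//	descs, _, err := svc.GetRuleDescriptorsCommand()
+//	if err == nil {
+//		fmt.Printf("available rule types: %+v\n", descs)
+//	}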
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a RulesService from the configuration +// svc := rules.New(cfg) +// +func New(cfg *config.Config) *RulesService { + + return &RulesService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Rules operation +func (s *RulesService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetRulesCommand - Get all Rules +//RequestType: GET +//Input: input *GetRulesCommandInput +func (s *RulesService) GetRulesCommand(input *GetRulesCommandInput) (output *models.RulesView, resp *http.Response, err error) { + path := "/rules" + op := &request.Operation{ + Name: "GetRulesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.RulesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetRulesCommandInput - Inputs for GetRulesCommand +type GetRulesCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddRuleCommand - Add a Rule +//RequestType: POST +//Input: input *AddRuleCommandInput +func (s *RulesService) AddRuleCommand(input *AddRuleCommandInput) (output *models.RuleView, resp *http.Response, err error) { + path := "/rules" + op := &request.Operation{ + Name: "AddRuleCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RuleView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddRuleCommandInput - Inputs for AddRuleCommand +type AddRuleCommandInput struct { + Body models.RuleView +} + +//GetRuleDescriptorsCommand - Get descriptors for all supported Rule types +//RequestType: GET +//Input: +func (s *RulesService) GetRuleDescriptorsCommand() (output *models.RuleDescriptorsView, resp *http.Response, err error) { + path := "/rules/descriptors" + op := &request.Operation{ + Name: "GetRuleDescriptorsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.RuleDescriptorsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetRuleDescriptorCommand - Get descriptor for a Rule type +//RequestType: GET +//Input: input *GetRuleDescriptorCommandInput +func (s *RulesService) GetRuleDescriptorCommand(input *GetRuleDescriptorCommandInput) (output *models.RuleDescriptorView, resp *http.Response, err error) { + path := "/rules/descriptors/{ruleType}" + path = strings.Replace(path, "{ruleType}", input.RuleType, -1) + + op := &request.Operation{ + Name: "GetRuleDescriptorCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RuleDescriptorView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, 
req.Error +} + +// GetRuleDescriptorCommandInput - Inputs for GetRuleDescriptorCommand +type GetRuleDescriptorCommandInput struct { + RuleType string +} + +//DeleteRuleCommand - Delete a Rule +//RequestType: DELETE +//Input: input *DeleteRuleCommandInput +func (s *RulesService) DeleteRuleCommand(input *DeleteRuleCommandInput) (resp *http.Response, err error) { + path := "/rules/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteRuleCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteRuleCommandInput - Inputs for DeleteRuleCommand +type DeleteRuleCommandInput struct { + Id string +} + +//GetRuleCommand - Get a Rule +//RequestType: GET +//Input: input *GetRuleCommandInput +func (s *RulesService) GetRuleCommand(input *GetRuleCommandInput) (output *models.RuleView, resp *http.Response, err error) { + path := "/rules/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetRuleCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RuleView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetRuleCommandInput - Inputs for GetRuleCommand +type GetRuleCommandInput struct { + Id string +} + +//UpdateRuleCommand - Update a Rule +//RequestType: PUT +//Input: input *UpdateRuleCommandInput +func (s *RulesService) UpdateRuleCommand(input *UpdateRuleCommandInput) (output *models.RuleView, resp *http.Response, err error) { + path := "/rules/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateRuleCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RuleView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateRuleCommandInput - Inputs for UpdateRuleCommand +type UpdateRuleCommandInput struct { + Body models.RuleView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rulesets/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rulesets/interface.go new file mode 100644 index 00000000..c306d97c --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rulesets/interface.go @@ -0,0 +1,17 @@ +package rulesets + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type RulesetsAPI interface { + GetRuleSetsCommand(input *GetRuleSetsCommandInput) (output *models.RuleSetsView, resp *http.Response, err error) + AddRuleSetCommand(input *AddRuleSetCommandInput) (output *models.RuleSetView, resp *http.Response, err error) + GetRuleSetElementTypesCommand() (output *models.RuleSetElementTypesView, resp *http.Response, err error) + GetRuleSetSuccessCriteriaCommand() (output *models.RuleSetSuccessCriteriaView, resp *http.Response, err error) + DeleteRuleSetCommand(input *DeleteRuleSetCommandInput) (resp *http.Response, err error) + GetRuleSetCommand(input *GetRuleSetCommandInput) (output *models.RuleSetView, resp *http.Response, err error) + UpdateRuleSetCommand(input *UpdateRuleSetCommandInput) (output *models.RuleSetView, resp 
*http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rulesets/rulesets.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rulesets/rulesets.go new file mode 100644 index 00000000..98333298 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/rulesets/rulesets.go @@ -0,0 +1,233 @@ +package rulesets + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of the service. + ServiceName = "Rulesets" +) + +//RulesetsService provides the API operations for making requests to +// the Rulesets endpoint. +type RulesetsService struct { + *client.Client +} + +//New creates a new instance of the RulesetsService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a RulesetsService from the configuration +// svc := rulesets.New(cfg) +// +func New(cfg *config.Config) *RulesetsService { + + return &RulesetsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Rulesets operation +func (s *RulesetsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetRuleSetsCommand - Get all Rule Sets +//RequestType: GET +//Input: input *GetRuleSetsCommandInput +func (s *RulesetsService) GetRuleSetsCommand(input *GetRuleSetsCommandInput) (output *models.RuleSetsView, resp *http.Response, err error) { + path := "/rulesets" + op := &request.Operation{ + Name: "GetRuleSetsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.RuleSetsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetRuleSetsCommandInput - Inputs for GetRuleSetsCommand +type GetRuleSetsCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddRuleSetCommand - Add a Rule Set +//RequestType: POST +//Input: input *AddRuleSetCommandInput +func (s *RulesetsService) AddRuleSetCommand(input *AddRuleSetCommandInput) (output *models.RuleSetView, resp *http.Response, err error) { + path := "/rulesets" + op := &request.Operation{ + Name: "AddRuleSetCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RuleSetView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddRuleSetCommandInput - Inputs for AddRuleSetCommand +type AddRuleSetCommandInput struct { + Body models.RuleSetView +} + +//GetRuleSetElementTypesCommand - Get all Rule Set Element 
Types +//RequestType: GET +//Input: +func (s *RulesetsService) GetRuleSetElementTypesCommand() (output *models.RuleSetElementTypesView, resp *http.Response, err error) { + path := "/rulesets/elementTypes" + op := &request.Operation{ + Name: "GetRuleSetElementTypesCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.RuleSetElementTypesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetRuleSetSuccessCriteriaCommand - Get all Success Criteria +//RequestType: GET +//Input: +func (s *RulesetsService) GetRuleSetSuccessCriteriaCommand() (output *models.RuleSetSuccessCriteriaView, resp *http.Response, err error) { + path := "/rulesets/successCriteria" + op := &request.Operation{ + Name: "GetRuleSetSuccessCriteriaCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.RuleSetSuccessCriteriaView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//DeleteRuleSetCommand - Delete a Rule Set +//RequestType: DELETE +//Input: input *DeleteRuleSetCommandInput +func (s *RulesetsService) DeleteRuleSetCommand(input *DeleteRuleSetCommandInput) (resp *http.Response, err error) { + path := "/rulesets/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteRuleSetCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteRuleSetCommandInput - Inputs for DeleteRuleSetCommand +type DeleteRuleSetCommandInput struct { + Id string +} + +//GetRuleSetCommand - Get a Rule Set +//RequestType: GET +//Input: input *GetRuleSetCommandInput +func (s *RulesetsService) GetRuleSetCommand(input *GetRuleSetCommandInput) (output *models.RuleSetView, resp *http.Response, err error) { + path := "/rulesets/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetRuleSetCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RuleSetView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetRuleSetCommandInput - Inputs for GetRuleSetCommand +type GetRuleSetCommandInput struct { + Id string +} + +//UpdateRuleSetCommand - Update a Rule Set +//RequestType: PUT +//Input: input *UpdateRuleSetCommandInput +func (s *RulesetsService) UpdateRuleSetCommand(input *UpdateRuleSetCommandInput) (output *models.RuleSetView, resp *http.Response, err error) { + path := "/rulesets/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateRuleSetCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.RuleSetView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateRuleSetCommandInput - Inputs for UpdateRuleSetCommand +type UpdateRuleSetCommandInput struct { + Body models.RuleSetView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sharedSecrets/interface.go 
b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sharedSecrets/interface.go new file mode 100644 index 00000000..f6356f7b --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sharedSecrets/interface.go @@ -0,0 +1,14 @@ +package sharedSecrets + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type SharedSecretsAPI interface { + GetSharedSecretsCommand(input *GetSharedSecretsCommandInput) (output *models.SharedSecretsView, resp *http.Response, err error) + AddSharedSecretCommand(input *AddSharedSecretCommandInput) (output *models.SharedSecretView, resp *http.Response, err error) + DeleteSharedSecretCommand(input *DeleteSharedSecretCommandInput) (resp *http.Response, err error) + GetSharedSecretCommand(input *GetSharedSecretCommandInput) (output *models.SharedSecretView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sharedSecrets/sharedSecrets.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sharedSecrets/sharedSecrets.go new file mode 100644 index 00000000..9e081db9 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sharedSecrets/sharedSecrets.go @@ -0,0 +1,159 @@ +package sharedSecrets + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of the service. + ServiceName = "SharedSecrets" +) + +//SharedSecretsService provides the API operations for making requests to +// the SharedSecrets endpoint. +type SharedSecretsService struct { + *client.Client +} + +//New creates a new instance of the SharedSecretsService client. 
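+//
+// A hedged sketch of deleting a Shared Secret with the svc value from the
+// example below (the ID "1" is illustrative; delete operations return only
+// the raw *http.Response and an error):
+//
+//	resp, err := svc.DeleteSharedSecretCommand(&DeleteSharedSecretCommandInput{Id: "1"})
+//	if err == nil {
+//		fmt.Println("delete status:", resp.StatusCode)
+//	}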
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a SharedSecretsService from the configuration +// svc := sharedSecrets.New(cfg) +// +func New(cfg *config.Config) *SharedSecretsService { + + return &SharedSecretsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a SharedSecrets operation +func (s *SharedSecretsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetSharedSecretsCommand - Get all Shared Secrets +//RequestType: GET +//Input: input *GetSharedSecretsCommandInput +func (s *SharedSecretsService) GetSharedSecretsCommand(input *GetSharedSecretsCommandInput) (output *models.SharedSecretsView, resp *http.Response, err error) { + path := "/sharedSecrets" + op := &request.Operation{ + Name: "GetSharedSecretsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.SharedSecretsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetSharedSecretsCommandInput - Inputs for GetSharedSecretsCommand +type GetSharedSecretsCommandInput struct { + SortKey string + Order string +} + +//AddSharedSecretCommand - Create a Shared Secret +//RequestType: POST +//Input: input *AddSharedSecretCommandInput +func (s *SharedSecretsService) AddSharedSecretCommand(input *AddSharedSecretCommandInput) (output *models.SharedSecretView, resp *http.Response, err error) { + path := "/sharedSecrets" + op := &request.Operation{ + Name: "AddSharedSecretCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.SharedSecretView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddSharedSecretCommandInput - Inputs for AddSharedSecretCommand +type AddSharedSecretCommandInput struct { + Body models.SharedSecretView +} + +//DeleteSharedSecretCommand - Delete a Shared Secret +//RequestType: DELETE +//Input: input *DeleteSharedSecretCommandInput +func (s *SharedSecretsService) DeleteSharedSecretCommand(input *DeleteSharedSecretCommandInput) (resp *http.Response, err error) { + path := "/sharedSecrets/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteSharedSecretCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteSharedSecretCommandInput - Inputs for DeleteSharedSecretCommand +type DeleteSharedSecretCommandInput struct { + Id string +} + +//GetSharedSecretCommand - Get a Shared Secret +//RequestType: GET +//Input: input *GetSharedSecretCommandInput +func (s *SharedSecretsService) GetSharedSecretCommand(input *GetSharedSecretCommandInput) (output *models.SharedSecretView, resp *http.Response, err error) { + path := "/sharedSecrets/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetSharedSecretCommand", 
+		HTTPMethod:  "GET",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.SharedSecretView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// GetSharedSecretCommandInput - Inputs for GetSharedSecretCommand
+type GetSharedSecretCommandInput struct {
+	Id string
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators/interface.go
new file mode 100644
index 00000000..4e132946
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators/interface.go
@@ -0,0 +1,17 @@
+package siteAuthenticators
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+)
+
+type SiteAuthenticatorsAPI interface {
+	GetSiteAuthenticatorsCommand(input *GetSiteAuthenticatorsCommandInput) (output *models.SiteAuthenticatorsView, resp *http.Response, err error)
+	AddSiteAuthenticatorCommand(input *AddSiteAuthenticatorCommandInput) (output *models.SiteAuthenticatorView, resp *http.Response, err error)
+	GetSiteAuthenticatorDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error)
+	GetSiteAuthenticatorDescriptorCommand(input *GetSiteAuthenticatorDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error)
+	DeleteSiteAuthenticatorCommand(input *DeleteSiteAuthenticatorCommandInput) (resp *http.Response, err error)
+	GetSiteAuthenticatorCommand(input *GetSiteAuthenticatorCommandInput) (output *models.SiteAuthenticatorView, resp *http.Response, err error)
+	UpdateSiteAuthenticatorCommand(input *UpdateSiteAuthenticatorCommandInput) (output *models.SiteAuthenticatorView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators/siteAuthenticators.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators/siteAuthenticators.go
new file mode 100644
index 00000000..cc459ec9
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators/siteAuthenticators.go
@@ -0,0 +1,241 @@
+package siteAuthenticators
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "SiteAuthenticators"
+)
+
+//SiteAuthenticatorsService provides the API operations for making requests to
+// SiteAuthenticators endpoint.
+type SiteAuthenticatorsService struct {
+	*client.Client
+}
+
+//New creates a new instance of the SiteAuthenticatorsService client.
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a SiteAuthenticatorsService from the configuration +// svc := siteAuthenticators.New(cfg) +// +func New(cfg *config.Config) *SiteAuthenticatorsService { + + return &SiteAuthenticatorsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a SiteAuthenticators operation +func (s *SiteAuthenticatorsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetSiteAuthenticatorsCommand - Get all Site Authenticators +//RequestType: GET +//Input: input *GetSiteAuthenticatorsCommandInput +func (s *SiteAuthenticatorsService) GetSiteAuthenticatorsCommand(input *GetSiteAuthenticatorsCommandInput) (output *models.SiteAuthenticatorsView, resp *http.Response, err error) { + path := "/siteAuthenticators" + op := &request.Operation{ + Name: "GetSiteAuthenticatorsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.SiteAuthenticatorsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetSiteAuthenticatorsCommandInput - Inputs for GetSiteAuthenticatorsCommand +type GetSiteAuthenticatorsCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddSiteAuthenticatorCommand - Create a Site Authenticator +//RequestType: POST +//Input: input *AddSiteAuthenticatorCommandInput +func (s *SiteAuthenticatorsService) AddSiteAuthenticatorCommand(input *AddSiteAuthenticatorCommandInput) (output *models.SiteAuthenticatorView, resp *http.Response, err error) { + path := "/siteAuthenticators" + op := &request.Operation{ + Name: "AddSiteAuthenticatorCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.SiteAuthenticatorView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddSiteAuthenticatorCommandInput - Inputs for AddSiteAuthenticatorCommand +type AddSiteAuthenticatorCommandInput struct { + Body models.SiteAuthenticatorView +} + +//GetSiteAuthenticatorDescriptorsCommand - Get descriptors for all supported Site Authenticator types +//RequestType: GET +//Input: +func (s *SiteAuthenticatorsService) GetSiteAuthenticatorDescriptorsCommand() (output *models.DescriptorsView, resp *http.Response, err error) { + path := "/siteAuthenticators/descriptors" + op := &request.Operation{ + Name: "GetSiteAuthenticatorDescriptorsCommand", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.DescriptorsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//GetSiteAuthenticatorDescriptorCommand - Get descriptor for a Site Authenticator type +//RequestType: GET +//Input: input *GetSiteAuthenticatorDescriptorCommandInput +func (s *SiteAuthenticatorsService) 
GetSiteAuthenticatorDescriptorCommand(input *GetSiteAuthenticatorDescriptorCommandInput) (output *models.DescriptorView, resp *http.Response, err error) { + path := "/siteAuthenticators/descriptors/{siteAuthenticatorType}" + path = strings.Replace(path, "{siteAuthenticatorType}", input.SiteAuthenticatorType, -1) + + op := &request.Operation{ + Name: "GetSiteAuthenticatorDescriptorCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.DescriptorView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetSiteAuthenticatorDescriptorCommandInput - Inputs for GetSiteAuthenticatorDescriptorCommand +type GetSiteAuthenticatorDescriptorCommandInput struct { + SiteAuthenticatorType string +} + +//DeleteSiteAuthenticatorCommand - Delete a Site Authenticator +//RequestType: DELETE +//Input: input *DeleteSiteAuthenticatorCommandInput +func (s *SiteAuthenticatorsService) DeleteSiteAuthenticatorCommand(input *DeleteSiteAuthenticatorCommandInput) (resp *http.Response, err error) { + path := "/siteAuthenticators/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteSiteAuthenticatorCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteSiteAuthenticatorCommandInput - Inputs for DeleteSiteAuthenticatorCommand +type DeleteSiteAuthenticatorCommandInput struct { + Id string +} + +//GetSiteAuthenticatorCommand - Get a Site Authenticator +//RequestType: GET +//Input: input *GetSiteAuthenticatorCommandInput +func (s *SiteAuthenticatorsService) GetSiteAuthenticatorCommand(input *GetSiteAuthenticatorCommandInput) (output *models.SiteAuthenticatorView, resp *http.Response, err error) { + path := "/siteAuthenticators/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetSiteAuthenticatorCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.SiteAuthenticatorView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetSiteAuthenticatorCommandInput - Inputs for GetSiteAuthenticatorCommand +type GetSiteAuthenticatorCommandInput struct { + Id string +} + +//UpdateSiteAuthenticatorCommand - Update a Site Authenticator +//RequestType: PUT +//Input: input *UpdateSiteAuthenticatorCommandInput +func (s *SiteAuthenticatorsService) UpdateSiteAuthenticatorCommand(input *UpdateSiteAuthenticatorCommandInput) (output *models.SiteAuthenticatorView, resp *http.Response, err error) { + path := "/siteAuthenticators/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateSiteAuthenticatorCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.SiteAuthenticatorView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateSiteAuthenticatorCommandInput - Inputs for UpdateSiteAuthenticatorCommand +type UpdateSiteAuthenticatorCommandInput struct { + Body models.SiteAuthenticatorView + Id string +} diff --git 
a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sites/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sites/interface.go
new file mode 100644
index 00000000..d9b1d22b
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sites/interface.go
@@ -0,0 +1,15 @@
+package sites
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+)
+
+type SitesAPI interface {
+	GetSitesCommand(input *GetSitesCommandInput) (output *models.SitesView, resp *http.Response, err error)
+	AddSiteCommand(input *AddSiteCommandInput) (output *models.SiteView, resp *http.Response, err error)
+	DeleteSiteCommand(input *DeleteSiteCommandInput) (resp *http.Response, err error)
+	GetSiteCommand(input *GetSiteCommandInput) (output *models.SiteView, resp *http.Response, err error)
+	UpdateSiteCommand(input *UpdateSiteCommandInput) (output *models.SiteView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sites/sites.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sites/sites.go
new file mode 100644
index 00000000..076c7e82
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/sites/sites.go
@@ -0,0 +1,195 @@
+package sites
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "Sites"
+)
+
+//SitesService provides the API operations for making requests to
+// Sites endpoint.
+type SitesService struct {
+	*client.Client
+}
+
+//New creates a new instance of the SitesService client.
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a SitesService from the configuration +// svc := sites.New(cfg) +// +func New(cfg *config.Config) *SitesService { + + return &SitesService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Sites operation +func (s *SitesService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetSitesCommand - Get all Sites +//RequestType: GET +//Input: input *GetSitesCommandInput +func (s *SitesService) GetSitesCommand(input *GetSitesCommandInput) (output *models.SitesView, resp *http.Response, err error) { + path := "/sites" + op := &request.Operation{ + Name: "GetSitesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.SitesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetSitesCommandInput - Inputs for GetSitesCommand +type GetSitesCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddSiteCommand - Create a Site +//RequestType: POST +//Input: input *AddSiteCommandInput +func (s *SitesService) AddSiteCommand(input *AddSiteCommandInput) (output *models.SiteView, resp *http.Response, err error) { + path := "/sites" + op := &request.Operation{ + Name: "AddSiteCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.SiteView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddSiteCommandInput - Inputs for AddSiteCommand +type AddSiteCommandInput struct { + Body models.SiteView +} + +//DeleteSiteCommand - Delete a Site +//RequestType: DELETE +//Input: input *DeleteSiteCommandInput +func (s *SitesService) DeleteSiteCommand(input *DeleteSiteCommandInput) (resp *http.Response, err error) { + path := "/sites/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteSiteCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteSiteCommandInput - Inputs for DeleteSiteCommand +type DeleteSiteCommandInput struct { + Id string +} + +//GetSiteCommand - Get a Site +//RequestType: GET +//Input: input *GetSiteCommandInput +func (s *SitesService) GetSiteCommand(input *GetSiteCommandInput) (output *models.SiteView, resp *http.Response, err error) { + path := "/sites/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetSiteCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.SiteView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, 
req.Error
+}
+
+// GetSiteCommandInput - Inputs for GetSiteCommand
+type GetSiteCommandInput struct {
+	Id string
+}
+
+//UpdateSiteCommand - Update a Site
+//RequestType: PUT
+//Input: input *UpdateSiteCommandInput
+func (s *SitesService) UpdateSiteCommand(input *UpdateSiteCommandInput) (output *models.SiteView, resp *http.Response, err error) {
+	path := "/sites/{id}"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
+	op := &request.Operation{
+		Name:        "UpdateSiteCommand",
+		HTTPMethod:  "PUT",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.SiteView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// UpdateSiteCommandInput - Inputs for UpdateSiteCommand
+type UpdateSiteCommandInput struct {
+	Body models.SiteView
+	Id   string
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices/interface.go
new file mode 100644
index 00000000..fd391f08
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices/interface.go
@@ -0,0 +1,15 @@
+package thirdPartyServices
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+)
+
+type ThirdPartyServicesAPI interface {
+	GetThirdPartyServicesCommand(input *GetThirdPartyServicesCommandInput) (output *models.ThirdPartyServicesView, resp *http.Response, err error)
+	AddThirdPartyServiceCommand(input *AddThirdPartyServiceCommandInput) (output *models.ThirdPartyServiceView, resp *http.Response, err error)
+	DeleteThirdPartyServiceCommand(input *DeleteThirdPartyServiceCommandInput) (resp *http.Response, err error)
+	GetThirdPartyServiceCommand(input *GetThirdPartyServiceCommandInput) (output *models.ThirdPartyServiceView, resp *http.Response, err error)
+	UpdateThirdPartyServiceCommand(input *UpdateThirdPartyServiceCommandInput) (output *models.ThirdPartyServiceView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices/thirdPartyServices.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices/thirdPartyServices.go
new file mode 100644
index 00000000..6d9dddda
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices/thirdPartyServices.go
@@ -0,0 +1,195 @@
+package thirdPartyServices
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "ThirdPartyServices"
+)
+
+//ThirdPartyServicesService provides the API operations for making requests to
+// ThirdPartyServices endpoint.
+type ThirdPartyServicesService struct {
+	*client.Client
+}
+
+//New creates a new instance of the ThirdPartyServicesService client.
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a ThirdPartyServicesService from the configuration +// svc := thirdPartyServices.New(cfg) +// +func New(cfg *config.Config) *ThirdPartyServicesService { + + return &ThirdPartyServicesService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a ThirdPartyServices operation +func (s *ThirdPartyServicesService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetThirdPartyServicesCommand - Get all Third-Party Services +//RequestType: GET +//Input: input *GetThirdPartyServicesCommandInput +func (s *ThirdPartyServicesService) GetThirdPartyServicesCommand(input *GetThirdPartyServicesCommandInput) (output *models.ThirdPartyServicesView, resp *http.Response, err error) { + path := "/thirdPartyServices" + op := &request.Operation{ + Name: "GetThirdPartyServicesCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.ThirdPartyServicesView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetThirdPartyServicesCommandInput - Inputs for GetThirdPartyServicesCommand +type GetThirdPartyServicesCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddThirdPartyServiceCommand - Create a Third-Party Service +//RequestType: POST +//Input: input *AddThirdPartyServiceCommandInput +func (s *ThirdPartyServicesService) AddThirdPartyServiceCommand(input *AddThirdPartyServiceCommandInput) (output *models.ThirdPartyServiceView, resp *http.Response, err error) { + path := "/thirdPartyServices" + op := &request.Operation{ + Name: "AddThirdPartyServiceCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ThirdPartyServiceView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddThirdPartyServiceCommandInput - Inputs for AddThirdPartyServiceCommand +type AddThirdPartyServiceCommandInput struct { + Body models.ThirdPartyServiceView +} + +//DeleteThirdPartyServiceCommand - Delete a Third-Party Service +//RequestType: DELETE +//Input: input *DeleteThirdPartyServiceCommandInput +func (s *ThirdPartyServicesService) DeleteThirdPartyServiceCommand(input *DeleteThirdPartyServiceCommandInput) (resp *http.Response, err error) { + path := "/thirdPartyServices/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteThirdPartyServiceCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteThirdPartyServiceCommandInput - Inputs for DeleteThirdPartyServiceCommand +type DeleteThirdPartyServiceCommandInput struct { + Id string +} + 
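+// A minimal usage sketch tying the commands in this file together
+// (illustrative only, not part of the generated SDK: the credentials and
+// endpoint mirror the Example on New above, and the literal id "1" is an
+// assumption):
+//
+// svc := thirdPartyServices.New(config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()))
+//
+// //List the first page of Third-Party Services, then fetch one by id.
+// all, _, err := svc.GetThirdPartyServicesCommand(&thirdPartyServices.GetThirdPartyServicesCommandInput{Page: "1", NumberPerPage: "10"})
+// if err == nil && all != nil {
+//     one, _, _ := svc.GetThirdPartyServiceCommand(&thirdPartyServices.GetThirdPartyServiceCommandInput{Id: "1"})
+//     _ = one
+// }
+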
+//GetThirdPartyServiceCommand - Get a Third-Party Service +//RequestType: GET +//Input: input *GetThirdPartyServiceCommandInput +func (s *ThirdPartyServicesService) GetThirdPartyServiceCommand(input *GetThirdPartyServiceCommandInput) (output *models.ThirdPartyServiceView, resp *http.Response, err error) { + path := "/thirdPartyServices/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetThirdPartyServiceCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ThirdPartyServiceView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetThirdPartyServiceCommandInput - Inputs for GetThirdPartyServiceCommand +type GetThirdPartyServiceCommandInput struct { + Id string +} + +//UpdateThirdPartyServiceCommand - Update a Third-Party Service +//RequestType: PUT +//Input: input *UpdateThirdPartyServiceCommandInput +func (s *ThirdPartyServicesService) UpdateThirdPartyServiceCommand(input *UpdateThirdPartyServiceCommandInput) (output *models.ThirdPartyServiceView, resp *http.Response, err error) { + path := "/thirdPartyServices/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateThirdPartyServiceCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.ThirdPartyServiceView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateThirdPartyServiceCommandInput - Inputs for UpdateThirdPartyServiceCommand +type UpdateThirdPartyServiceCommandInput struct { + Body models.ThirdPartyServiceView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/tokenProvider/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/tokenProvider/interface.go new file mode 100644 index 00000000..4fedab4f --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/tokenProvider/interface.go @@ -0,0 +1,13 @@ +package tokenProvider + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type TokenProviderAPI interface { + DeleteTokenProviderSettingCommand() (resp *http.Response, err error) + GetTokenProviderSettingCommand() (output *models.TokenProviderSettingView, resp *http.Response, err error) + UpdateTokenProviderSettingCommand(input *UpdateTokenProviderSettingCommandInput) (output *models.TokenProviderSettingView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/tokenProvider/tokenProvider.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/tokenProvider/tokenProvider.go new file mode 100644 index 00000000..f03ed183 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/tokenProvider/tokenProvider.go @@ -0,0 +1,113 @@ +package tokenProvider + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - 
The name of service.
+	ServiceName = "TokenProvider"
+)
+
+//TokenProviderService provides the API operations for making requests to
+// TokenProvider endpoint.
+type TokenProviderService struct {
+	*client.Client
+}
+
+//New creates a new instance of the TokenProviderService client.
+//
+// Example:
+// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+// //Create a TokenProviderService from the configuration
+// svc := tokenProvider.New(cfg)
+//
+func New(cfg *config.Config) *TokenProviderService {
+
+	return &TokenProviderService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for a TokenProvider operation
+func (s *TokenProviderService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//DeleteTokenProviderSettingCommand - Resets the Token Provider settings to default values
+//RequestType: DELETE
+//Input:
+func (s *TokenProviderService) DeleteTokenProviderSettingCommand() (resp *http.Response, err error) {
+	path := "/tokenProvider/settings"
+	op := &request.Operation{
+		Name:       "DeleteTokenProviderSettingCommand",
+		HTTPMethod: "DELETE",
+		HTTPPath:   path,
+	}
+
+	req := s.newRequest(op, nil, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+//GetTokenProviderSettingCommand - Get the Token Provider settings
+//RequestType: GET
+//Input:
+func (s *TokenProviderService) GetTokenProviderSettingCommand() (output *models.TokenProviderSettingView, resp *http.Response, err error) {
+	path := "/tokenProvider/settings"
+	op := &request.Operation{
+		Name:       "GetTokenProviderSettingCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.TokenProviderSettingView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//UpdateTokenProviderSettingCommand - Update the Token Provider setting
+//RequestType: PUT
+//Input: input *UpdateTokenProviderSettingCommandInput
+func (s *TokenProviderService) UpdateTokenProviderSettingCommand(input *UpdateTokenProviderSettingCommandInput) (output *models.TokenProviderSettingView, resp *http.Response, err error) {
+	path := "/tokenProvider/settings"
+	op := &request.Operation{
+		Name:        "UpdateTokenProviderSettingCommand",
+		HTTPMethod:  "PUT",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.TokenProviderSettingView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// UpdateTokenProviderSettingCommandInput - Inputs for UpdateTokenProviderSettingCommand
+type UpdateTokenProviderSettingCommandInput struct {
+	Body models.TokenProviderSettingView
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups/interface.go
new file mode 100644
index 00000000..22df5b59
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups/interface.go
@@ -0,0 +1,15 @@
+package trustedCertificateGroups
+
+import (
+	"net/http"
+
+	
"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type TrustedCertificateGroupsAPI interface { + GetTrustedCertificateGroupsCommand(input *GetTrustedCertificateGroupsCommandInput) (output *models.TrustedCertificateGroupsView, resp *http.Response, err error) + AddTrustedCertificateGroupCommand(input *AddTrustedCertificateGroupCommandInput) (output *models.TrustedCertificateGroupView, resp *http.Response, err error) + DeleteTrustedCertificateGroupCommand(input *DeleteTrustedCertificateGroupCommandInput) (resp *http.Response, err error) + GetTrustedCertificateGroupCommand(input *GetTrustedCertificateGroupCommandInput) (output *models.TrustedCertificateGroupView, resp *http.Response, err error) + UpdateTrustedCertificateGroupCommand(input *UpdateTrustedCertificateGroupCommandInput) (output *models.TrustedCertificateGroupView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups/trustedCertificateGroups.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups/trustedCertificateGroups.go new file mode 100644 index 00000000..9c936d1d --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups/trustedCertificateGroups.go @@ -0,0 +1,195 @@ +package trustedCertificateGroups + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "TrustedCertificateGroups" +) + +//TrustedCertificateGroupsService provides the API operations for making requests to +// TrustedCertificateGroups endpoint. +type TrustedCertificateGroupsService struct { + *client.Client +} + +//New createa a new instance of the TrustedCertificateGroupsService client. 
+// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a TrustedCertificateGroupsService from the configuration +// svc := trustedCertificateGroups.New(cfg) +// +func New(cfg *config.Config) *TrustedCertificateGroupsService { + + return &TrustedCertificateGroupsService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a TrustedCertificateGroups operation +func (s *TrustedCertificateGroupsService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetTrustedCertificateGroupsCommand - Get all Trusted Certificate Groups +//RequestType: GET +//Input: input *GetTrustedCertificateGroupsCommandInput +func (s *TrustedCertificateGroupsService) GetTrustedCertificateGroupsCommand(input *GetTrustedCertificateGroupsCommandInput) (output *models.TrustedCertificateGroupsView, resp *http.Response, err error) { + path := "/trustedCertificateGroups" + op := &request.Operation{ + Name: "GetTrustedCertificateGroupsCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "name": input.Name, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.TrustedCertificateGroupsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetTrustedCertificateGroupsCommandInput - Inputs for GetTrustedCertificateGroupsCommand +type GetTrustedCertificateGroupsCommandInput struct { + Page string + NumberPerPage string + Filter string + Name string + SortKey string + Order string +} + +//AddTrustedCertificateGroupCommand - Create a Trusted Certificate Group +//RequestType: POST +//Input: input *AddTrustedCertificateGroupCommandInput +func (s *TrustedCertificateGroupsService) AddTrustedCertificateGroupCommand(input *AddTrustedCertificateGroupCommandInput) (output *models.TrustedCertificateGroupView, resp *http.Response, err error) { + path := "/trustedCertificateGroups" + op := &request.Operation{ + Name: "AddTrustedCertificateGroupCommand", + HTTPMethod: "POST", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.TrustedCertificateGroupView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// AddTrustedCertificateGroupCommandInput - Inputs for AddTrustedCertificateGroupCommand +type AddTrustedCertificateGroupCommandInput struct { + Body models.TrustedCertificateGroupView +} + +//DeleteTrustedCertificateGroupCommand - Delete a Trusted Certificate Group +//RequestType: DELETE +//Input: input *DeleteTrustedCertificateGroupCommandInput +func (s *TrustedCertificateGroupsService) DeleteTrustedCertificateGroupCommand(input *DeleteTrustedCertificateGroupCommandInput) (resp *http.Response, err error) { + path := "/trustedCertificateGroups/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "DeleteTrustedCertificateGroupCommand", + HTTPMethod: "DELETE", + HTTPPath: path, + QueryParams: map[string]string{}, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + 
return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +// DeleteTrustedCertificateGroupCommandInput - Inputs for DeleteTrustedCertificateGroupCommand +type DeleteTrustedCertificateGroupCommandInput struct { + Id string +} + +//GetTrustedCertificateGroupCommand - Get a Trusted Certificate Group +//RequestType: GET +//Input: input *GetTrustedCertificateGroupCommandInput +func (s *TrustedCertificateGroupsService) GetTrustedCertificateGroupCommand(input *GetTrustedCertificateGroupCommandInput) (output *models.TrustedCertificateGroupView, resp *http.Response, err error) { + path := "/trustedCertificateGroups/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetTrustedCertificateGroupCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.TrustedCertificateGroupView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetTrustedCertificateGroupCommandInput - Inputs for GetTrustedCertificateGroupCommand +type GetTrustedCertificateGroupCommandInput struct { + Id string +} + +//UpdateTrustedCertificateGroupCommand - Update a TrustedCertificateGroup +//RequestType: PUT +//Input: input *UpdateTrustedCertificateGroupCommandInput +func (s *TrustedCertificateGroupsService) UpdateTrustedCertificateGroupCommand(input *UpdateTrustedCertificateGroupCommandInput) (output *models.TrustedCertificateGroupView, resp *http.Response, err error) { + path := "/trustedCertificateGroups/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateTrustedCertificateGroupCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.TrustedCertificateGroupView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateTrustedCertificateGroupCommandInput - Inputs for UpdateTrustedCertificateGroupCommand +type UpdateTrustedCertificateGroupCommandInput struct { + Body models.TrustedCertificateGroupView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/unknownResources/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/unknownResources/interface.go new file mode 100644 index 00000000..a29275d5 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/unknownResources/interface.go @@ -0,0 +1,13 @@ +package unknownResources + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type UnknownResourcesAPI interface { + Delete() (resp *http.Response, err error) + Get() (output *models.UnknownResourceSettingsView, resp *http.Response, err error) + Update(input *UpdateInput) (output *models.UnknownResourceSettingsView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/unknownResources/unknownResources.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/unknownResources/unknownResources.go new file mode 100644 index 00000000..f0542d42 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/unknownResources/unknownResources.go @@ -0,0 +1,113 @@ +package unknownResources + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + 
"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "UnknownResources" +) + +//UnknownResourcesService provides the API operations for making requests to +// UnknownResources endpoint. +type UnknownResourcesService struct { + *client.Client +} + +//New createa a new instance of the UnknownResourcesService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a UnknownResourcesService from the configuration +// svc := unknownResources.New(cfg) +// +func New(cfg *config.Config) *UnknownResourcesService { + + return &UnknownResourcesService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a UnknownResources operation +func (s *UnknownResourcesService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//Delete - Resets the global settings for unknown resources to default values +//RequestType: DELETE +//Input: +func (s *UnknownResourcesService) Delete() (resp *http.Response, err error) { + path := "/unknownResources/settings" + op := &request.Operation{ + Name: "Delete", + HTTPMethod: "DELETE", + HTTPPath: path, + } + + req := s.newRequest(op, nil, nil) + + if req.Send() == nil { + return req.HTTPResponse, nil + } + return req.HTTPResponse, req.Error +} + +//Get - Retrieves the global settings for unknown resources +//RequestType: GET +//Input: +func (s *UnknownResourcesService) Get() (output *models.UnknownResourceSettingsView, resp *http.Response, err error) { + path := "/unknownResources/settings" + op := &request.Operation{ + Name: "Get", + HTTPMethod: "GET", + HTTPPath: path, + } + output = &models.UnknownResourceSettingsView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +//Update - Updates the global settings for unknown resources +//RequestType: PUT +//Input: input *UpdateInput +func (s *UnknownResourcesService) Update(input *UpdateInput) (output *models.UnknownResourceSettingsView, resp *http.Response, err error) { + path := "/unknownResources/settings" + op := &request.Operation{ + Name: "Update", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.UnknownResourceSettingsView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateInput - Inputs for Update +type UpdateInput struct { + Body models.UnknownResourceSettingsView +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/users/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/users/interface.go new file mode 100644 index 00000000..83521b9c --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/users/interface.go @@ -0,0 +1,14 @@ +package users + +import ( + "net/http" + + 
"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type UsersAPI interface { + GetUsersCommand(input *GetUsersCommandInput) (output *models.UsersView, resp *http.Response, err error) + GetUserCommand(input *GetUserCommandInput) (output *models.UserView, resp *http.Response, err error) + UpdateUserCommand(input *UpdateUserCommandInput) (output *models.UserView, resp *http.Response, err error) + UpdateUserPasswordCommand(input *UpdateUserPasswordCommandInput) (output *models.UserPasswordView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/users/users.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/users/users.go new file mode 100644 index 00000000..b948617f --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/users/users.go @@ -0,0 +1,171 @@ +package users + +import ( + "net/http" + "strings" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. + ServiceName = "Users" +) + +//UsersService provides the API operations for making requests to +// Users endpoint. +type UsersService struct { + *client.Client +} + +//New createa a new instance of the UsersService client. +// +// Example: +// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String()) +// +// //Create a UsersService from the configuration +// svc := users.New(cfg) +// +func New(cfg *config.Config) *UsersService { + + return &UsersService{Client: client.New( + *cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: *cfg.Endpoint, + APIVersion: pingaccess.SDKVersion, + }, + )} +} + +// newRequest creates a new request for a Users operation +func (s *UsersService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := s.NewRequest(op, params, data) + + return req +} + +//GetUsersCommand - Get all Users +//RequestType: GET +//Input: input *GetUsersCommandInput +func (s *UsersService) GetUsersCommand(input *GetUsersCommandInput) (output *models.UsersView, resp *http.Response, err error) { + path := "/users" + op := &request.Operation{ + Name: "GetUsersCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{ + "page": input.Page, + "numberPerPage": input.NumberPerPage, + "filter": input.Filter, + "username": input.Username, + "sortKey": input.SortKey, + "order": input.Order, + }, + } + output = &models.UsersView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetUsersCommandInput - Inputs for GetUsersCommand +type GetUsersCommandInput struct { + Page string + NumberPerPage string + Filter string + Username string + SortKey string + Order string +} + +//GetUserCommand - Get a User +//RequestType: GET +//Input: input *GetUserCommandInput +func (s *UsersService) GetUserCommand(input *GetUserCommandInput) (output *models.UserView, resp *http.Response, err error) { + path := "/users/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: 
"GetUserCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.UserView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetUserCommandInput - Inputs for GetUserCommand +type GetUserCommandInput struct { + Id string +} + +//UpdateUserCommand - Update a User +//RequestType: PUT +//Input: input *UpdateUserCommandInput +func (s *UsersService) UpdateUserCommand(input *UpdateUserCommandInput) (output *models.UserView, resp *http.Response, err error) { + path := "/users/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateUserCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.UserView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateUserCommandInput - Inputs for UpdateUserCommand +type UpdateUserCommandInput struct { + Body models.UserView + Id string +} + +//UpdateUserPasswordCommand - Update a User's Password +//RequestType: PUT +//Input: input *UpdateUserPasswordCommandInput +func (s *UsersService) UpdateUserPasswordCommand(input *UpdateUserPasswordCommandInput) (output *models.UserPasswordView, resp *http.Response, err error) { + path := "/users/{id}/password" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateUserPasswordCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.UserPasswordView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateUserPasswordCommandInput - Inputs for UpdateUserPasswordCommand +type UpdateUserPasswordCommandInput struct { + Body models.UserPasswordView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/version/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/version/interface.go new file mode 100644 index 00000000..a153c9aa --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/version/interface.go @@ -0,0 +1,11 @@ +package version + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type VersionAPI interface { + VersionCommand() (output *models.VersionDocClass, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/version/version.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/version/version.go new file mode 100644 index 00000000..36d8aa9c --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/version/version.go @@ -0,0 +1,69 @@ +package version + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/config" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/request" +) + +const ( + // ServiceName - The name of service. 
+	ServiceName = "Version"
+)
+
+//VersionService provides the API operations for making requests to
+// Version endpoint.
+type VersionService struct {
+	*client.Client
+}
+
+//New creates a new instance of the VersionService client.
+//
+// Example:
+// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+// //Create a VersionService from the configuration
+// svc := version.New(cfg)
+//
+func New(cfg *config.Config) *VersionService {
+
+	return &VersionService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for a Version operation
+func (s *VersionService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//VersionCommand - Get the PingAccess version number
+//RequestType: GET
+//Input:
+func (s *VersionService) VersionCommand() (output *models.VersionDocClass, resp *http.Response, err error) {
+	path := "/version"
+	op := &request.Operation{
+		Name:       "VersionCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.VersionDocClass{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts/interface.go
new file mode 100644
index 00000000..273f9f6d
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts/interface.go
@@ -0,0 +1,15 @@
+package virtualhosts
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+)
+
+type VirtualhostsAPI interface {
+	GetVirtualHostsCommand(input *GetVirtualHostsCommandInput) (output *models.VirtualHostsView, resp *http.Response, err error)
+	AddVirtualHostCommand(input *AddVirtualHostCommandInput) (output *models.VirtualHostView, resp *http.Response, err error)
+	DeleteVirtualHostCommand(input *DeleteVirtualHostCommandInput) (resp *http.Response, err error)
+	GetVirtualHostCommand(input *GetVirtualHostCommandInput) (output *models.VirtualHostView, resp *http.Response, err error)
+	UpdateVirtualHostCommand(input *UpdateVirtualHostCommandInput) (output *models.VirtualHostView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts/virtualhosts.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts/virtualhosts.go
new file mode 100644
index 00000000..5cd13922
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts/virtualhosts.go
@@ -0,0 +1,195 @@
+package virtualhosts
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of service.
+	ServiceName = "Virtualhosts"
+)
+
+//VirtualhostsService provides the API operations for making requests to
+// Virtualhosts endpoint.
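+//
+// A minimal usage sketch (illustrative only; the paging values are
+// assumptions) for listing Virtual Hosts through this service:
+//
+// svc := virtualhosts.New(cfg) //cfg built as in the Example on New below
+// vhs, _, err := svc.GetVirtualHostsCommand(&virtualhosts.GetVirtualHostsCommandInput{Page: "1", NumberPerPage: "25"})
+// if err == nil {
+//     _ = vhs //a models.VirtualHostsView holding the first page
+// }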
+type VirtualhostsService struct {
+	*client.Client
+}
+
+//New creates a new instance of the VirtualhostsService client.
+//
+// Example:
+// cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+// //Create a VirtualhostsService from the configuration
+// svc := virtualhosts.New(cfg)
+//
+func New(cfg *config.Config) *VirtualhostsService {
+
+	return &VirtualhostsService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for a Virtualhosts operation
+func (s *VirtualhostsService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//GetVirtualHostsCommand - Get all Virtual Hosts
+//RequestType: GET
+//Input: input *GetVirtualHostsCommandInput
+func (s *VirtualhostsService) GetVirtualHostsCommand(input *GetVirtualHostsCommandInput) (output *models.VirtualHostsView, resp *http.Response, err error) {
+	path := "/virtualhosts"
+	op := &request.Operation{
+		Name:       "GetVirtualHostsCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+		QueryParams: map[string]string{
+			"page":          input.Page,
+			"numberPerPage": input.NumberPerPage,
+			"filter":        input.Filter,
+			"virtualHost":   input.VirtualHost,
+			"sortKey":       input.SortKey,
+			"order":         input.Order,
+		},
+	}
+	output = &models.VirtualHostsView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// GetVirtualHostsCommandInput - Inputs for GetVirtualHostsCommand
+type GetVirtualHostsCommandInput struct {
+	Page          string
+	NumberPerPage string
+	Filter        string
+	VirtualHost   string
+	SortKey       string
+	Order         string
+}
+
+//AddVirtualHostCommand - Create a Virtual Host
+//RequestType: POST
+//Input: input *AddVirtualHostCommandInput
+func (s *VirtualhostsService) AddVirtualHostCommand(input *AddVirtualHostCommandInput) (output *models.VirtualHostView, resp *http.Response, err error) {
+	path := "/virtualhosts"
+	op := &request.Operation{
+		Name:        "AddVirtualHostCommand",
+		HTTPMethod:  "POST",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.VirtualHostView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// AddVirtualHostCommandInput - Inputs for AddVirtualHostCommand
+type AddVirtualHostCommandInput struct {
+	Body models.VirtualHostView
+}
+
+//DeleteVirtualHostCommand - Delete a Virtual Host
+//RequestType: DELETE
+//Input: input *DeleteVirtualHostCommandInput
+func (s *VirtualhostsService) DeleteVirtualHostCommand(input *DeleteVirtualHostCommandInput) (resp *http.Response, err error) {
+	path := "/virtualhosts/{id}"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
+	op := &request.Operation{
+		Name:        "DeleteVirtualHostCommand",
+		HTTPMethod:  "DELETE",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+
+	req := s.newRequest(op, nil, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+// DeleteVirtualHostCommandInput - Inputs for DeleteVirtualHostCommand
+type DeleteVirtualHostCommandInput struct {
+	Id string
+}
+
+//GetVirtualHostCommand - Get a Virtual Host
+//RequestType: GET
+//Input: input *GetVirtualHostCommandInput
+func (s *VirtualhostsService) 
GetVirtualHostCommand(input *GetVirtualHostCommandInput) (output *models.VirtualHostView, resp *http.Response, err error) { + path := "/virtualhosts/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "GetVirtualHostCommand", + HTTPMethod: "GET", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.VirtualHostView{} + req := s.newRequest(op, nil, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// GetVirtualHostCommandInput - Inputs for GetVirtualHostCommand +type GetVirtualHostCommandInput struct { + Id string +} + +//UpdateVirtualHostCommand - Update a Virtual Host +//RequestType: PUT +//Input: input *UpdateVirtualHostCommandInput +func (s *VirtualhostsService) UpdateVirtualHostCommand(input *UpdateVirtualHostCommandInput) (output *models.VirtualHostView, resp *http.Response, err error) { + path := "/virtualhosts/{id}" + path = strings.Replace(path, "{id}", input.Id, -1) + + op := &request.Operation{ + Name: "UpdateVirtualHostCommand", + HTTPMethod: "PUT", + HTTPPath: path, + QueryParams: map[string]string{}, + } + output = &models.VirtualHostView{} + req := s.newRequest(op, input.Body, output) + + if req.Send() == nil { + return output, req.HTTPResponse, nil + } + return nil, req.HTTPResponse, req.Error +} + +// UpdateVirtualHostCommandInput - Inputs for UpdateVirtualHostCommand +type UpdateVirtualHostCommandInput struct { + Body models.VirtualHostView + Id string +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessionManagement/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessionManagement/interface.go new file mode 100644 index 00000000..49379b45 --- /dev/null +++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessionManagement/interface.go @@ -0,0 +1,22 @@ +package webSessionManagement + +import ( + "net/http" + + "github.com/iwarapter/pingaccess-sdk-go/pingaccess/models" +) + +type WebSessionManagementAPI interface { + DeleteWebSessionManagementCommand() (resp *http.Response, err error) + GetWebSessionManagementCommand() (output *models.WebSessionManagementView, resp *http.Response, err error) + UpdateWebSessionManagementCommand(input *UpdateWebSessionManagementCommandInput) (output *models.WebSessionManagementView, resp *http.Response, err error) + GetCookieTypes() (output *models.CookieTypesView, resp *http.Response, err error) + GetWebSessionSupportedEncryptionAlgorithmsCommand() (output *models.AlgorithmsView, resp *http.Response, err error) + GetWebSessionKeySetCommand() (output *models.KeySetView, resp *http.Response, err error) + UpdateWebSessionKeySetCommand(input *UpdateWebSessionKeySetCommandInput) (output *models.KeySetView, resp *http.Response, err error) + GetOidcLoginTypes() (output *models.OidcLoginTypesView, resp *http.Response, err error) + GetOidcScopesCommand(input *GetOidcScopesCommandInput) (output *models.SupportedScopesView, resp *http.Response, err error) + GetRequestPreservationTypes() (output *models.RequestPreservationTypesView, resp *http.Response, err error) + GetWebSessionSupportedSigningAlgorithms() (output *models.SigningAlgorithmsView, resp *http.Response, err error) + GetWebStorageTypes() (output *models.WebStorageTypesView, resp *http.Response, err error) +} diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessionManagement/webSessionManagement.go 
new file mode 100644
index 00000000..6340f3c2
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessionManagement/webSessionManagement.go
@@ -0,0 +1,298 @@
+package webSessionManagement
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of the service.
+	ServiceName = "WebSessionManagement"
+)
+
+//WebSessionManagementService provides the API operations for making requests to
+// WebSessionManagement endpoint.
+type WebSessionManagementService struct {
+	*client.Client
+}
+
+//New creates a new instance of the WebSessionManagementService client.
+//
+// Example:
+//   cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+//   //Create a WebSessionManagementService from the configuration
+//   svc := webSessionManagement.New(cfg)
+//
+func New(cfg *config.Config) *WebSessionManagementService {
+
+	return &WebSessionManagementService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for a WebSessionManagement operation
+func (s *WebSessionManagementService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//DeleteWebSessionManagementCommand - Resets the Web Session Management configuration to default values
+//RequestType: DELETE
+//Input:
+func (s *WebSessionManagementService) DeleteWebSessionManagementCommand() (resp *http.Response, err error) {
+	path := "/webSessionManagement"
+	op := &request.Operation{
+		Name:       "DeleteWebSessionManagementCommand",
+		HTTPMethod: "DELETE",
+		HTTPPath:   path,
+	}
+
+	req := s.newRequest(op, nil, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+//GetWebSessionManagementCommand - Get the Web Session Management configuration
+//RequestType: GET
+//Input:
+func (s *WebSessionManagementService) GetWebSessionManagementCommand() (output *models.WebSessionManagementView, resp *http.Response, err error) {
+	path := "/webSessionManagement"
+	op := &request.Operation{
+		Name:       "GetWebSessionManagementCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.WebSessionManagementView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//UpdateWebSessionManagementCommand - Update the Web Session Management configuration
+//RequestType: PUT
+//Input: input *UpdateWebSessionManagementCommandInput
+func (s *WebSessionManagementService) UpdateWebSessionManagementCommand(input *UpdateWebSessionManagementCommandInput) (output *models.WebSessionManagementView, resp *http.Response, err error) {
+	path := "/webSessionManagement"
+	op := &request.Operation{
+		Name:        "UpdateWebSessionManagementCommand",
+		HTTPMethod:  "PUT",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.WebSessionManagementView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// UpdateWebSessionManagementCommandInput - Inputs for UpdateWebSessionManagementCommand
+type UpdateWebSessionManagementCommandInput struct {
+	Body models.WebSessionManagementView
+}
+
+//GetCookieTypes - Get the valid OIDC Cookie Types
+//RequestType: GET
+//Input:
+func (s *WebSessionManagementService) GetCookieTypes() (output *models.CookieTypesView, resp *http.Response, err error) {
+	path := "/webSessionManagement/cookieTypes"
+	op := &request.Operation{
+		Name:       "GetCookieTypes",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.CookieTypesView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//GetWebSessionSupportedEncryptionAlgorithmsCommand - Get the valid algorithms for Web Session Cookie Encryption
+//RequestType: GET
+//Input:
+func (s *WebSessionManagementService) GetWebSessionSupportedEncryptionAlgorithmsCommand() (output *models.AlgorithmsView, resp *http.Response, err error) {
+	path := "/webSessionManagement/encryptionAlgorithms"
+	op := &request.Operation{
+		Name:       "GetWebSessionSupportedEncryptionAlgorithmsCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.AlgorithmsView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//GetWebSessionKeySetCommand - Get the Web Session key set configuration
+//RequestType: GET
+//Input:
+func (s *WebSessionManagementService) GetWebSessionKeySetCommand() (output *models.KeySetView, resp *http.Response, err error) {
+	path := "/webSessionManagement/keySet"
+	op := &request.Operation{
+		Name:       "GetWebSessionKeySetCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.KeySetView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//UpdateWebSessionKeySetCommand - Update the WebSession key set configuration
+//RequestType: PUT
+//Input: input *UpdateWebSessionKeySetCommandInput
+func (s *WebSessionManagementService) UpdateWebSessionKeySetCommand(input *UpdateWebSessionKeySetCommandInput) (output *models.KeySetView, resp *http.Response, err error) {
+	path := "/webSessionManagement/keySet"
+	op := &request.Operation{
+		Name:        "UpdateWebSessionKeySetCommand",
+		HTTPMethod:  "PUT",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.KeySetView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// UpdateWebSessionKeySetCommandInput - Inputs for UpdateWebSessionKeySetCommand
+type UpdateWebSessionKeySetCommandInput struct {
+	Body models.KeySetView
+}
+
+//GetOidcLoginTypes - Get the valid OIDC Login Types
+//RequestType: GET
+//Input:
+func (s *WebSessionManagementService) GetOidcLoginTypes() (output *models.OidcLoginTypesView, resp *http.Response, err error) {
+	path := "/webSessionManagement/oidcLoginTypes"
+	op := &request.Operation{
+		Name:       "GetOidcLoginTypes",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.OidcLoginTypesView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//GetOidcScopesCommand - Get the scopes supported by the current OIDC Provider
+//RequestType: GET
+//Input: input *GetOidcScopesCommandInput
+func (s *WebSessionManagementService) GetOidcScopesCommand(input *GetOidcScopesCommandInput) (output *models.SupportedScopesView, resp *http.Response, err error) {
+	path := "/webSessionManagement/oidcScopes"
+	op := &request.Operation{
+		Name:       "GetOidcScopesCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+		QueryParams: map[string]string{
+			"clientId": input.ClientId,
+		},
+	}
+	output = &models.SupportedScopesView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// GetOidcScopesCommandInput - Inputs for GetOidcScopesCommand
+type GetOidcScopesCommandInput struct {
+	ClientId string
+}
+
+//GetRequestPreservationTypes - Get the valid Request Preservation Types
+//RequestType: GET
+//Input:
+func (s *WebSessionManagementService) GetRequestPreservationTypes() (output *models.RequestPreservationTypesView, resp *http.Response, err error) {
+	path := "/webSessionManagement/requestPreservationTypes"
+	op := &request.Operation{
+		Name:       "GetRequestPreservationTypes",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.RequestPreservationTypesView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//GetWebSessionSupportedSigningAlgorithms - Get the valid algorithms for Web Session Cookie Signing
+//RequestType: GET
+//Input:
+func (s *WebSessionManagementService) GetWebSessionSupportedSigningAlgorithms() (output *models.SigningAlgorithmsView, resp *http.Response, err error) {
+	path := "/webSessionManagement/signingAlgorithms"
+	op := &request.Operation{
+		Name:       "GetWebSessionSupportedSigningAlgorithms",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.SigningAlgorithmsView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+//GetWebStorageTypes - Get the valid Web Storage Types
+//RequestType: GET
+//Input:
+func (s *WebSessionManagementService) GetWebStorageTypes() (output *models.WebStorageTypesView, resp *http.Response, err error) {
+	path := "/webSessionManagement/webStorageTypes"
+	op := &request.Operation{
+		Name:       "GetWebStorageTypes",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+	}
+	output = &models.WebStorageTypesView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessions/interface.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessions/interface.go
new file mode 100644
index 00000000..1a90173a
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessions/interface.go
@@ -0,0 +1,15 @@
+package webSessions
+
+import (
+	"net/http"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+)
+
+type WebSessionsAPI interface {
+	GetWebSessionsCommand(input *GetWebSessionsCommandInput) (output *models.WebSessionsView, resp *http.Response, err error)
+	AddWebSessionCommand(input *AddWebSessionCommandInput) (output *models.WebSessionView, resp *http.Response, err error)
+	DeleteWebSessionCommand(input *DeleteWebSessionCommandInput) (resp *http.Response, err error)
+	GetWebSessionCommand(input *GetWebSessionCommandInput) (output *models.WebSessionView, resp *http.Response, err error)
+	UpdateWebSessionCommand(input *UpdateWebSessionCommandInput) (output *models.WebSessionView, resp *http.Response, err error)
+}
diff --git a/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessions/webSessions.go b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessions/webSessions.go
new file mode 100644
index 00000000..1d6ee941
--- /dev/null
+++ b/vendor/github.com/iwarapter/pingaccess-sdk-go/services/webSessions/webSessions.go
@@ -0,0 +1,195 @@
+package webSessions
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/models"
+	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/request"
+)
+
+const (
+	// ServiceName - The name of the service.
+	ServiceName = "WebSessions"
+)
+
+//WebSessionsService provides the API operations for making requests to
+// WebSessions endpoint.
+type WebSessionsService struct {
+	*client.Client
+}
+
+//New creates a new instance of the WebSessionsService client.
+//
+// Example:
+//   cfg := config.NewConfig().WithUsername("Administrator").WithPassword("2FederateM0re").WithEndpoint(paURL.String())
+//
+//   //Create a WebSessionsService from the configuration
+//   svc := webSessions.New(cfg)
+//
+func New(cfg *config.Config) *WebSessionsService {
+
+	return &WebSessionsService{Client: client.New(
+		*cfg,
+		metadata.ClientInfo{
+			ServiceName: ServiceName,
+			Endpoint:    *cfg.Endpoint,
+			APIVersion:  pingaccess.SDKVersion,
+		},
+	)}
+}
+
+// newRequest creates a new request for a WebSessions operation
+func (s *WebSessionsService) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := s.NewRequest(op, params, data)
+
+	return req
+}
+
+//GetWebSessionsCommand - Get all WebSessions
+//RequestType: GET
+//Input: input *GetWebSessionsCommandInput
+func (s *WebSessionsService) GetWebSessionsCommand(input *GetWebSessionsCommandInput) (output *models.WebSessionsView, resp *http.Response, err error) {
+	path := "/webSessions"
+	op := &request.Operation{
+		Name:       "GetWebSessionsCommand",
+		HTTPMethod: "GET",
+		HTTPPath:   path,
+		QueryParams: map[string]string{
+			"page":          input.Page,
+			"numberPerPage": input.NumberPerPage,
+			"filter":        input.Filter,
+			"name":          input.Name,
+			"sortKey":       input.SortKey,
+			"order":         input.Order,
+		},
+	}
+	output = &models.WebSessionsView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// GetWebSessionsCommandInput - Inputs for GetWebSessionsCommand
+type GetWebSessionsCommandInput struct {
+	Page          string
+	NumberPerPage string
+	Filter        string
+	Name          string
+	SortKey       string
+	Order         string
+}
+
+//AddWebSessionCommand - Create a WebSession
+//RequestType: POST
+//Input: input *AddWebSessionCommandInput
+func (s *WebSessionsService) AddWebSessionCommand(input *AddWebSessionCommandInput) (output *models.WebSessionView, resp *http.Response, err error) {
+	path := "/webSessions"
+	op := &request.Operation{
+		Name:        "AddWebSessionCommand",
+		HTTPMethod:  "POST",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.WebSessionView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// AddWebSessionCommandInput - Inputs for AddWebSessionCommand
+type AddWebSessionCommandInput struct {
+	Body models.WebSessionView
+}
+
+//DeleteWebSessionCommand - Delete a WebSession
+//RequestType: DELETE
+//Input: input *DeleteWebSessionCommandInput
+func (s *WebSessionsService) DeleteWebSessionCommand(input *DeleteWebSessionCommandInput) (resp *http.Response, err error) {
+	path := "/webSessions/{id}"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
+	op := &request.Operation{
+		Name:        "DeleteWebSessionCommand",
+		HTTPMethod:  "DELETE",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+
+	req := s.newRequest(op, nil, nil)
+
+	if req.Send() == nil {
+		return req.HTTPResponse, nil
+	}
+	return req.HTTPResponse, req.Error
+}
+
+// DeleteWebSessionCommandInput - Inputs for DeleteWebSessionCommand
+type DeleteWebSessionCommandInput struct {
+	Id string
+}
+
+//GetWebSessionCommand - Get a WebSession
+//RequestType: GET
+//Input: input *GetWebSessionCommandInput
+func (s *WebSessionsService) GetWebSessionCommand(input *GetWebSessionCommandInput) (output *models.WebSessionView, resp *http.Response, err error) {
+	path := "/webSessions/{id}"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
+	op := &request.Operation{
+		Name:        "GetWebSessionCommand",
+		HTTPMethod:  "GET",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.WebSessionView{}
+	req := s.newRequest(op, nil, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// GetWebSessionCommandInput - Inputs for GetWebSessionCommand
+type GetWebSessionCommandInput struct {
+	Id string
+}
+
+//UpdateWebSessionCommand - Update a WebSession
+//RequestType: PUT
+//Input: input *UpdateWebSessionCommandInput
+func (s *WebSessionsService) UpdateWebSessionCommand(input *UpdateWebSessionCommandInput) (output *models.WebSessionView, resp *http.Response, err error) {
+	path := "/webSessions/{id}"
+	path = strings.Replace(path, "{id}", input.Id, -1)
+
+	op := &request.Operation{
+		Name:        "UpdateWebSessionCommand",
+		HTTPMethod:  "PUT",
+		HTTPPath:    path,
+		QueryParams: map[string]string{},
+	}
+	output = &models.WebSessionView{}
+	req := s.newRequest(op, input.Body, output)
+
+	if req.Send() == nil {
+		return output, req.HTTPResponse, nil
+	}
+	return nil, req.HTTPResponse, req.Error
+}
+
+// UpdateWebSessionCommandInput - Inputs for UpdateWebSessionCommand
+type UpdateWebSessionCommandInput struct {
+	Body models.WebSessionView
+	Id   string
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e29a5d3f..298e4487 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -58,8 +58,6 @@ github.com/aws/aws-sdk-go/service/sts
 github.com/aws/aws-sdk-go/service/sts/stsiface
 # github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d
 github.com/bgentry/go-netrc/netrc
-# github.com/cenkalti/backoff v2.2.1+incompatible
-## explicit
 # github.com/cenkalti/backoff/v3 v3.0.0
 github.com/cenkalti/backoff/v3
 # github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c
@@ -79,15 +77,19 @@ github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/empty
 github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/go-cmp v0.3.1
+# github.com/google/go-cmp v0.4.1
 ## explicit
 github.com/google/go-cmp/cmp
 github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
+# github.com/google/uuid v1.1.1
+github.com/google/uuid
 # github.com/googleapis/gax-go/v2 v2.0.5
 github.com/googleapis/gax-go/v2
+# github.com/gotestyourself/gotestyourself v2.2.0+incompatible
+## explicit
 # github.com/hashicorp/errwrap v1.0.0
 github.com/hashicorp/errwrap
 # github.com/hashicorp/go-cleanhttp v0.5.1
@@ -149,9 +151,58 @@ github.com/hashicorp/terraform-plugin-sdk/v2/terraform
 github.com/hashicorp/terraform-plugin-test
 # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d
 github.com/hashicorp/yamux
-# github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200614193934-b24fe6e545e5
+# github.com/iwarapter/pingaccess-sdk-go v0.0.0-20200704204151-1493c5058af2
 ## explicit
 github.com/iwarapter/pingaccess-sdk-go/pingaccess
+github.com/iwarapter/pingaccess-sdk-go/pingaccess/client
+github.com/iwarapter/pingaccess-sdk-go/pingaccess/client/metadata
+github.com/iwarapter/pingaccess-sdk-go/pingaccess/config
+github.com/iwarapter/pingaccess-sdk-go/pingaccess/models
+github.com/iwarapter/pingaccess-sdk-go/pingaccess/request
+github.com/iwarapter/pingaccess-sdk-go/services/accessTokenValidators
+github.com/iwarapter/pingaccess-sdk-go/services/acme
+github.com/iwarapter/pingaccess-sdk-go/services/adminConfig
+github.com/iwarapter/pingaccess-sdk-go/services/adminSessionInfo
+github.com/iwarapter/pingaccess-sdk-go/services/agents
+github.com/iwarapter/pingaccess-sdk-go/services/applications
+github.com/iwarapter/pingaccess-sdk-go/services/auth
+github.com/iwarapter/pingaccess-sdk-go/services/authTokenManagement
+github.com/iwarapter/pingaccess-sdk-go/services/authnReqLists
+github.com/iwarapter/pingaccess-sdk-go/services/backup
+github.com/iwarapter/pingaccess-sdk-go/services/certificates
+github.com/iwarapter/pingaccess-sdk-go/services/config
+github.com/iwarapter/pingaccess-sdk-go/services/engineListeners
+github.com/iwarapter/pingaccess-sdk-go/services/engines
+github.com/iwarapter/pingaccess-sdk-go/services/globalUnprotectedResources
+github.com/iwarapter/pingaccess-sdk-go/services/highAvailability
+github.com/iwarapter/pingaccess-sdk-go/services/hsmProviders
+github.com/iwarapter/pingaccess-sdk-go/services/httpConfig
+github.com/iwarapter/pingaccess-sdk-go/services/httpsListeners
+github.com/iwarapter/pingaccess-sdk-go/services/identityMappings
+github.com/iwarapter/pingaccess-sdk-go/services/keyPairs
+github.com/iwarapter/pingaccess-sdk-go/services/license
+github.com/iwarapter/pingaccess-sdk-go/services/oauth
+github.com/iwarapter/pingaccess-sdk-go/services/oauthKeyManagement
+github.com/iwarapter/pingaccess-sdk-go/services/oidc
+github.com/iwarapter/pingaccess-sdk-go/services/pingfederate
+github.com/iwarapter/pingaccess-sdk-go/services/pingone
+github.com/iwarapter/pingaccess-sdk-go/services/proxies
+github.com/iwarapter/pingaccess-sdk-go/services/redirects
+github.com/iwarapter/pingaccess-sdk-go/services/rejectionHandlers
+github.com/iwarapter/pingaccess-sdk-go/services/rules
+github.com/iwarapter/pingaccess-sdk-go/services/rulesets
+github.com/iwarapter/pingaccess-sdk-go/services/sharedSecrets
+github.com/iwarapter/pingaccess-sdk-go/services/siteAuthenticators
+github.com/iwarapter/pingaccess-sdk-go/services/sites
+github.com/iwarapter/pingaccess-sdk-go/services/thirdPartyServices
+github.com/iwarapter/pingaccess-sdk-go/services/tokenProvider
+github.com/iwarapter/pingaccess-sdk-go/services/trustedCertificateGroups
+github.com/iwarapter/pingaccess-sdk-go/services/unknownResources
+github.com/iwarapter/pingaccess-sdk-go/services/users
+github.com/iwarapter/pingaccess-sdk-go/services/version
+github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts
+github.com/iwarapter/pingaccess-sdk-go/services/webSessionManagement
+github.com/iwarapter/pingaccess-sdk-go/services/webSessions
 # github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
 github.com/jmespath/go-jmespath
 # github.com/konsorten/go-windows-terminal-sequences v1.0.2
@@ -352,3 +403,5 @@ google.golang.org/grpc/status
 google.golang.org/grpc/tap
 # gopkg.in/yaml.v2 v2.2.8
 ## explicit
+# gotest.tools v2.2.0+incompatible
+## explicit

From 861c305b4661891f6680c35f985ac254bad03e4c Mon Sep 17 00:00:00 2001
From: Iain Adams
Date: Mon, 6 Jul 2020 22:52:20 +0100
Subject: [PATCH 19/23] #20 add some initial support for password tracking in
 PingAccess 6.1+
---
 Makefile                                       |  3 +-
 func-tests/hsm_provider.tf                     |  8 +-
 pingaccess/config.go                           |  7 ++
 pingaccess/provider_test.go                    |  8 +-
 .../resource_pingaccess_hsm_provider_test.go   | 32 ++++----
 .../resource_pingaccess_oauth_server.go        | 22 +++--
 .../resource_pingaccess_oauth_server_test.go   | 72 ++++++++++++++--
 ...urce_pingaccess_pingfederate_admin_test.go  |  6 +-
 ...urce_pingaccess_pingfederate_oauth_test.go  |  3 +-
 pingaccess/resource_pingaccess_websession.go   | 24 ++++--
 .../resource_pingaccess_websession_test.go     | 82 +++++++++++++++++--
 pingaccess/structures.go                       | 13 +--
 pingaccess/structures_test.go                  | 15 ++--
 13 files changed, 226 insertions(+), 69 deletions(-)

diff --git a/Makefile b/Makefile
index c39c5913..268ab721 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,8 @@
 # Makefile
 VERSION ?= 0.0.0
 NAME=terraform-provider-pingaccess_v${VERSION}
-BASE_DOCKER_TAG=pingidentity/pingaccess:6.0.2-edge
+PINGACCESS_VERSION=6.1.0-edge
+BASE_DOCKER_TAG=pingidentity/pingaccess:${PINGACCESS_VERSION}
 
 pa-init:
 	@docker run --rm -d --hostname pingaccess --name pingaccess -e PING_IDENTITY_DEVOPS_KEY=$(PING_IDENTITY_DEVOPS_KEY) -e PING_IDENTITY_DEVOPS_USER=$(PING_IDENTITY_DEVOPS_USER) -e PING_IDENTITY_ACCEPT_EULA=YES --publish 9000:9000 ${BASE_DOCKER_TAG}
diff --git a/func-tests/hsm_provider.tf b/func-tests/hsm_provider.tf
index 819f64cc..4ec7860e 100644
--- a/func-tests/hsm_provider.tf
+++ b/func-tests/hsm_provider.tf
@@ -1,12 +1,12 @@
 resource "pingaccess_hsm_provider" "test" {
   count = var.pa6 ? 1 : 0
-  class_name = "com.pingidentity.pa.hsm.cloudhsm.plugin.AwsCloudHsmProvider"
+  class_name = "com.pingidentity.pa.hsm.pkcs11.plugin.PKCS11HsmProvider"
   name = "demo"
   configuration = <
Date: Tue, 7 Jul 2020 08:29:18 +0100
Subject: [PATCH 20/23] #19 ensure app resources are rebuilt if the underlying
 app changes
---
 pingaccess/resource_pingaccess_application_resource.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pingaccess/resource_pingaccess_application_resource.go b/pingaccess/resource_pingaccess_application_resource.go
index 43581071..49bbfb65 100644
--- a/pingaccess/resource_pingaccess_application_resource.go
+++ b/pingaccess/resource_pingaccess_application_resource.go
@@ -37,6 +37,7 @@ func resourcePingAccessApplicationResourceSchema() map[string]*schema.Schema {
 		"application_id": {
 			Type:     schema.TypeString,
 			Required: true,
+			ForceNew: true,
 		},
 		"audit_level": {
 			Type: schema.TypeString,

From 513a623f4ed54fb91243bbcfcb7c73b1e45cb214 Mon Sep 17 00:00:00 2001
From: Iain Adams
Date: Tue, 7 Jul 2020 17:21:09 +0100
Subject: [PATCH 21/23] ensure app values are correctly set - and https
 listeners are correctly set when added
---
 main.go                                            |  2 +-
 pingaccess/resource_pingaccess_application.go      | 13 ++++---------
 pingaccess/resource_pingaccess_application_test.go |  1 +
 pingaccess/resource_pingaccess_https_listener.go   |  7 +++----
 4 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/main.go b/main.go
index 9c96f9a3..99872671 100644
--- a/main.go
+++ b/main.go
@@ -15,7 +15,7 @@ func main() {
 	flag.Parse()
 
 	if debugMode {
-		err := plugin.Debug(context.Background(), "registry.terraform.io/-/pingfederate",
+		err := plugin.Debug(context.Background(), "registry.terraform.io/iwarapter/pingaccess",
 			&plugin.ServeOpts{
 				ProviderFunc: pingaccess.Provider,
 			})
diff --git a/pingaccess/resource_pingaccess_application.go b/pingaccess/resource_pingaccess_application.go
index d2e05d43..5d788c20 100644
--- a/pingaccess/resource_pingaccess_application.go
+++ b/pingaccess/resource_pingaccess_application.go
@@ -31,6 +31,7 @@ func resourcePingAccessApplicationSchema() map[string]*schema.Schema {
 		"access_validator_id": {
 			Type:     schema.TypeInt,
 			Optional: true,
+			Default:  0,
 		},
 		"agent_id": {
 			Type: schema.TypeInt,
@@ -52,7 +53,7 @@ func resourcePingAccessApplicationSchema() map[string]*schema.Schema {
 		"case_sensitive_path": {
 			Type:     schema.TypeBool,
 			Optional: true,
-			Default:  true,
+			Default:  false,
 		},
 		"context_root": {
 			Type: schema.TypeString,
@@ -248,20 +249,14 @@ func resourcePingAccessApplicationReadData(d *schema.ResourceData) *models.Appli
 		SiteId:            Int(siteID),
 		SpaSupportEnabled: Bool(d.Get("spa_support_enabled").(bool)),
 		VirtualHostIds:    &vhIds,
+		CaseSensitivePath: Bool(d.Get("case_sensitive_path").(bool)),
+		AccessValidatorId: Int(d.Get("access_validator_id").(int)),
 	}
 
 	if *application.ApplicationType != "Dynamic" {
 		application.DefaultAuthType = application.ApplicationType
 	}
 
-	if _, ok := d.GetOk("access_validator_id"); ok {
-		application.AccessValidatorId = Int(d.Get("access_validator_id").(int))
-	}
-
-	if _, ok := d.GetOk("case_sensitive_path"); ok {
-		application.CaseSensitivePath = Bool(d.Get("case_sensitive_path").(bool))
-	}
-
 	if _, ok := d.GetOk("description"); ok {
 		application.Description = String(d.Get("description").(string))
 	}
diff --git a/pingaccess/resource_pingaccess_application_test.go b/pingaccess/resource_pingaccess_application_test.go
index cebadc96..fb1ee650 100644
--- a/pingaccess/resource_pingaccess_application_test.go
+++ b/pingaccess/resource_pingaccess_application_test.go
@@ -228,6 +228,7 @@ func Test_resourcePingAccessApplicationReadData(t *testing.T) {
 			Application: models.ApplicationView{
 				Name:            String("engine1"),
 				ApplicationType: String("API"),
+				AccessValidatorId: Int(0),
 				AgentId:           Int(0),
 				CaseSensitivePath: Bool(true),
 				ContextRoot:       String("/"),
diff --git a/pingaccess/resource_pingaccess_https_listener.go b/pingaccess/resource_pingaccess_https_listener.go
index c195df29..3dde8e46 100644
--- a/pingaccess/resource_pingaccess_https_listener.go
+++ b/pingaccess/resource_pingaccess_https_listener.go
@@ -43,11 +43,10 @@ func resourcePingAccessHTTPSListenerSchema() map[string]*schema.Schema {
 	}
 }
 
-func resourcePingAccessHTTPSListenerCreate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+func resourcePingAccessHTTPSListenerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	svc := m.(paClient).HttpsListeners
-	input := httpsListeners.GetHttpsListenersCommandInput{}
-	result, _, err := svc.GetHttpsListenersCommand(&input)
+	result, _, err := svc.GetHttpsListenersCommand(&httpsListeners.GetHttpsListenersCommandInput{})
 	if err != nil {
 		return diag.Errorf("unable to retrieving listener: %s", err)
 	}
@@ -56,7 +55,7 @@ func resourcePingAccessHTTPSListenerCreate(_ context.Context, d *schema.Resource
 	for _, listener := range result.Items {
 		if *listener.Name == name {
 			d.SetId(listener.Id.String())
-			return resourcePingAccessHTTPSListenerReadResult(d, listener)
+			return resourcePingAccessHTTPSListenerUpdate(ctx, d, m)
 		}
 	}
 	return diag.Errorf("unable to manage listener: %s", err)

From 59b4209eab7f5f32230ffa7b721d5231ee1ac56e Mon Sep 17 00:00:00 2001
From: Iain Adams
Date: Tue, 7 Jul 2020 17:27:48 +0100
Subject: [PATCH 22/23] switch back to hsm provider for cross-version
 functional testing
---
 func-tests/hsm_provider.tf | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/func-tests/hsm_provider.tf b/func-tests/hsm_provider.tf
index 4ec7860e..819f64cc 100644
--- a/func-tests/hsm_provider.tf
+++ b/func-tests/hsm_provider.tf
@@ -1,12 +1,12 @@
 resource "pingaccess_hsm_provider" "test" {
   count = var.pa6 ? 1 : 0
-  class_name = "com.pingidentity.pa.hsm.pkcs11.plugin.PKCS11HsmProvider"
+  class_name = "com.pingidentity.pa.hsm.cloudhsm.plugin.AwsCloudHsmProvider"
   name = "demo"
   configuration = <
Date: Tue, 7 Jul 2020 17:41:45 +0100
Subject: [PATCH 23/23] refactoring setting of client credentials
---
 .../resource_pingaccess_oauth_server.go      | 21 +---------------
 pingaccess/resource_pingaccess_websession.go | 23 ++----------------
 pingaccess/structures.go                     | 24 +++++++++++++++++++
 3 files changed, 27 insertions(+), 41 deletions(-)

diff --git a/pingaccess/resource_pingaccess_oauth_server.go b/pingaccess/resource_pingaccess_oauth_server.go
index 8cd5e578..914aded3 100644
--- a/pingaccess/resource_pingaccess_oauth_server.go
+++ b/pingaccess/resource_pingaccess_oauth_server.go
@@ -134,26 +134,7 @@ func resourcePingAccessOAuthServerReadResult(d *schema.ResourceData, input *mode
 		diags = append(diags, diag.FromErr(err)...)
 	}
 	if input.ClientCredentials != nil && input.ClientCredentials.ClientSecret != nil {
-		pw, ok := d.GetOk("client_credentials.0.client_secret.0.value")
-		creds := flattenOAuthClientCredentialsView(input.ClientCredentials)
-		if trackPasswords {
-			enc, encOk := d.GetOk("client_credentials.0.client_secret.0.encrypted_value")
-			creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = pw
-			if err := d.Set("client_credentials", creds); err != nil {
-				diags = append(diags, diag.FromErr(err)...)
-			}
-			if encOk && enc.(string) != "" && enc.(string) != *input.ClientCredentials.ClientSecret.EncryptedValue {
-				creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = ""
-			}
-		} else {
-			//legacy behaviour
-			if ok {
-				creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = pw
-			}
-		}
-		if err := d.Set("client_credentials", creds); err != nil {
-			diags = append(diags, diag.FromErr(err)...)
-		}
+		setClientCredentials(d, input.ClientCredentials, trackPasswords, &diags)
 	}
 
 	return diags
diff --git a/pingaccess/resource_pingaccess_websession.go b/pingaccess/resource_pingaccess_websession.go
index ec78ea7f..804e4af8 100644
--- a/pingaccess/resource_pingaccess_websession.go
+++ b/pingaccess/resource_pingaccess_websession.go
@@ -226,27 +226,8 @@ func resourcePingAccessWebSessionReadResult(d *schema.ResourceData, input *model
 			diags = append(diags, diag.FromErr(err)...)
 		}
 	}
-	if input.ClientCredentials != nil {
-		pw, ok := d.GetOk("client_credentials.0.client_secret.0.value")
-		creds := flattenOAuthClientCredentialsView(input.ClientCredentials)
-		if trackPasswords {
-			enc, encOk := d.GetOk("client_credentials.0.client_secret.0.encrypted_value")
-			creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = pw
-			if err := d.Set("client_credentials", creds); err != nil {
-				diags = append(diags, diag.FromErr(err)...)
-			}
-			if encOk && enc.(string) != "" && enc.(string) != *input.ClientCredentials.ClientSecret.EncryptedValue {
-				creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = ""
-			}
-		} else {
-			//legacy behaviour
-			if ok {
-				creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = pw
-			}
-		}
-		if err := d.Set("client_credentials", creds); err != nil {
-			diags = append(diags, diag.FromErr(err)...)
-		}
+	if input.ClientCredentials != nil && input.ClientCredentials.ClientSecret != nil {
+		setClientCredentials(d, input.ClientCredentials, trackPasswords, &diags)
 	}
 	return diags
 }
diff --git a/pingaccess/structures.go b/pingaccess/structures.go
index 11049dd1..329a25ec 100644
--- a/pingaccess/structures.go
+++ b/pingaccess/structures.go
@@ -532,3 +532,27 @@ func hashString(s string) int {
 	// v == MinInt
 	return 0
 }
+
+
+func setClientCredentials(d *schema.ResourceData, input *models.OAuthClientCredentialsView, trackPasswords bool, diags *diag.Diagnostics) {
+	pw, ok := d.GetOk("client_credentials.0.client_secret.0.value")
+	creds := flattenOAuthClientCredentialsView(input)
+	if trackPasswords {
+		enc, encOk := d.GetOk("client_credentials.0.client_secret.0.encrypted_value")
+		creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = pw
+		if err := d.Set("client_credentials", creds); err != nil {
+			*diags = append(*diags, diag.FromErr(err)...)
+		}
+		if encOk && enc.(string) != "" && enc.(string) != *input.ClientSecret.EncryptedValue {
+			creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = ""
+		}
+	} else {
+		//legacy behaviour
+		if ok {
+			creds[0]["client_secret"].([]map[string]interface{})[0]["value"] = pw
+		}
+	}
+	if err := d.Set("client_credentials", creds); err != nil {
+		*diags = append(*diags, diag.FromErr(err)...)
+	}
+}
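Note for reviewers (not part of the patches above): every vendored service client in this series follows the same construct-and-call pattern, so a single usage sketch covers them all. The sketch below is based on the doc comments on each `New` function; the endpoint URL and admin credentials are illustrative assumptions, and the `Items`, `Host` and `Port` fields are assumed from the SDK's `models` package, which this diff does not include.

package main

import (
	"fmt"
	"log"

	"github.com/iwarapter/pingaccess-sdk-go/pingaccess/config"
	"github.com/iwarapter/pingaccess-sdk-go/services/virtualhosts"
)

func main() {
	// Assumed local admin API endpoint; adjust host, port and path for your deployment.
	cfg := config.NewConfig().
		WithUsername("Administrator").
		WithPassword("2FederateM0re").
		WithEndpoint("https://localhost:9000/pa-admin-api/v3")

	// Each service package exposes a New(cfg) constructor returning its client.
	svc := virtualhosts.New(cfg)

	// List virtual hosts; the input struct fields map one-to-one onto the
	// query parameters of GET /virtualhosts and may be left empty.
	out, _, err := svc.GetVirtualHostsCommand(&virtualhosts.GetVirtualHostsCommandInput{})
	if err != nil {
		log.Fatalf("unable to list virtual hosts: %s", err)
	}
	for _, vh := range out.Items {
		fmt.Printf("%s:%d\n", *vh.Host, *vh.Port)
	}
}

Each command returns the decoded view, the raw *http.Response and an error, so a caller can inspect status codes without re-reading the body; this is the contract the provider resources in patches 19-23 rely on.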