From 0344a71a780b4c22ee8c6a8c79b18099e4373fc8 Mon Sep 17 00:00:00 2001 From: KH <96801545+kleineshertz@users.noreply.github.com> Date: Fri, 5 Jul 2024 15:16:19 -0700 Subject: [PATCH] Added assumerole (#2) --- 5_undeploy.sh | 1 - README.md | 138 ++++++++++++++++- pkg/cld/cldaws/resources.go | 146 ++++++++++-------- pkg/cmd/capideploy/capideploy.go | 9 +- pkg/provider/deploy_provider.go | 69 ++++++++- .../scripts/common/iam_aws_credentials.sh | 14 -- sample.jsonnet | 4 - 7 files changed, 283 insertions(+), 98 deletions(-) diff --git a/5_undeploy.sh b/5_undeploy.sh index 02b1b1b..f452f9e 100755 --- a/5_undeploy.sh +++ b/5_undeploy.sh @@ -7,7 +7,6 @@ set -x # Print commands ./capideploy stop_services "*" -p sample.jsonnet -v >> undeploy.log -set -e # Exit on failure ./capideploy detach_volumes "bastion" -p sample.jsonnet -v >> undeploy.log ./capideploy delete_instances "*" -p sample.jsonnet -v >> undeploy.log ./capideploy delete_volumes "*" -p sample.jsonnet -v >> undeploy.log diff --git a/README.md b/README.md index c0d982f..38cde59 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,25 @@ # IAM settings -You can run capideploy under your AWS root account, but this is generally discouraged. Let's pretend that capideploy is executed by a third party and you want to grant that third party some specific permissions that allow that third party to create Capillaries deployment in AWS. Giving a third party access to your AWS resources is a standard practice and the recommended way to do that is to use IAM roles. This section discusses the AWS IAM preparation steps to create the necessary role structure. Basic familiarity with AWS console is required. +You can run capideploy under your AWS root account, but this is generally discouraged. Chances are you want to run capideploy as some IAM user, or even better, let's pretend that capideploy is executed by some third party or a temporary contractor. 
You want to grant that third party some specific permissions that allow that third party to create Capillaries deployment in your AWS workspace. Giving a third party access to your AWS resources is a standard practice and the recommended way to do that is to use IAM roles. This section discusses the AWS IAM preparation steps to create the necessary role structure. Basic familiarity with AWS console is required. ## Users and groups Let's assume all capideploy activities are performed on behalf of an IAM user named `UserCapideployOperator`. As a first step, create this user in `IAM->Users` section of AWS console. In `IAM->User groups`, create a group `GroupCapideployOperators` and add `UserCapideployOperator` to it. +Create credentials for `UserCapideployOperator` and save them in UserCapideployOperator.rc: +``` +export AWS_ACCESS_KEY_ID=AK... +export AWS_SECRET_ACCESS_KEY=... +export AWS_DEFAULT_REGION=us-east-1 +``` + +If you want to run capideploy under this account (not under some SaaS provider account as described below), run this .rc file before running capideploy, so AWS SDK can use those credentials. + ## Policies and roles ### PolicyAccessCapillariesTestbucket and RoleAccessCapillariesTestbucket -Your AWS deployment will need to read and write files from/to S3 bucket. As per [Capillaries S3 instructions](https://github.com/capillariesio/capillaries/blob/main/doc/s3.md), we assume that you already have an S3 bucket for your future Capillaries deployment, let's assume the name of the bucket is `capillaries-testbucket` and it has `Block all public access` setting on. And here is the key difference: +Your AWS deployment will need to read and write files from/to S3 bucket. 
As per [Capillaries S3 instructions](https://github.com/capillariesio/capillaries/blob/main/doc/s3.md), we assume that you already have an S3 bucket for your future Capillaries deployment, let's assume the name of the bucket is `capillaries-testbucket` (in fact, it will be more like `acmme-corp-prod-files`) and it has `Block all public access` setting on (assuming you do not want strangers to see your files). And here is the key difference: - Capillaries test S3 bucket access described in that doc uses user-based access model (bucket policy explicitly gives the user `arn:aws:iam:::user/UserAccessCapillariesTestbucket` access to the bucket); - capideploy S3 bucket access model uses a separate policy and a separate role with this policy attached, and Capillaries instances can assume that role. @@ -85,7 +94,6 @@ In IAM->Policies, create a customer-managed policy PolicyCapideployOperator: "Version": "2012-10-17", "Statement": [ { - "Sid": "PolicyCapideployOperatorCreateInfra", "Effect": "Allow", "Action": [ "ec2:AllocateAddress", @@ -139,7 +147,6 @@ In IAM->Policies, create a customer-managed policy PolicyCapideployOperator: "Resource": "*" }, { - "Sid": "PolicyCapideployOperatorPassRoleAccessBucket", "Effect": "Allow", "Action": "iam:PassRole", "Resource": "arn:aws:iam:::role/RoleAccessCapillariesTestbucket" @@ -160,6 +167,127 @@ grep -r -e "tClient\.[A-Za-z]*" --include "*.go" In `IAM->User groups->GroupCapideployOperators->Permissions`, attach `PolicyCapideployOperator`. +# IAM Settings - SaaS scenario + +capideploy can be executed by a third-party, like some SaaS provider or a contractor who needs access to your AWS resources. If you have to do that, the following additional settings are required. Assuming "you" are the "customer" of the SaaS provider. + +## SaaS user + +In SaaS provider console `IAM->Users`, create a new user `UserSaasCapideployOperator`. This will be the account capideply will be running under. 
Create credentials for `UserSaasCapideployOperator` and save them in UserSaasCapideployOperator.rc: +``` +export AWS_ACCESS_KEY_ID=AK... +export AWS_SECRET_ACCESS_KEY=... +export AWS_DEFAULT_REGION=us-east-1 +``` + +If you want to run capideploy under this SaaS account (not under your `UserCapideployOperator` account as described above), run this .rc file before running capideploy, so AWS SDK can use those credentials. + +## SaaS policy + +In SaaS provider console `IAM->Policies`, create a new policy `PolicySaasCapideployOperator` as follows: +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AllocateAddress", + "ec2:AssociateAddress", + "ec2:AssociateIamInstanceProfile", + "ec2:AssociateRouteTable", + "ec2:AttachInternetGateway", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateImage", + "ec2:CreateInternetGateway", + "ec2:CreateNatGateway", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateVpc", + "ec2:DeleteInternetGateway", + "ec2:DeleteNatGateway", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSnapshot", + "ec2:DeleteSubnet", + "ec2:DeleteVolume", + "ec2:DeleteVpc", + "ec2:DeregisterImage", + "ec2:DescribeAddresses", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInternetGateways", + "ec2:DescribeKeyPairs", + "ec2:DescribeNatGateways", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSnapshots", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVpcs", + "ec2:DetachInternetGateway", + "ec2:DetachVolume", + "ec2:ReleaseAddress", + "ec2:RunInstances", + "ec2:TerminateInstances", + "iam:GetInstanceProfile", + "tag:GetResources", + "iam:PassRole", + "sts:AssumeRole" + ], + "Resource": "*" + } + ] +} +``` + +This policy is very similar to your 
`PolicyCapideployOperator`, but there are two important differences: +- it allows `iam:PassRole` for *all* resources (because SaaS provider user will work with many customers, it will need access not only to your `arn:aws:iam:::role/RoleAccessCapillariesTestbucket`, but to all relevant roles from many customers) +- it allows `sts:AssumeRole`, capideploy will call AWS API `AssumeRole("arn:aws:iam:::role/RoleCapideployOperator", externalId)` when establishing an AWS service session, so it will create/delete all resources on your (``) behalf. + +Attach `PolicySaasCapideployOperator` to `UserSaasCapideployOperator`. + +## SaaS customer - trust UserSaasCapideployOperator + +In your AWS console's `IAM->Roles->RoleCapideployOperator->Trusted relationships`, add: +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::user/UserSaasCapideployOperator" + }, + "Action": "sts:AssumeRole", + "Condition": { + "StringEquals": { + "sts:ExternalId": "someExternalId" + } + } + } + ] +} +``` + +This will allow `UserSaasCapideployOperator` to perform all actions listed in your (customer's) `PolicyCapideployOperator` on your (customer's) AWS resources. + +## capideploy SaaS parameters + +If you want to run capideploy as SaaS provider's `UserSaasCapideployOperator`, make sure to specify `-r` and `-e` parameters, for example: +```shell +./capideploy list_deployment_resources -p sample.jsonnet -r arn:aws:iam:::role/RoleCapideployOperator -e someExternalId +``` + +They will tell capideploy to assume the specified role before performing any action, so it will look like someone from your AWS account performs them. + # Environment variables used by Capideploy Sample .rc file to run before Capildeploy contains variables used in the .jsonnet file: @@ -187,7 +315,7 @@ export CAPIDEPLOY_RABBITMQ_ADMIN_PASS=... export CAPIDEPLOY_RABBITMQ_USER_NAME=... export CAPIDEPLOY_RABBITMQ_USER_PASS=... 
-# ~/.aws/config: default/region (without it, AWS API will not locate S3 buckets) +# ~/.aws/config: default/region (without it, AWS API will not locate S3 buckets, it goes to /home/$SSH_USER/.aws/config) export CAPIDEPLOY_S3_AWS_DEFAULT_REGION=us-east-1 # Capideploy will use this instance profile when creating instances that need access to S3 bucket diff --git a/pkg/cld/cldaws/resources.go b/pkg/cld/cldaws/resources.go index a2d663d..a588dd6 100644 --- a/pkg/cld/cldaws/resources.go +++ b/pkg/cld/cldaws/resources.go @@ -14,34 +14,39 @@ import ( "github.com/capillariesio/capillaries-deploy/pkg/l" ) -type BilledState string +type ResourceBilledState string const ( - BilledStateUnknown BilledState = "unknown" - BilledStateBilled BilledState = "billed" - BilledStateUnbilled BilledState = "unbilled" + ResourceBilledStateUnknown ResourceBilledState = "unknown" + ResourceBilledStateActive ResourceBilledState = "active" + ResourceBilledStateTerminated ResourceBilledState = "terminated" ) +const DeploymentNameTagName string = "DeploymentName" +const DeploymentOperatorTagName string = "DeploymentOperator" +const DeploymentOperatorTagValue string = "capideploy" + type Resource struct { - Svc string - Type string - Id string - Name string - State string - Billed BilledState + DeploymentName string + Svc string + Type string + Id string + Name string + State string + BilledState ResourceBilledState } func (r *Resource) String() string { - return fmt.Sprintf("%s,%s,%s,%s,%s,%s", r.Svc, r.Type, r.Name, r.Id, r.State, r.Billed) + return fmt.Sprintf("%s, %s,%s,%s,%s,%s,%s", r.DeploymentName, r.Svc, r.Type, r.Name, r.Id, r.State, r.BilledState) } func arnToResource(arn string) Resource { r := Resource{ - Svc: "unknown", - Type: "unknown", - Id: "unknown", - State: "unknown", - Billed: BilledStateUnknown, + Svc: "unknown", + Type: "unknown", + Id: "unknown", + State: "unknown", + BilledState: ResourceBilledStateUnknown, } s := strings.Split(arn, "/") if len(s) >= 2 { @@ -57,50 +62,50 
@@ func arnToResource(arn string) Resource { return r } -func getInstanceBilledState(state types.InstanceStateName) BilledState { +func getInstanceBilledState(state types.InstanceStateName) ResourceBilledState { if state == types.InstanceStateNamePending || state == types.InstanceStateNameRunning { - return BilledStateBilled + return ResourceBilledStateActive } else { - return BilledStateUnbilled + return ResourceBilledStateTerminated } } -func getVolumeBilledState(state types.VolumeState) BilledState { +func getVolumeBilledState(state types.VolumeState) ResourceBilledState { if state == types.VolumeStateAvailable || state == types.VolumeStateCreating || state == types.VolumeStateInUse { - return BilledStateBilled + return ResourceBilledStateActive } else { - return BilledStateUnbilled + return ResourceBilledStateTerminated } } -func getNatGatewayBilledState(state types.NatGatewayState) BilledState { +func getNatGatewayBilledState(state types.NatGatewayState) ResourceBilledState { if state == types.NatGatewayStatePending || state == types.NatGatewayStateAvailable { - return BilledStateBilled + return ResourceBilledStateActive } else { - return BilledStateUnbilled + return ResourceBilledStateTerminated } } -func getVpcBilledState(state types.VpcState) BilledState { +func getVpcBilledState(state types.VpcState) ResourceBilledState { if state == types.VpcStatePending || state == types.VpcStateAvailable { - return BilledStateBilled + return ResourceBilledStateActive } else { - return BilledStateUnbilled + return ResourceBilledStateTerminated } } -func getImageBilledState(state types.ImageState) BilledState { +func getImageBilledState(state types.ImageState) ResourceBilledState { if state == types.ImageStateAvailable || state == types.ImageStateDisabled || state == types.ImageStateError || state == types.ImageStatePending || state == types.ImageStateTransient { - return BilledStateBilled + return ResourceBilledStateActive } else { - return BilledStateUnbilled + return 
ResourceBilledStateTerminated } } -func getSnapshotBilledState(_ types.SnapshotState) BilledState { - return BilledStateBilled +func getSnapshotBilledState(_ types.SnapshotState) ResourceBilledState { + return ResourceBilledStateActive } -func getResourceState(ec2Client *ec2.Client, goCtx context.Context, r *Resource) (string, BilledState, error) { +func getResourceState(ec2Client *ec2.Client, goCtx context.Context, r *Resource) (string, ResourceBilledState, error) { switch r.Svc { case "ec2": switch r.Type { @@ -109,7 +114,7 @@ func getResourceState(ec2Client *ec2.Client, goCtx context.Context, r *Resource) if err != nil { return "", "", err } - return *out.Addresses[0].PublicIp, BilledStateBilled, nil + return *out.Addresses[0].PublicIp, ResourceBilledStateActive, nil case "vpc": out, err := ec2Client.DescribeVpcs(goCtx, &ec2.DescribeVpcsInput{VpcIds: []string{r.Id}}) if err != nil { @@ -121,36 +126,36 @@ func getResourceState(ec2Client *ec2.Client, goCtx context.Context, r *Resource) if err != nil { return "", "", err } - return string(out.Subnets[0].State), BilledStateBilled, nil + return string(out.Subnets[0].State), ResourceBilledStateActive, nil case "security-group": _, err := ec2Client.DescribeSecurityGroups(goCtx, &ec2.DescribeSecurityGroupsInput{GroupIds: []string{r.Id}}) if err != nil { return "", "", err } - return "present", BilledStateBilled, nil + return "present", ResourceBilledStateActive, nil case "route-table": out, err := ec2Client.DescribeRouteTables(goCtx, &ec2.DescribeRouteTablesInput{RouteTableIds: []string{r.Id}}) if err != nil { if strings.Contains(err.Error(), "does not exist") { - return "doesnotexist", BilledStateUnbilled, nil + return "doesnotexist", ResourceBilledStateTerminated, nil } return "", "", err } - return fmt.Sprintf("%droutes", len(out.RouteTables[0].Routes)), BilledStateBilled, nil + return fmt.Sprintf("%droutes", len(out.RouteTables[0].Routes)), ResourceBilledStateActive, nil case "instance": out, err := 
ec2Client.DescribeInstances(goCtx, &ec2.DescribeInstancesInput{InstanceIds: []string{r.Id}}) if err != nil { return "", "", err } if len(out.Reservations) == 0 || len(out.Reservations[0].Instances) == 0 { - return "notfound", BilledStateUnbilled, nil + return "notfound", ResourceBilledStateTerminated, nil } return string(out.Reservations[0].Instances[0].State.Name), getInstanceBilledState(out.Reservations[0].Instances[0].State.Name), nil case "volume": out, err := ec2Client.DescribeVolumes(goCtx, &ec2.DescribeVolumesInput{VolumeIds: []string{r.Id}}) if err != nil { if strings.Contains(err.Error(), "does not exist") { - return "doesnotexist", BilledStateUnbilled, nil + return "doesnotexist", ResourceBilledStateTerminated, nil } return "", "", err } @@ -159,7 +164,7 @@ func getResourceState(ec2Client *ec2.Client, goCtx context.Context, r *Resource) out, err := ec2Client.DescribeNatGateways(goCtx, &ec2.DescribeNatGatewaysInput{NatGatewayIds: []string{r.Id}}) if err != nil { if strings.Contains(err.Error(), "was not found") { - return "notfound", BilledStateUnbilled, nil + return "notfound", ResourceBilledStateTerminated, nil } return "", "", err } @@ -168,16 +173,16 @@ func getResourceState(ec2Client *ec2.Client, goCtx context.Context, r *Resource) out, err := ec2Client.DescribeInternetGateways(goCtx, &ec2.DescribeInternetGatewaysInput{InternetGatewayIds: []string{r.Id}}) if err != nil { if strings.Contains(err.Error(), "does not exist") { - return "doesnotexist", BilledStateUnbilled, nil + return "doesnotexist", ResourceBilledStateTerminated, nil } return "", "", err } - return fmt.Sprintf("%dattachments", len(out.InternetGateways[0].Attachments)), BilledStateBilled, nil + return fmt.Sprintf("%dattachments", len(out.InternetGateways[0].Attachments)), ResourceBilledStateActive, nil case "image": out, err := ec2Client.DescribeImages(goCtx, &ec2.DescribeImagesInput{ImageIds: []string{r.Id}}) if err != nil { if strings.Contains(err.Error(), "does not exist") { - return 
"doesnotexist", BilledStateUnbilled, nil + return "doesnotexist", ResourceBilledStateTerminated, nil } return "", "", err } @@ -187,7 +192,7 @@ func getResourceState(ec2Client *ec2.Client, goCtx context.Context, r *Resource) out, err := ec2Client.DescribeSnapshots(goCtx, &ec2.DescribeSnapshotsInput{SnapshotIds: []string{r.Id}}) if err != nil { if strings.Contains(err.Error(), "does not exist") { - return "doesnotexist", BilledStateUnbilled, nil + return "doesnotexist", ResourceBilledStateTerminated, nil } return "", "", err } @@ -200,46 +205,53 @@ func getResourceState(ec2Client *ec2.Client, goCtx context.Context, r *Resource) } } -func getResourceNameTag(ec2Client *ec2.Client, goCtx context.Context, resourceId string) (string, error) { +func getResourceDeploymentNameAndNameTags(ec2Client *ec2.Client, goCtx context.Context, resourceId string) (string, string, error) { out, err := ec2Client.DescribeTags(goCtx, &ec2.DescribeTagsInput{Filters: []types.Filter{{ Name: aws.String("resource-id"), Values: []string{resourceId}}}}) if err != nil { - return "", err + return "", "", err } + deploymentNameTagValue := "" + resourceNameTagValue := "" for _, tagDesc := range out.Tags { if *tagDesc.Key == "Name" { - return *tagDesc.Value, nil + resourceNameTagValue = *tagDesc.Value + } else if *tagDesc.Key == DeploymentNameTagName { + deploymentNameTagValue = *tagDesc.Value } } - return "", nil + return deploymentNameTagValue, resourceNameTagValue, nil } -func GetResourcesByTag(tClient *tagging.Client, ec2Client *ec2.Client, goCtx context.Context, lb *l.LogBuilder, region string, tagName string, tagVal string) ([]string, error) { +func GetResourcesByTag(tClient *tagging.Client, ec2Client *ec2.Client, goCtx context.Context, lb *l.LogBuilder, region string, tagFilters []taggingTypes.TagFilter, readState bool) ([]*Resource, error) { resources := make([]*Resource, 0) paginationToken := "" for { out, err := tClient.GetResources(goCtx, &tagging.GetResourcesInput{ ResourcesPerPage: 
aws.Int32(100), PaginationToken: &paginationToken, - TagFilters: []taggingTypes.TagFilter{{Key: aws.String(tagName), Values: []string{tagVal}}}}) + TagFilters: tagFilters}) if err != nil { - return []string{}, err + return []*Resource{}, err } for _, rtMapping := range out.ResourceTagMappingList { res := arnToResource(*rtMapping.ResourceARN) - state, billedState, err := getResourceState(ec2Client, goCtx, &res) - if err != nil { - lb.Add(err.Error()) - } else { - res.State = state - res.Billed = billedState + if readState { + state, billedState, err := getResourceState(ec2Client, goCtx, &res) + if err != nil { + lb.Add(err.Error()) + } else { + res.State = state + res.BilledState = billedState + } } - name, err := getResourceNameTag(ec2Client, goCtx, res.Id) + deploymentName, resourceName, err := getResourceDeploymentNameAndNameTags(ec2Client, goCtx, res.Id) if err != nil { lb.Add(err.Error()) } else { - res.Name = name + res.DeploymentName = deploymentName + res.Name = resourceName } resources = append(resources, &res) } @@ -250,7 +262,11 @@ func GetResourcesByTag(tClient *tagging.Client, ec2Client *ec2.Client, goCtx con } sort.Slice(resources, func(i, j int) bool { - if resources[i].Svc < resources[j].Svc { + if resources[i].DeploymentName < resources[j].DeploymentName { + return true + } else if resources[i].DeploymentName > resources[j].DeploymentName { + return false + } else if resources[i].Svc < resources[j].Svc { return true } else if resources[i].Svc > resources[j].Svc { return false @@ -271,9 +287,5 @@ func GetResourcesByTag(tClient *tagging.Client, ec2Client *ec2.Client, goCtx con } }) - result := make([]string, len(resources)) - for i, r := range resources { - result[i] = r.String() - } - return result, nil + return resources, nil } diff --git a/pkg/cmd/capideploy/capideploy.go b/pkg/cmd/capideploy/capideploy.go index 0ebf3cb..1fe221f 100644 --- a/pkg/cmd/capideploy/capideploy.go +++ b/pkg/cmd/capideploy/capideploy.go @@ -19,6 +19,7 @@ import ( ) const ( 
+ CmdListDeployments string = "list_deployments" CmdListDeploymentResources string = "list_deployment_resources" CmdCreateFloatingIps string = "create_floating_ips" CmdDeleteFloatingIps string = "delete_floating_ips" @@ -124,6 +125,7 @@ Commands: %s -p %s -p %s -p + %s -p %s -p %s -p %s -p @@ -139,6 +141,7 @@ Commands: %s -p %s -p `, + CmdListDeployments, CmdListDeploymentResources, CmdCreateFloatingIps, @@ -238,6 +241,8 @@ func main() { argNumberOfRepetitions := commonArgs.Int("n", 1, "Number of repetitions") argShowProjectDetails := commonArgs.Bool("s", false, "Show project details (may contain sensitive info)") argIgnoreAttachedVolumes := commonArgs.Bool("i", false, "Ignore attached volumes on instance delete") + argAssumeRole := commonArgs.String("r", "", "A role from another AWS account to assume, act like a third-party service") + argAssumeRoleExternalId := commonArgs.String("e", "", "When a role from another AWS account is assumed, use this external-id (optional, but encouraged)") cmdStartTs := time.Now() @@ -254,6 +259,7 @@ func main() { var prjErr error singleThreadCommands := map[string]SingleThreadCmdHandler{ + CmdListDeployments: nil, CmdListDeploymentResources: nil, CmdCreateFloatingIps: nil, CmdDeleteFloatingIps: nil, @@ -277,10 +283,11 @@ func main() { log.Fatalf(prjErr.Error()) } - deployProvider, deployProviderErr := provider.DeployProviderFactory(project, context.TODO(), *argVerbosity) + deployProvider, deployProviderErr := provider.DeployProviderFactory(project, context.TODO(), &provider.AssumeRoleConfig{RoleArn: *argAssumeRole, ExternalId: *argAssumeRoleExternalId}, *argVerbosity) if deployProviderErr != nil { log.Fatalf(deployProviderErr.Error()) } + singleThreadCommands[CmdListDeployments] = deployProvider.ListDeployments singleThreadCommands[CmdListDeploymentResources] = deployProvider.ListDeploymentResources singleThreadCommands[CmdCreateFloatingIps] = deployProvider.CreateFloatingIps singleThreadCommands[CmdDeleteFloatingIps] = 
deployProvider.DeleteFloatingIps diff --git a/pkg/provider/deploy_provider.go b/pkg/provider/deploy_provider.go index 90a2be0..5d56994 100644 --- a/pkg/provider/deploy_provider.go +++ b/pkg/provider/deploy_provider.go @@ -7,8 +7,11 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi" + taggingTypes "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi/types" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/capillariesio/capillaries-deploy/pkg/cld/cldaws" "github.com/capillariesio/capillaries-deploy/pkg/l" "github.com/capillariesio/capillaries-deploy/pkg/prj" @@ -20,8 +23,6 @@ type AwsCtx struct { TaggingClient *resourcegroupstaggingapi.Client } -const TagCapiDeploy string = "CapiDeploy" - type DeployCtx struct { //PrjPair *prj.ProjectPair Project *prj.Project @@ -32,6 +33,7 @@ type DeployCtx struct { } type DeployProvider interface { GetCtx() *DeployCtx + ListDeployments() (l.LogMsg, error) ListDeploymentResources() (l.LogMsg, error) CreateFloatingIps() (l.LogMsg, error) DeleteFloatingIps() (l.LogMsg, error) @@ -62,19 +64,35 @@ func (p *AwsDeployProvider) GetCtx() *DeployCtx { return p.Ctx } -func DeployProviderFactory(project *prj.Project, goCtx context.Context, isVerbose bool) (DeployProvider, error) { +type AssumeRoleConfig struct { + RoleArn string + ExternalId string +} + +func DeployProviderFactory(project *prj.Project, goCtx context.Context, assumeRoleCfg *AssumeRoleConfig, isVerbose bool) (DeployProvider, error) { if project.DeployProviderName == prj.DeployProviderAws { cfg, err := config.LoadDefaultConfig(goCtx) if err != nil { return nil, err } + if assumeRoleCfg != nil && assumeRoleCfg.RoleArn != "" { + creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), assumeRoleCfg.RoleArn, + func(o *stscreds.AssumeRoleOptions) { + o.ExternalID = 
aws.String(assumeRoleCfg.ExternalId) + o.RoleSessionName = "third-party-capideploy-assumes-role-provided-by-customer" + }) + cfg.Credentials = aws.NewCredentialsCache(creds) + } + return &AwsDeployProvider{ Ctx: &DeployCtx{ Project: project, GoCtx: goCtx, IsVerbose: isVerbose, - Tags: map[string]string{TagCapiDeploy: project.DeploymentName}, + Tags: map[string]string{ + cldaws.DeploymentNameTagName: project.DeploymentName, + cldaws.DeploymentOperatorTagName: cldaws.DeploymentOperatorTagValue}, Aws: &AwsCtx{ Ec2Client: ec2.NewFromConfig(cfg), TaggingClient: resourcegroupstaggingapi.NewFromConfig(cfg), @@ -105,13 +123,52 @@ export BASTION_IP=%s prj.SshConfig.PrivateKeyPath, prj.SshConfig.BastionExternalIp) } +func (p *AwsDeployProvider) ListDeployments() (l.LogMsg, error) { + lb := l.NewLogBuilder(l.CurFuncName(), p.GetCtx().IsVerbose) + resources, err := cldaws.GetResourcesByTag(p.GetCtx().Aws.TaggingClient, p.GetCtx().Aws.Ec2Client, p.GetCtx().GoCtx, lb, p.GetCtx().Aws.Config.Region, + []taggingTypes.TagFilter{{Key: aws.String(cldaws.DeploymentOperatorTagName), Values: []string{cldaws.DeploymentOperatorTagValue}}}, false) + if err != nil { + return lb.Complete(err) + } + deploymentResCount := map[string]int{} + for _, res := range resources { + if deploymentNameCount, ok := deploymentResCount[res.DeploymentName]; ok { + deploymentResCount[res.DeploymentName] = deploymentNameCount + 1 + } else { + deploymentResCount[res.DeploymentName] = 1 + } + } + deploymentStrings := make([]string, len(deploymentResCount)) + deploymentIdx := 0 + totalResourceCount := 0 + for deploymentName, deploymentResCount := range deploymentResCount { + deploymentStrings[deploymentIdx] = fmt.Sprintf("%s,%d", deploymentName, deploymentResCount) + deploymentIdx++ + totalResourceCount += deploymentResCount + } + fmt.Printf("%s\n", strings.Join(deploymentStrings, "\n")) + fmt.Printf("Deployments: %d, resources: %d\n", len(deploymentResCount), totalResourceCount) + return lb.Complete(nil) +} func 
(p *AwsDeployProvider) ListDeploymentResources() (l.LogMsg, error) { lb := l.NewLogBuilder(l.CurFuncName(), p.GetCtx().IsVerbose) - resources, err := cldaws.GetResourcesByTag(p.GetCtx().Aws.TaggingClient, p.GetCtx().Aws.Ec2Client, p.GetCtx().GoCtx, lb, p.GetCtx().Aws.Config.Region, TagCapiDeploy, p.Ctx.Project.DeploymentName) + resources, err := cldaws.GetResourcesByTag(p.GetCtx().Aws.TaggingClient, p.GetCtx().Aws.Ec2Client, p.GetCtx().GoCtx, lb, p.GetCtx().Aws.Config.Region, + []taggingTypes.TagFilter{ + {Key: aws.String(cldaws.DeploymentOperatorTagName), Values: []string{cldaws.DeploymentOperatorTagValue}}, + {Key: aws.String(cldaws.DeploymentNameTagName), Values: []string{p.Ctx.Project.DeploymentName}}}, true) if err != nil { return lb.Complete(err) } - fmt.Printf("%s\n", strings.Join(resources, "\n")) + resourceStrings := make([]string, len(resources)) + activeCount := 0 + for resIdx, res := range resources { + resourceStrings[resIdx] = res.String() + if res.BilledState != cldaws.ResourceBilledStateTerminated { + activeCount++ + } + } + fmt.Printf("%s\n", strings.Join(resourceStrings, "\n")) + fmt.Printf("Total: %d, potentially billed: %d\n", len(resources), activeCount) return lb.Complete(nil) } diff --git a/pkg/rexec/scripts/common/iam_aws_credentials.sh b/pkg/rexec/scripts/common/iam_aws_credentials.sh index bdf7bd5..fb263c5 100644 --- a/pkg/rexec/scripts/common/iam_aws_credentials.sh +++ b/pkg/rexec/scripts/common/iam_aws_credentials.sh @@ -3,16 +3,6 @@ if [ "$SSH_USER" = "" ]; then exit 1 fi -# Not used, see associated instance profiles -# if [ "$S3_IAM_USER_AWS_ACCESS_KEY_ID" = "" ]; then -# echo Error, missing: S3_IAM_USER_AWS_ACCESS_KEY_ID=AK... -# exit 1 -# fi -# if [ "$S3_IAM_USER_AWS_SECRET_ACCESS_KEY" = "" ]; then -# echo Error, missing: S3_IAM_USER_AWS_SECRET_ACCESS_KEY=... 
-# exit 1 -# fi - if [ "$S3_AWS_DEFAULT_REGION" = "" ]; then echo Error, missing: S3_AWS_DEFAULT_REGION=us-east-1 exit 1 @@ -22,10 +12,6 @@ fi rm -fR /home/$SSH_USER/.aws mkdir -p /home/$SSH_USER/.aws -# sudo echo "[default]" > /home/$SSH_USER/.aws/credentials -# sudo echo "aws_access_key_id=$S3_IAM_USER_AWS_ACCESS_KEY_ID" >> /home/$SSH_USER/.aws/credentials -# sudo echo "aws_secret_access_key=$S3_IAM_USER_AWS_SECRET_ACCESS_KEY" >> /home/$SSH_USER/.aws/credentials - sudo echo "[default]" > /home/$SSH_USER/.aws/config sudo echo "region=$S3_AWS_DEFAULT_REGION" >> /home/$SSH_USER/.aws/config sudo echo "output=json" >> /home/$SSH_USER/.aws/config diff --git a/sample.jsonnet b/sample.jsonnet index 3440b75..ed78c1a 100644 --- a/sample.jsonnet +++ b/sample.jsonnet @@ -297,8 +297,6 @@ env: { CAPILLARIES_RELEASE_URL: '{CAPIDEPLOY_CAPILLARIES_RELEASE_URL}', OS_ARCH: os_arch, - // S3_IAM_USER_AWS_ACCESS_KEY_ID: '{CAPIDEPLOY_S3_IAM_USER_AWS_ACCESS_KEY_ID}', - // S3_IAM_USER_AWS_SECRET_ACCESS_KEY: '{CAPIDEPLOY_S3_IAM_USER_AWS_SECRET_ACCESS_KEY}', S3_AWS_DEFAULT_REGION: '{CAPIDEPLOY_S3_AWS_DEFAULT_REGION}', AMQP_URL: 'amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@' + rabbitmq_ip + '/', CASSANDRA_HOSTS: cassandra_hosts, @@ -497,8 +495,6 @@ INTERNAL_BASTION_IP: internal_bastion_ip, CAPILLARIES_RELEASE_URL: '{CAPIDEPLOY_CAPILLARIES_RELEASE_URL}', OS_ARCH: os_arch, - // S3_IAM_USER_AWS_ACCESS_KEY_ID: '{CAPIDEPLOY_S3_IAM_USER_AWS_ACCESS_KEY_ID}', - // S3_IAM_USER_AWS_SECRET_ACCESS_KEY: '{CAPIDEPLOY_S3_IAM_USER_AWS_SECRET_ACCESS_KEY}', S3_AWS_DEFAULT_REGION: '{CAPIDEPLOY_S3_AWS_DEFAULT_REGION}', AMQP_URL: 'amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@' + rabbitmq_ip + '/', CASSANDRA_HOSTS: cassandra_hosts,