Skip to content
This repository has been archived by the owner on Mar 29, 2023. It is now read-only.

Commit

Permalink
Merge pull request #1 from gruntwork-io/module-design
Browse files Browse the repository at this point in the history
[WIP] GKE Module Design
  • Loading branch information
robmorgan authored Jan 31, 2019
2 parents d6b8f35 + 8103773 commit 83d6816
Show file tree
Hide file tree
Showing 24 changed files with 1,639 additions and 3 deletions.
105 changes: 105 additions & 0 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
---
# CircleCI 2.0 pipeline: `build` (dep cache + pre-commit checks), then `test` (terratest against GCP).
# NOTE(review): the scraped source had all YAML indentation stripped; structure restored per the
# CircleCI 2.0 schema (anchors, jobs, steps, workflows).

# Shared executor and environment for every job.
defaults: &defaults
  machine: true
  environment:
    GRUNTWORK_INSTALLER_VERSION: v0.0.21
    TERRATEST_LOG_PARSER_VERSION: v0.13.13
    MODULE_CI_VERSION: v0.13.3
    TERRAFORM_VERSION: 0.11.8
    TERRAGRUNT_VERSION: NONE
    PACKER_VERSION: NONE
    GOLANG_VERSION: 1.11.2
    K8S_VERSION: v1.10.0
    KUBECONFIG: /home/circleci/.kube/config

# Reusable run step: install the Gruntwork CI helpers and configure terraform/go for the build.
install_gruntwork_utils: &install_gruntwork_utils
  name: install gruntwork utils
  command: |
    curl -Ls https://raw.githubusercontent.com/gruntwork-io/gruntwork-installer/master/bootstrap-gruntwork-installer.sh | bash /dev/stdin --version "${GRUNTWORK_INSTALLER_VERSION}"
    gruntwork-install --module-name "gruntwork-module-circleci-helpers" --repo "https://github.com/gruntwork-io/module-ci" --tag "${MODULE_CI_VERSION}"
    gruntwork-install --binary-name "terratest_log_parser" --repo "https://github.com/gruntwork-io/terratest" --tag "${TERRATEST_LOG_PARSER_VERSION}"
    configure-environment-for-gruntwork-module \
      --circle-ci-2-machine-executor \
      --terraform-version ${TERRAFORM_VERSION} \
      --terragrunt-version ${TERRAGRUNT_VERSION} \
      --packer-version ${PACKER_VERSION} \
      --use-go-dep \
      --go-version ${GOLANG_VERSION} \
      --go-src-path test

version: 2
jobs:
  build:
    <<: *defaults
    steps:
      - checkout
      - restore_cache:
          keys:
            - dep-v1-{{ checksum "test/Gopkg.lock" }}

      # Install gruntwork utilities
      - run:
          <<: *install_gruntwork_utils

      - save_cache:
          key: dep-v1-{{ checksum "test/Gopkg.lock" }}
          paths:
            - ./test/vendor

      # Fail the build if the pre-commit hooks don't pass. Note: if you run pre-commit install locally, these hooks will
      # execute automatically every time before you commit, ensuring the build never fails at this step!
      - run: pip install pre-commit==1.11.2
      - run: pre-commit install
      - run: pre-commit run --all-files

      # Hand the checked-out project plus the installed terraform/packer binaries to the test job.
      - persist_to_workspace:
          root: /home/circleci
          paths:
            - project
            - terraform
            - packer

  test:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /home/circleci
      - checkout
      # The workspace carries the terraform/packer installs from the build job; put them on the PATH.
      - run: echo 'export PATH=$HOME/terraform:$HOME/packer:$PATH' >> $BASH_ENV
      - run:
          <<: *install_gruntwork_utils
      - run:
          name: update gcloud
          command: |
            sudo apt-get remove -y google-cloud-sdk
            sudo /opt/google-cloud-sdk/bin/gcloud --quiet components update
            sudo /opt/google-cloud-sdk/bin/gcloud --quiet components update beta kubectl
      - run:
          name: run tests
          command: |
            mkdir -p /tmp/logs
            # required for gcloud and kubectl to authenticate correctly
            echo $GCLOUD_SERVICE_KEY | gcloud auth activate-service-account --key-file=-
            gcloud --quiet config set project ${GOOGLE_PROJECT_ID}
            gcloud --quiet config set compute/zone ${GOOGLE_COMPUTE_ZONE}
            # required for terraform and terratest to authenticate correctly
            echo $GCLOUD_SERVICE_KEY > /tmp/gcloud.json
            export GOOGLE_APPLICATION_CREDENTIALS="/tmp/gcloud.json"
            # run the tests
            run-go-tests --path test --timeout 60m | tee /tmp/logs/all.log
          no_output_timeout: 3600s
      # Split the terratest log into per-test files / junit output, even when the test step failed.
      - run:
          command: terratest_log_parser --testlog /tmp/logs/all.log --outputdir /tmp/logs
          when: always
      - store_artifacts:
          path: /tmp/logs
      - store_test_results:
          path: /tmp/logs

workflows:
  version: 2
  build-and-test:
    jobs:
      - build
      - test:
          requires:
            - build
7 changes: 5 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -21,5 +21,8 @@ out/
# Go best practices dictate that libraries should not include the vendor directory
vendor

# Ignore test data
.test_data/
# Folder used to store temporary test data by Terratest
.test-data

# Mock user-data log file
mock-user-data.log
6 changes: 6 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
# Pre-commit hook configuration (https://pre-commit.com): runs `terraform fmt` before each commit.
# NOTE(review): indentation restored — the scraped source had it flattened.
repos:
  - repo: https://github.com/gruntwork-io/pre-commit
    # NOTE(review): `sha` is the legacy pin key; newer pre-commit releases call it `rev`. Kept as
    # `sha` because CI pins pre-commit 1.11.2 (see .circleci/config.yml), which still accepts it.
    sha: v0.0.2
    hooks:
      - id: terraform-fmt
49 changes: 49 additions & 0 deletions examples/gke-regional-private-cluster/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# GKE Regional Private Cluster

This example creates a Regional Private GKE Cluster.

Regional GKE Clusters are high-availability clusters where the cluster master is
spread across multiple GCP zones. During a zonal outage, the Kubernetes control
plane and a subset of your nodes will still be available, provided that at least
1 zone that your cluster is running in is still available.

Regional control planes are accessible even during upgrades.

By default, regional clusters will create nodes across 3 zones in a region. If
you're interested in how nodes are distributed in regional clusters, read the
GCP docs about [balancing across zones](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#balancing_across_zones).

Nodes in a private cluster are only granted private IP addresses; they're not
accessible from the public internet, as part of a defense-in-depth strategy. A
private cluster can use a GCP HTTP(S) or Network load balancer to accept public
traffic, or an internal load balancer from within your VPC network.

Private clusters use [Private Google Access](https://cloud.google.com/vpc/docs/private-access-options)
to access Google APIs such as Stackdriver, and to pull container images from
Google Container Registry. To use other APIs and services over the internet, you
can use a [`gke-regional-public-cluster`](../gke-regional-public-cluster).
Private clusters are recommended for running most apps and services.

## Limitations

No region shares GPU types across all of its zones; you will need to
explicitly specify the zones your cluster runs in, in order to use GPUs.

Node Pools cannot be created in zones without a master cluster; you can update
the zones of your cluster master provided your new zones are within the
region your cluster is present in.

<!-- TODO(rileykarson): Clarify what this means when we find out — this is pulled
from the GKE docs. -->
Currently, you cannot use a proxy to reach the cluster master of a regional
cluster through its private IP address.

## How do you run these examples?

1. Install [Terraform](https://www.terraform.io/) v0.10.3 or later.
1. Make sure you have Python installed (version 2.x) and in your `PATH`.
1. Open `variables.tf`, and fill in any required variables that don't have a
default.
1. Run `terraform get`.
1. Run `terraform plan`.
1. If the plan looks good, run `terraform apply`.
37 changes: 37 additions & 0 deletions examples/gke-regional-public-cluster/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
# GKE Regional Public Cluster

This example creates a Regional Public GKE Cluster.

Regional GKE Clusters are high-availability clusters where the cluster master is
spread across multiple GCP zones. During a zonal outage, the Kubernetes control
plane and a subset of your nodes will still be available, provided that at least
1 zone that your cluster is running in is still available.

Regional control planes are accessible even during upgrades.

By default, regional clusters will create nodes across 3 zones in a region. If
you're interested in how nodes are distributed in regional clusters, read the
GCP docs about [balancing across zones](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#balancing_across_zones).

Nodes in a public cluster are accessible from the public internet; try using a
private cluster such as in [`gke-regional-private-cluster`](../gke-regional-private-cluster)
to limit access to/from your nodes. Private clusters are recommended for running
most apps and services.

## Limitations

No region shares GPU types across all of its zones; you will need to
explicitly specify the zones your cluster runs in, in order to use GPUs.

Node Pools cannot be created in zones without a master cluster; you can update
the zones of your cluster master provided your new zones are within the
region your cluster is present in.

## How do you run these examples?

1. Install [Terraform](https://learn.hashicorp.com/terraform/getting-started/install.html) v0.10.3 or later.
1. Open `variables.tf`, and fill in any required variables that don't have a
default.
1. Run `terraform get`.
1. Run `terraform plan`.
1. If the plan looks good, run `terraform apply`.
101 changes: 101 additions & 0 deletions examples/gke-regional-public-cluster/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY A GKE REGIONAL PUBLIC CLUSTER IN GOOGLE CLOUD
# This is an example of how to use the gke-cluster module to deploy a regional public Kubernetes cluster in GCP with a
# Load Balancer in front of it.
# ---------------------------------------------------------------------------------------------------------------------

# The google-beta provider is used here (and explicitly by the node pool resource below) —
# presumably because some of the features used were beta-only at the time; confirm before changing.
provider "google-beta" {
project = "${var.project}"
region = "${var.region}"
}

# Use Terraform 0.10.x so that we can take advantage of Terraform GCP functionality as a separate provider via
# https://github.com/terraform-providers/terraform-provider-google
terraform {
required_version = ">= 0.10.3"
}

# Deploy the GKE cluster control plane. The network and subnetwork it attaches to are the
# google_compute_network / google_compute_subnetwork resources defined at the bottom of this file.
module "gke_cluster" {
# When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
# to a specific version of the modules, such as the following example:
# source = "git::[email protected]:gruntwork-io/gke-cluster.git//modules/gke-cluster?ref=v0.0.1"
source = "../../modules/gke-cluster"

name = "${var.cluster_name}"

project = "${var.project}"
region = "${var.region}"
network = "${google_compute_network.main.name}"
subnetwork = "${google_compute_subnetwork.main.name}"
}

# Node Pool

# A single regional node pool ("main-pool") attached to the cluster created by the module above.
resource "google_container_node_pool" "node_pool" {
provider = "google-beta"

name = "main-pool"
project = "${var.project}"
region = "${var.region}"
cluster = "${module.gke_cluster.name}"

initial_node_count = "1"

# Let the GKE cluster autoscaler grow/shrink this pool between 1 and 5 nodes.
autoscaling {
min_node_count = "1"
max_node_count = "5"
}

# GKE automatically repairs unhealthy nodes and upgrades them to new GKE versions.
management {
auto_repair = "true"
auto_upgrade = "true"
}

node_config {
image_type = "COS"
machine_type = "n1-standard-1"

labels = {
all-pools-example = "true"
}

tags = ["main-pool-example"]
disk_size_gb = "30"
disk_type = "pd-standard"
preemptible = false

# Broad cloud-platform scope — presumably fine-grained access is meant to be controlled via
# the node service account's IAM roles instead; confirm intent.
oauth_scopes = [
"https://www.googleapis.com/auth/cloud-platform",
]
}

# The autoscaling block above changes the node count after creation, so ignore drift on
# initial_node_count rather than fighting the autoscaler on every plan.
lifecycle {
ignore_changes = ["initial_node_count"]
}

timeouts {
create = "30m"
update = "30m"
delete = "30m"
}
}

# TODO(rileykarson): Add proper VPC network config once we've made a VPC module

# Random suffix appended to the network/subnetwork names below so repeated deployments of this
# example don't collide on resource names.
resource "random_string" "suffix" {
length = 4
special = false
upper = false
}

# Dedicated network for the example; auto_create_subnetworks is disabled so the explicit
# subnetwork below is the only one created.
resource "google_compute_network" "main" {
name = "${var.cluster_name}-network-${random_string.suffix.result}"
auto_create_subnetworks = "false"
}

resource "google_compute_subnetwork" "main" {
name = "${var.cluster_name}-subnetwork-${random_string.suffix.result}"
ip_cidr_range = "10.0.0.0/17"
region = "${var.region}"
network = "${google_compute_network.main.self_link}"
}
22 changes: 22 additions & 0 deletions examples/gke-regional-public-cluster/outputs.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# Marked sensitive so the master IP is not echoed in plan/apply output.
output "cluster_endpoint" {
description = "The IP address of the cluster master."
sensitive = true
value = "${module.gke_cluster.endpoint}"
}

# NOTE(review): unlike the other outputs this one is not marked sensitive — the description says it
# is the *public* client certificate, which presumably makes that intentional; confirm.
output "client_certificate" {
description = "Public certificate used by clients to authenticate to the cluster endpoint."
value = "${module.gke_cluster.client_certificate}"
}

output "client_key" {
description = "Private key used by clients to authenticate to the cluster endpoint."
sensitive = true
value = "${module.gke_cluster.client_key}"
}

output "cluster_ca_certificate" {
description = "The public certificate that is the root of trust for the cluster."
sensitive = true
value = "${module.gke_cluster.cluster_ca_certificate}"
}
22 changes: 22 additions & 0 deletions examples/gke-regional-public-cluster/variables.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# These parameters must be supplied when consuming this module.
# ---------------------------------------------------------------------------------------------------------------------

variable "project" {
description = "The name of the GCP Project where all resources will be launched."
}

variable "region" {
description = "The Region in which all GCP resources will be launched."
}

# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These parameters have reasonable defaults.
# ---------------------------------------------------------------------------------------------------------------------

# Used both as the GKE cluster name and as the prefix for the network/subnetwork names in main.tf.
variable "cluster_name" {
description = "The name of the Kubernetes cluster."
default = "example-cluster"
}
1 change: 1 addition & 0 deletions examples/gke-zonal-public-cluster/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
TODO
Empty file.
1 change: 1 addition & 0 deletions main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@

Loading

0 comments on commit 83d6816

Please sign in to comment.