Commit babd97d4 authored by Zachary Seguin's avatar Zachary Seguin

Initial commit

parents
terraform.tfvars
terraform.tfstate*
.vscode/
.terraform/
kubeconfig
*.log
plan
> This terraform setup is targeted for use on the [Computer Science Club Cloud](https://csclub.cloud) and is set up with some of my personal computing systems (and therefore has some related hardcoded assumptions).
## Prerequisites
This terraform setup requires the following:
1. OpenStack environment
- Compute (Nova)
- Networking (Neutron)
- Load balancer (Neutron)
- Storage (Cinder)
2. PowerDNS
3. OpenID Connect authentication service
### Terraform Providers
- [github.com/inercia/terraform-provider-kubeadm](https://github.com/inercia/terraform-provider-kubeadm)
- [terraform-provider-powerdns #16](https://github.com/terraform-providers/terraform-provider-powerdns/pull/16)
# Kubernetes provider, pointed at the kubeconfig written out by the
# kubeadm cluster resource.
provider "kubernetes" {
  # BUG FIX: was "kubeamd.cluster.config_path" (typo) — an unresolvable
  # reference. Also drop the redundant 0.12 interpolation wrapper.
  config_path = kubeadm.cluster.config_path
}
# Unfortunately, the kubernetes provider doesn't allow
# us to send arbitrary YAML to it, so render the Calico manifest
# locally and apply it with kubectl on the first master over SSH.
resource "null_resource" "calico" {
  depends_on = [
    null_resource.master.0
  ]

  connection {
    type  = "ssh"
    user  = "debian"
    agent = true
    host  = openstack_networking_floatingip_v2.master.0.address
  }

  # Upload the manifest rendered with this cluster's pod CIDR.
  provisioner "file" {
    # 0.12 style: templatefile() needs no "${...}" wrapper.
    content     = templatefile("manifests/calico.yml", { pod_cidr = var.pod_cidr })
    destination = "/tmp/calico.yml"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /tmp/calico.yml"
    ]
  }
}
# Need to patch a few things to get the cluster going.
# NOTE: each JSON-patch "add" at an array index INSERTS at that index and
# shifts later elements, so the two adds at /subjects/2 below both take
# effect (shared-informers ends up at index 3) — the apparent duplicate
# path is intentional; do not "deduplicate" it.
resource "null_resource" "patch" {
  depends_on = [
    null_resource.master.0
  ]
  # kubectl is run on the first master via its floating IP.
  connection {
    type = "ssh"
    user = "debian"
    agent = true
    host = openstack_networking_floatingip_v2.master.0.address
  }
  provisioner "remote-exec" {
    inline = [
      # allow coredns to run on uninitialized nodes
      "sudo kubectl --kubeconfig /etc/kubernetes/admin.conf -n kube-system patch deployment coredns --type json -p '[ {\"op\":\"add\", \"path\":\"/spec/template/spec/tolerations/2\", \"value\":{ \"effect\": \"NoSchedule\", \"key\": \"node.cloudprovider.kubernetes.io/uninitialized\", \"value\": \"true\" }} ]'",
      # add required permissions for cloud-controller-manager
      "sudo kubectl --kubeconfig /etc/kubernetes/admin.conf patch clusterrolebindings system:cloud-controller-manager --type json -p '[ {\"op\":\"add\", \"path\":\"/subjects/1\", \"value\":{ \"kind\": \"ServiceAccount\", \"name\": \"pvl-controller\", \"namespace\": \"kube-system\" }},{\"op\":\"add\", \"path\":\"/subjects/2\", \"value\":{ \"kind\": \"ServiceAccount\", \"name\": \"shared-informers\", \"namespace\": \"kube-system\" }}, {\"op\":\"add\", \"path\":\"/subjects/2\", \"value\":{ \"kind\": \"ServiceAccount\", \"name\": \"cloud-node-controller\", \"namespace\": \"kube-system\" }} ]'",
      # pass ssl certs to cloud-controller-manager
      "sudo kubectl --kubeconfig /etc/kubernetes/admin.conf -n kube-system patch daemonset cloud-controller-manager --type json -p '[ {\"op\":\"add\", \"path\":\"/spec/template/spec/volumes/1\", \"value\":{ \"name\": \"ca-certs\", \"hostPath\": { \"path\": \"/etc/ssl/certs/\", \"type\": \"DirectoryOrCreate\"} , \"mountPath\": \"/etc/ssl/certs\" }}, {\"op\":\"add\", \"path\":\"/spec/template/spec/containers/0/volumeMounts/1\", \"value\":{ \"name\": \"ca-certs\", \"mountPath\": \"/etc/ssl/certs\", \"readOnly\": true }} ]'"
    ]
  }
}
# A record for the Kubernetes API endpoint, pointing at the API
# load balancer's floating IP.
resource "powerdns_record" "api" {
  type = "A"
  ttl  = 60

  zone = "${var.clusterid}.zacharyseguin.ca."
  name = "api.${var.clusterid}.zacharyseguin.ca."

  records = [openstack_networking_floatingip_v2.api.address]
}
# Internal A record per master, pointing at its fixed (tenant network) IP.
resource "powerdns_record" "master" {
  count = var.master_count
  zone  = "${var.clusterid}.zacharyseguin.ca."
  name  = "master-${count.index}.${var.clusterid}.zacharyseguin.ca."
  type  = "A"
  ttl   = 60
  # Reference the matching instance directly rather than indexing a full
  # splat list — avoids tying each record to every master instance.
  records = [openstack_compute_instance_v2.master[count.index].network[0].fixed_ip_v4]
}
# Public A record per master, pointing at its floating IP.
resource "powerdns_record" "master-public" {
  count = var.master_count
  zone  = "${var.clusterid}.zacharyseguin.ca."
  name  = "master-${count.index}-public.${var.clusterid}.zacharyseguin.ca."
  type  = "A"
  ttl   = 60
  # Direct per-instance reference instead of splat-list indexing.
  records = [openstack_networking_floatingip_v2.master[count.index].address]
}
# Internal A record per worker, pointing at its fixed (tenant network) IP.
resource "powerdns_record" "worker" {
  count = var.worker_count
  zone  = "${var.clusterid}.zacharyseguin.ca."
  name  = "worker-${count.index}.${var.clusterid}.zacharyseguin.ca."
  type  = "A"
  ttl   = 60
  # Drop the legacy "${...}" wrapper (0.12 style, matching the master
  # record) and reference the instance directly by index.
  records = [openstack_compute_instance_v2.worker[count.index].network[0].fixed_ip_v4]
}
# Public A record per worker, pointing at its floating IP.
resource "powerdns_record" "worker-public" {
  count = var.worker_count
  zone  = "${var.clusterid}.zacharyseguin.ca."
  name  = "worker-${count.index}-public.${var.clusterid}.zacharyseguin.ca."
  type  = "A"
  ttl   = 60
  # Direct per-instance reference instead of splat-list indexing.
  records = [openstack_networking_floatingip_v2.worker[count.index].address]
}
# Cluster-wide kubeadm configuration (terraform-provider-kubeadm).
# The rendered kubeconfig is written to ./kubeconfig and consumed by the
# kubernetes provider and the per-node kubeadm provisioners.
resource "kubeadm" "cluster" {
  config_path = "./kubeconfig"
  api {
    # External IP for the load balancer
    external = "api.${var.clusterid}.zacharyseguin.ca"
    # If necessary..
    # alt_names = "IP=ip,DNS=name"
  }
  network {
    dns_domain = "${var.clusterid}.cluster.zacharyseguin.ca"
    pods = "${var.pod_cidr}"
    services = "${var.services_cidr}"
  }
  addons {
    helm = "false"
    dashboard = "false"
  }
  # OpenStack cloud-provider config, rendered verbatim into the cluster.
  # NOTE: this embeds the OpenStack password in plain text (and therefore
  # in Terraform state) — handle state accordingly.
  cloud {
    provider = "openstack"
    config = <<EOF
[Global]
auth-url="${var.openstack_auth_url}"
username="${var.openstack_user_name}"
password="${var.openstack_password}"
region="${var.openstack_region}"
tenant-name="${var.openstack_tenant_name}"
domain-name="${var.openstack_domain_name}"
[BlockStorage]
[LoadBalancer]
subnet-id=${openstack_networking_subnet_v2.node.id}
floating-network-id=${var.floating_network_id}
lb-method=ROUND_ROBIN
create-monitor=yes
monitor-delay=1m
monitor-timeout=30s
monitor-max-retries=3
EOF
  }
  runtime {
    engine = "containerd"
    extra_args {
      # OIDC flags passed to the kube-apiserver for user authentication.
      api_server = {
        "oidc-issuer-url" = var.oidc_issuer_url
        "oidc-client-id" = var.oidc_client_id
        "oidc-username-claim" = var.oidc_username_claim
        "oidc-groups-claim" = var.oidc_groups_claim
      }
    }
  }
}
# Load balancer fronting the Kubernetes API servers on the node subnet.
resource "openstack_lb_loadbalancer_v2" "api" {
  name          = "${var.clusterid}-api"
  vip_subnet_id = openstack_networking_subnet_v2.node.id
}
# TCP listener for the API server port (6443) on the API load balancer.
resource "openstack_lb_listener_v2" "api" {
  name            = "${var.clusterid}-api-6443"
  loadbalancer_id = openstack_lb_loadbalancer_v2.api.id
  default_pool_id = openstack_lb_pool_v2.api.id

  protocol      = "TCP"
  protocol_port = 6443
}
# Backend pool of master API servers, balanced round-robin.
resource "openstack_lb_pool_v2" "api" {
  name            = "${var.clusterid}-api-6443"
  loadbalancer_id = openstack_lb_loadbalancer_v2.api.id

  protocol  = "TCP"
  lb_method = "ROUND_ROBIN"
}
# TCP health monitor for the API pool members.
resource "openstack_lb_monitor_v2" "monitor_1" {
  # 0.12 style: plain reference, no "${...}" wrapper (consistent with
  # the other LB resources in this file).
  pool_id     = openstack_lb_pool_v2.api.id
  name        = "${var.clusterid}-api-6443"
  type        = "TCP"
  delay       = 5
  timeout     = 5
  max_retries = 3
}
# One pool member per master, registered by its fixed IP on port 6443.
resource "openstack_lb_member_v2" "api" {
  count   = var.master_count
  pool_id = openstack_lb_pool_v2.api.id

  # Direct per-instance references instead of splat-list indexing.
  address       = openstack_compute_instance_v2.master[count.index].network[0].fixed_ip_v4
  name          = openstack_compute_instance_v2.master[count.index].name
  protocol_port = 6443
  subnet_id     = openstack_networking_subnet_v2.node.id
}
# Attach the API floating IP to the load balancer's VIP port.
resource "openstack_networking_floatingip_associate_v2" "api" {
  port_id     = openstack_lb_loadbalancer_v2.api.vip_port_id
  floating_ip = openstack_networking_floatingip_v2.api.address
}
This diff is collapsed.
# Flavours
# Look up the compute flavours (sizes) for master and worker nodes by the
# names supplied in variables.
data "openstack_compute_flavor_v2" "master" {
  name = var.master_flavour
}
data "openstack_compute_flavor_v2" "worker" {
  name = var.worker_flavour
}
# Machines
# One network port per master on the node subnet. The pod CIDR is added as
# an allowed address pair so pod traffic is not dropped by port security.
resource "openstack_networking_port_v2" "master-port" {
  name       = "${var.clusterid}-master-${count.index}"
  count      = var.master_count
  network_id = openstack_networking_network_v2.node.id
  fixed_ip {
    subnet_id = openstack_networking_subnet_v2.node.id
  }
  admin_state_up = "true"
  # 0.12 style: plain references, no "${...}" wrappers (consistent with
  # the worker port below).
  security_group_ids = [
    openstack_networking_secgroup_v2.cluster.id,
    openstack_networking_secgroup_v2.master.id,
  ]
  allowed_address_pairs {
    ip_address = var.pod_cidr
  }
}
# Master node instances, attached to their pre-created ports and
# bootstrapped via the shared cloud-init config.
resource "openstack_compute_instance_v2" "master" {
  name = "${var.clusterid}-master-${count.index}"
  metadata = {
    name    = "${var.clusterid}-master-${count.index}"
    cluster = var.clusterid
  }
  count     = var.master_count
  image_id  = var.image_id
  flavor_id = data.openstack_compute_flavor_v2.master.id
  key_pair  = var.key_pair
  network {
    port = openstack_networking_port_v2.master-port[count.index].id
  }
  # 0.12 style: file() needs no "${...}" wrapper.
  user_data = file("scripts/cloudconfig")
}
# Per-master provisioning: waits for cloud-init, then runs kubeadm.
# The first master (index 0) initializes the cluster; the rest join it.
resource "null_resource" "master" {
  count = var.master_count
  depends_on = [
    openstack_compute_instance_v2.master,
    openstack_compute_floatingip_associate_v2.master,
    openstack_networking_floatingip_associate_v2.api,
    openstack_networking_secgroup_rule_v2.ingress_ssh
  ]
  # Re-run provisioning whenever the underlying instance is replaced.
  triggers = {
    compute_instance = openstack_compute_instance_v2.master[count.index].id
  }
  connection {
    type = "ssh"
    user = "debian"
    agent = true
    host = openstack_networking_floatingip_v2.master.*.address[count.index]
  }
  # cloud-init's runcmd touches /configured when node setup is complete.
  provisioner "remote-exec" {
    inline = [
      "bash -c \"until [ -f /configured ]; do echo 'Waiting for cloud-init to complete...'; sleep 5; done\""
    ]
  }
  provisioner "kubeadm" {
    config = kubeadm.cluster.config
    # Empty join string => "kubeadm init" on master 0; otherwise join the
    # first master's fixed IP.
    join = "${count.index == 0 ? "" : openstack_compute_instance_v2.master.0.network.0.fixed_ip_v4}"
    role = "master"
    install {
      auto = false
      inline = "${file("scripts/nodesetup.sh")}"
    }
  }
  # Drain the node from the cluster before the resource is destroyed.
  provisioner "kubeadm" {
    when = "destroy"
    config = kubeadm.cluster.config
    drain = true
  }
}
# Attach each master's floating IP to its instance.
resource "openstack_compute_floatingip_associate_v2" "master" {
  count = var.master_count
  # Direct per-element references instead of splat-list indexing.
  floating_ip = openstack_networking_floatingip_v2.master[count.index].address
  instance_id = openstack_compute_instance_v2.master[count.index].id
}
# Network
# External (provider) network used for floating IPs and router uplink.
data "openstack_networking_network_v2" "extnet" {
  name = var.network_name
}
# Tenant network that all cluster nodes attach to.
resource "openstack_networking_network_v2" "node" {
  name = "${var.clusterid}_node_network"
  admin_state_up = "true"
}
# Subnet pool covering the node CIDR; the node subnet allocates from it.
resource "openstack_networking_subnetpool_v2" "node" {
  name = "${var.clusterid}_node_pool"
  ip_version = 4
  prefixes = [var.node_cidr]
}
# IPv4 subnet for cluster nodes.
resource "openstack_networking_subnet_v2" "node" {
  name = "${var.clusterid}_node_subnet"
  network_id = openstack_networking_network_v2.node.id
  cidr = var.node_cidr
  ip_version = 4
  subnetpool_id = openstack_networking_subnetpool_v2.node.id
  # Hardcoded resolvers — presumably UW/CSC DNS servers (one of the
  # README's "hardcoded assumptions"); consider making this a variable.
  dns_nameservers = ["129.97.134.4", "129.97.18.20"]
}
# Router connecting the node network to the external network.
resource "openstack_networking_router_v2" "cluster" {
  name = "${var.clusterid}_node"
  admin_state_up = true
  external_network_id = data.openstack_networking_network_v2.extnet.id
}
# Static route for 129.97.0.0/16 via a hardcoded gateway.
# NOTE(review): 129.97.0.0/16 is the UW campus range, so the name
# "uw-internet" looks mislabelled relative to the "uw-intranet" routes
# below — confirm intent before renaming (renaming changes the address).
resource "openstack_networking_router_route_v2" "uw-internet" {
  router_id = openstack_networking_router_v2.cluster.id
  destination_cidr = "129.97.0.0/16"
  next_hop = "172.19.134.1"
}
# Static route for the RFC1918 172.16/12 intranet range via the campus
# gateway.
resource "openstack_networking_router_route_v2" "uw-intranet_1" {
  router_id = openstack_networking_router_v2.cluster.id
  # BUG FIX: was "172.12.0.0/16", which is publicly routable address
  # space; the RFC1918 private block (matching the 10.0.0.0/8 sibling
  # route's intent) is 172.16.0.0/12.
  destination_cidr = "172.16.0.0/12"
  next_hop = "172.19.134.1"
}
# Static route for the RFC1918 10.0.0.0/8 intranet range via the campus
# gateway.
resource "openstack_networking_router_route_v2" "uw-intranet_2" {
  router_id = openstack_networking_router_v2.cluster.id
  destination_cidr = "10.0.0.0/8"
  next_hop = "172.19.134.1"
}
# Attach the node subnet to the cluster router.
resource "openstack_networking_router_interface_v2" "cluster" {
  router_id = openstack_networking_router_v2.cluster.id
  subnet_id = openstack_networking_subnet_v2.node.id
}
# Floating IPs
# Floating IP for the API load balancer VIP.
resource "openstack_networking_floatingip_v2" "api" {
  pool = data.openstack_networking_network_v2.extnet.name
  description = "${var.clusterid}-api"
}
# One floating IP per master node.
resource "openstack_networking_floatingip_v2" "master" {
  count = var.master_count
  pool  = data.openstack_networking_network_v2.extnet.name
  # Build the description from variables (identical value to the
  # instance's name) instead of referencing the instance attribute, so
  # floating IPs no longer depend on the instances being created first.
  description = "${var.clusterid}-master-${count.index}"
}
# One floating IP per worker node.
resource "openstack_networking_floatingip_v2" "worker" {
  count = var.worker_count
  pool  = data.openstack_networking_network_v2.extnet.name
  # Same value as the worker instance name, but without depending on the
  # instance resource (lets FIPs be created in parallel with instances).
  description = "${var.clusterid}-worker-${count.index}"
}
# OpenStack
# Credentials come from variables (typically terraform.tfvars, which is
# gitignored).
provider "openstack" {
  # 0.12 style: plain references, no "${...}" wrappers.
  user_name   = var.openstack_user_name
  tenant_name = var.openstack_tenant_name
  domain_name = var.openstack_domain_name
  password    = var.openstack_password
  auth_url    = var.openstack_auth_url
  region      = var.openstack_region
}
# PowerDNS
provider "powerdns" {
  # 0.12 style: plain references, no "${...}" wrappers.
  server_url = var.powerdns_url
  api_key    = var.powerdns_api_key
}
#cloud-config
# Shared cloud-init config for all cluster nodes (scripts/cloudconfig).
# Prepares the OS for kubeadm: kernel modules, sysctls, base packages.
locale: en_CA.UTF-8
timezone: America/Toronto

# Set repositories (CSC mirror for Debian and Debian security)
apt:
  primary:
    - arches: [default]
      uri: http://mirror.csclub.uwaterloo.ca/debian
  security:
    - arches: [default]
      uri: http://mirror.csclub.uwaterloo.ca/debian-security

# Write configuration files
write_files:
  # Load br_netfilter at boot (required for bridged pod traffic to hit
  # iptables)
  - content: |
      br_netfilter
    path: /etc/modules-load.d/netfilter.conf
  # Persist IP forwarding (required by kube-proxy/CNI routing)
  - content: |
      net.ipv4.ip_forward = 1
    path: /etc/sysctl.d/99-enable-ipforwarding.conf

# Upgrade packages
package_reboot_if_required: false
package_update: true
package_upgrade: true

# Install additional packages
packages:
  - apt-transport-https
  - curl
  - make
  - bridge-utils

# Run commands; /configured is the sentinel file the Terraform
# remote-exec provisioners poll for before running kubeadm.
runcmd:
  - modprobe br_netfilter
  - sysctl -w net.ipv4.ip_forward=1
  - touch /configured
# Add kubernetes repository
# BUG FIX: "apt-key add" needs "-" to read the piped key from stdin;
# without it the key was never imported and "apt update" would fail on
# the unsigned kubernetes repo.
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo 'deb http://apt.kubernetes.io/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list

# Install packages (requires prerequisites from cloud-init:
# apt-transport-https, curl)
apt update
apt install -y kubeadm kubelet

# Hold package versions so unattended upgrades cannot skew cluster versions
apt-mark hold kubeadm kubelet

# Install runc
RUNC_VERSION=1.0.0-rc8
curl -Lo /usr/local/sbin/runc https://github.com/opencontainers/runc/releases/download/v$RUNC_VERSION/runc.amd64
chmod 755 /usr/local/sbin/runc

# Install containerd
CONTAINERD_VERSION=1.2.7
curl -Lo containerd.tar.gz https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz
tar xf containerd.tar.gz -C /usr/local
rm containerd.tar.gz
curl -Lo /etc/systemd/system/containerd.service https://raw.githubusercontent.com/containerd/cri/master/contrib/systemd-units/containerd.service
systemctl daemon-reload
systemctl enable containerd
systemctl start containerd

# Point crictl at the containerd socket
echo "runtime-endpoint: unix:///run/containerd/containerd.sock" > /etc/crictl.yaml
# Security group applied to every cluster node (masters and workers).
resource "openstack_networking_secgroup_v2" "cluster" {
  name = "${var.clusterid}_cluster"
  description = "Security group for Kubernetes cluster"
}
# Allow all IPv4 traffic between members of the cluster group
# (no protocol/port restriction since remote_group_id is set).
resource "openstack_networking_secgroup_rule_v2" "ingress_internal" {
  direction = "ingress"
  ethertype = "IPv4"
  remote_group_id = openstack_networking_secgroup_v2.cluster.id
  security_group_id = openstack_networking_secgroup_v2.cluster.id
}
# Allow SSH from anywhere — needed by the Terraform provisioners that
# connect via floating IPs. NOTE(review): 0.0.0.0/0 is intentionally open
# here; consider restricting to a management CIDR.
resource "openstack_networking_secgroup_rule_v2" "ingress_ssh" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = 22
  port_range_max = 22
  remote_ip_prefix = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.cluster.id
}
# Allow ICMP (ping/path MTU) from anywhere.
resource "openstack_networking_secgroup_rule_v2" "ingress_icmp" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "icmp"
  remote_ip_prefix = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.cluster.id
}
# Allow TCP to the Kubernetes NodePort service range (30000-32767).
resource "openstack_networking_secgroup_rule_v2" "ingress_nodeport_tcp" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  remote_ip_prefix = "0.0.0.0/0"
  port_range_min = 30000
  port_range_max = 32767
  security_group_id = openstack_networking_secgroup_v2.cluster.id
}
# Allow UDP to the Kubernetes NodePort service range (30000-32767).
resource "openstack_networking_secgroup_rule_v2" "ingress_nodeport_udp" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "udp"
  remote_ip_prefix = "0.0.0.0/0"
  port_range_min = 30000
  port_range_max = 32767
  security_group_id = openstack_networking_secgroup_v2.cluster.id
}
# Additional security group applied only to master nodes.
resource "openstack_networking_secgroup_v2" "master" {
  name = "${var.clusterid}_master"
  description = "Security group for Master"
}
# Allow the Kubernetes API server port (6443) on masters from anywhere,
# so the load balancer (and external clients) can reach it.
resource "openstack_networking_secgroup_rule_v2" "ingress_api_server" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = 6443
  port_range_max = 6443
  remote_ip_prefix = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.master.id
}
# POWERDNS
variable "powerdns_url" {
  description = "Base URL of the PowerDNS API server."
}
variable "powerdns_api_key" {
  description = "API key used to authenticate against the PowerDNS API."
}

# OPENSTACK
variable "openstack_auth_url" {
  description = "OpenStack Keystone authentication URL."
}
variable "openstack_user_name" {
  description = "OpenStack user name."
}
variable "openstack_password" {
  description = "OpenStack password (keep out of version control)."
}
variable "openstack_tenant_name" {
  description = "OpenStack tenant/project name."
}
variable "openstack_domain_name" {
  description = "OpenStack domain name."
}
variable "openstack_region" {
  description = "OpenStack region."
}
variable "network_name" {
  description = "Name of the external network used for floating IPs."
}
variable "key_pair" {
  description = "Name of the OpenStack keypair injected into instances."
}
variable "floating_network_id" {
  description = "ID of the floating-IP network, passed to the cloud provider's LoadBalancer config."
}
variable "image_id" {
  description = "Image ID used for all cluster instances."
}
variable "master_flavour" {
  description = "Flavour (size) name for master nodes."
}
variable "worker_flavour" {
  description = "Flavour (size) name for worker nodes."
}

# CLUSTER
variable "clusterid" {
  description = "Short cluster identifier; used in resource names and DNS zones."
}
variable "node_cidr" {
  description = "CIDR of the node (tenant) network."
}
variable "pod_cidr" {
  description = "CIDR for pod networking (Calico)."
}
variable "services_cidr" {
  description = "CIDR for Kubernetes service IPs."
}
variable "master_count" {
  description = "Number of master nodes."
}
variable "worker_count" {
  description = "Number of worker nodes."
}

# AUTH
variable "oidc_issuer_url" {
  description = "OIDC issuer URL for kube-apiserver authentication."
}
variable "oidc_client_id" {
  description = "OIDC client ID for kube-apiserver authentication."
}
variable "oidc_username_claim" {
  description = "OIDC claim used as the Kubernetes username."
  default = "name"
}
variable "oidc_groups_claim" {
  description = "OIDC claim used for Kubernetes group membership."
  default = "groups"
}
# One network port per worker on the node subnet. The pod CIDR is added
# as an allowed address pair so pod traffic is not dropped by port
# security.
resource "openstack_networking_port_v2" "worker-port" {
  name = "${var.clusterid}-worker-${count.index}"
  count = var.worker_count
  network_id = openstack_networking_network_v2.node.id
  fixed_ip {
    subnet_id = openstack_networking_subnet_v2.node.id
  }
  admin_state_up = "true"
  security_group_ids = [ openstack_networking_secgroup_v2.cluster.id ]
  allowed_address_pairs {
    ip_address = var.pod_cidr
  }
}
# Worker node instances, attached to their pre-created ports and
# bootstrapped via the shared cloud-init config.
# NOTE(review): unlike the master resource, no metadata block is set
# here — confirm whether that asymmetry is intentional.
resource "openstack_compute_instance_v2" "worker" {
  name      = "${var.clusterid}-worker-${count.index}"
  count     = var.worker_count
  image_id  = var.image_id
  flavor_id = data.openstack_compute_flavor_v2.worker.id
  key_pair  = var.key_pair
  network {
    port = openstack_networking_port_v2.worker-port[count.index].id
  }
  # 0.12 style: file() needs no "${...}" wrapper.
  user_data = file("scripts/cloudconfig")
}
resource "null_resource" "worker" {
count = var.worker_count
depends_on = [
openstack_compute_instance_v2.worker,
openstack_compute_floatingip_associate_v2.worker,
openstack_networking_floatingip_associate_v2.api,
openstack_networking_secgroup_rule_v2.ingress_ssh
]
triggers = {
compute_instance = openstack_compute_instance_v2.worker[count.index].id
}
connection {
type = "ssh"
user = "debian"
agent = true
host = openstack_networking_floatingip_v2.worker.*.address[count.index]
}
provisioner "remote-exec" {
inline = [
"bash -c \"until [ -f /configured ]; do echo 'Waiting for cloud-init to complete...'; sleep 5; done\""
]