adding asynchronous slave to galera cluster

The next part of the Galera cluster notes:
# add settings on master nodes to server.cnf

[mysqld]
gtid_domain_id=1
server_id=1
binlog_format=ROW
log_slave_updates=1
log_bin=binlog

# restart all nodes

# create replication user

MariaDB [(none)]> create user 'repl'@'%' identified by 'secret';
MariaDB [(none)]> grant all on *.* to 'repl'@'%';
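
A narrower grant is enough for a replica; a minimal alternative (same repl user as above):

MariaDB [(none)]> grant replication slave on *.* to 'repl'@'%';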

# make backup of current state

mysqldump --all-databases --single-transaction --triggers --routines --host=127.0.0.1 --user=root --password=secret > dump.sql
# copy it to slave
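# e.g. (the slave address 192.168.56.114 is just an illustration, use your own)
scp dump.sql root@192.168.56.114:/root/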

# configure new slave

# install same packages
# server.cnf
[mysqld]
server-id=2 # different!
log-bin=mysql-bin
binlog_format=ROW
relay_log=relay-log
gtid_domain_id=1
log_slave_updates=1

# start service, set root password

service mysql start
mysql_secure_installation
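# load the dump taken on the master before starting replication
# (assumes it was copied to /root/dump.sql as above)
mysql --user=root --password=secret < /root/dump.sql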

# check master position

MariaDB [(none)]>  SHOW MASTER STATUS\G
*************************** 1. row ***************************
            File: binlog.000002
        Position: 846
    Binlog_Do_DB:
Binlog_Ignore_DB:
1 row in set (0.00 sec)

MariaDB [(none)]> SELECT BINLOG_GTID_POS('binlog.00002', 846);
+--------------------------------------+
| BINLOG_GTID_POS('binlog.00002', 846) |
+--------------------------------------+
| NULL                                 |
+--------------------------------------+
1 row in set (0.00 sec)

# now on slave

MariaDB [(none)]> SET GLOBAL gtid_slave_pos = ''; # it was NULL above (likely because of the 'binlog.00002' vs 'binlog.000002' typo), so start from an empty GTID position
MariaDB [(none)]> CHANGE MASTER TO master_use_gtid=slave_pos;
MariaDB [(none)]> change master to master_host='192.168.56.111', master_port=3306, master_user='repl', master_password='secret';
MariaDB [(none)]> start slave;
Query OK, 0 rows affected, 1 warning (0.00 sec)

MariaDB [(none)]> show slave status\G;
*************************** 1. row ***************************
               Slave_IO_State: Waiting for master to send event
                  Master_Host: 192.168.56.111
                  Master_User: repl
                  Master_Port: 3306
                Connect_Retry: 60
              Master_Log_File: binlog.000002
          Read_Master_Log_Pos: 846
               Relay_Log_File: relay-log.000002
                Relay_Log_Pos: 1052
        Relay_Master_Log_File: binlog.000002
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
              Replicate_Do_DB:
          Replicate_Ignore_DB:
           Replicate_Do_Table:
       Replicate_Ignore_Table:
      Replicate_Wild_Do_Table:
  Replicate_Wild_Ignore_Table:
                   Last_Errno: 0
                   Last_Error:
                 Skip_Counter: 0
          Exec_Master_Log_Pos: 846
              Relay_Log_Space: 1343
              Until_Condition: None
               Until_Log_File:
                Until_Log_Pos: 0
           Master_SSL_Allowed: No
           Master_SSL_CA_File:
           Master_SSL_CA_Path:
              Master_SSL_Cert:
            Master_SSL_Cipher:
               Master_SSL_Key:
        Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
                Last_IO_Errno: 0
                Last_IO_Error:
               Last_SQL_Errno: 0
               Last_SQL_Error:
  Replicate_Ignore_Server_Ids:
             Master_Server_Id: 1
               Master_SSL_Crl:
           Master_SSL_Crlpath:
                   Using_Gtid: Slave_Pos
                  Gtid_IO_Pos: 1-1-4
1 row in set (0.00 sec)
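
To sanity-check the whole chain, create a throwaway database on any Galera node and confirm it shows up on the async slave (the database name here is just an example):

# on any Galera node
MariaDB [(none)]> create database repl_test;
# on the async slave
MariaDB [(none)]> show databases like 'repl_test';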

galera cluster (10.0) on centos 7

Some notes from installation:

# prepare repo

[root@galera01 vagrant]# cat /etc/yum.repos.d/maria.repo
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.0/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1

# install all necessary packages on all nodes

yum install socat
rpm --import https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
yum install mariadb-server
yum install https://www.percona.com/downloads/XtraBackup/Percona-XtraBackup-2.4.1/binary/redhat/7/x86_64/percona-xtrabackup-24-2.4.1-1.el7.x86_64.rpm

# configure server.cnf on all servers (important: add these settings to server.cnf itself, not a new extra file, or they won't be parsed!)

[galera]
# Mandatory settings
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_address="gcomm://192.168.56.111,192.168.56.112,192.168.56.113" # IPs of nodes

binlog_format=row
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2
bind-address=0.0.0.0

#Cluster Settings
wsrep_cluster_name="Test_Cluster"
wsrep_node_address="192.168.56.111" # own IP address
wsrep_node_name="galera01" #hostname / node name

# replication provider
wsrep_sst_method=xtrabackup
wsrep_sst_auth="sstuser:s3cretPass"

# Optional setting
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0

Other node (galera02):

[root@galera02 my.cnf.d]# cat server.cnf
#
# These groups are read by MariaDB server.
# Use it for options that only the server (but not clients) should see
#
# See the examples of server my.cnf files in /usr/share/mysql/
#

# this is read by the standalone daemon and embedded servers
[server]

# this is only for the mysqld standalone daemon
[mysqld]

#
# * Galera-related settings
#
[galera]
# Mandatory settings
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_address="gcomm://192.168.56.111,192.168.56.112,192.168.56.113"

binlog_format=row
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2
bind-address=0.0.0.0

#Cluster Settings
wsrep_cluster_name="Test_Cluster"
wsrep_node_address="192.168.56.112" # own IP address
wsrep_node_name="galera02" #hostname / node name

# replication provider
wsrep_sst_method=xtrabackup
wsrep_sst_auth="sstuser:s3cretPass"

# Optional setting
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0

Bootstrap the initial node

service mysql bootstrap
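
Note: the config above references an SST user (sstuser:s3cretPass) which still has to be created on the bootstrapped node before other nodes can join via xtrabackup. A sketch of that step (the privileges listed are the ones xtrabackup typically needs):

MariaDB [(none)]> create user 'sstuser'@'localhost' identified by 's3cretPass';
MariaDB [(none)]> grant reload, lock tables, process, replication client on *.* to 'sstuser'@'localhost';
MariaDB [(none)]> flush privileges;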

If you need to reset the password for the ‘root’ user…

# due to some bug you need to copy this file 
mkdir /share/mysql
cp /usr/share/mysql/english/errmsg.sys /share/mysql/
mysqld_safe --skip-grant-tables &
mysql -uroot mysql
MariaDB [(none)]> UPDATE mysql.user SET password = PASSWORD('secret') WHERE user = 'root' and host = 'localhost';
MariaDB [(none)]> flush privileges;
# kill all mysqld processes
ps -ef | grep mysql 
# boot up again
service mysql start

Now, to join another node to the cluster, simply start the MySQL service (assuming you have server.cnf configured):

[root@galera03 my.cnf.d]# service mysql start
Starting MySQL.171010 16:13:16 mysqld_safe Logging to '/var/lib/mysql/galera03.err'.
171010 16:13:16 mysqld_safe Starting mysqld daemon with databases from /var/lib/mysql
..SST in progress, setting sleep higher. SUCCESS!
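
A quick check from any member to confirm the node actually joined (not in the original notes, but handy):

MariaDB [(none)]> show status like 'wsrep_cluster_size';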

Done!

kvm/libvirt – edit default pool (storage)

When using Ubuntu's uvt-kvm tool (a wrapper around virsh), the default pool is called uvtool.

virsh pool-edit uvtool
# edit pool
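# (the storage location is the <path> element under <target> in the pool XML)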
virsh pool-destroy uvtool
virsh pool-start uvtool
root@me ~/ # uvt-simplestreams-libvirt sync arch=amd64 release=xenial
root@me ~/ # virsh vol-list uvtool
 Name                 Path
------------------------------------------------------------------------------
 x-uvt-b64-Y29tLnVidW50dS5jbG91ZDpzZXJ2ZXI6MTYuMDQ6YW1kNjQgMjAxNzA5MTk= /mnt/md3/concourse-worker/x-uvt-b64-Y29tLnVidW50dS5jbG91ZDpzZXJ2ZXI6MTYuMDQ6YW1kNjQgMjAxNzA5MTk=

implementing kube-lego with rbac on kubernetes using helm chart

Today we have the option to get TLS certificates for free – Let's Encrypt. This cuts both ways – malicious attackers can obtain certificates for typo-squatted domains and use them for phishing, while legitimate users can secure traffic on their websites… Let's Encrypt has an API we can interact with to request new certificates (these expire after 90 days). Someone was clever enough to build a container that does this for all ingress domains inside our cluster. Find the project site here.

# preparation
You will need a few elements:
– a namespace for kube-lego (not strictly required, but it's good to separate things)
– an RBAC role so kube-lego can access ingress resources
– a ConfigMap with the address of Let's Encrypt's production API and your e-mail
– the Deployment itself
– a new subchart

# raw code

# inside subcharts dir
mkdir -p kube-lego/templates
# cat Chart.yaml
name: kube-lego
version: 0.0.1-develop
# cat templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
    name: kube-lego
# cat templates/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: ingress-secret-admin
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs:
  - get
  - watch
  - list
  - create
  - update
  - patch
- apiGroups: [""]
  resources: ["services"]
  verbs:
  - get
  - create
- apiGroups: ["extensions"]
  resources: ["ingresses"]
  verbs:
  - get
  - watch
  - list
  - create
  - update
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kube-lego
roleRef:
  kind: ClusterRole
  name: ingress-secret-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: default
  namespace: kube-lego
# cat templates/configmap.yaml
apiVersion: v1
metadata:
  name: kube-lego
  namespace: kube-lego
data:
  # modify this to specify your address
  lego.email: "your@email.com"
  # configure letsencrypt's production api
  lego.url: "https://acme-v01.api.letsencrypt.org/directory"
kind: ConfigMap
# cat templates/deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-lego
  namespace: kube-lego
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: kube-lego
    spec:
      containers:
      - name: kube-lego
        image: jetstack/kube-lego:0.1.5
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
        env:
        - name: LEGO_EMAIL
          valueFrom:
            configMapKeyRef:
              name: kube-lego
              key: lego.email
        - name: LEGO_URL
          valueFrom:
            configMapKeyRef:
              name: kube-lego
              key: lego.url
        - name: LEGO_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LEGO_POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 5
          timeoutSeconds: 1

That’s it, you are ready to upgrade your app with

helm upgrade --reuse-values my-release my-chart-source 

Check if the pod is running and check its logs…

11:32:07 charts/kube-lego [d47zm3@beast]  - [master] » kubectl get pods --namespace kube-lego
NAME                         READY     STATUS    RESTARTS   AGE
kube-lego-1247378332-w859s   1/1       Running   0          9d
11:32:38 charts/kube-lego [d47zm3@beast]  - [master] » kubectl --namespace kube-lego logs kube-lego-1247378332-w859s | tail -n10
time="2017-08-18T17:51:51Z" level=info msg="Periodically check certificates at 2017-08-18 17:51:51.449354089 +0000 UTC" context=kubelego
time="2017-08-18T17:51:51Z" level=info msg="ignoring as has no annotation 'kubernetes.io/tls-acme'" context=ingress name=kube-lego-nginx namespace=kube-lego
time="2017-08-18T17:51:51Z" level=info msg="process certificate requests for ingresses" context=kubelego
time="2017-08-18T17:51:51Z" level=info msg="cert expires in 64.2 days, no renewal needed" context="ingress_tls" expire_time=2017-10-21 23:31:00 +0000 UTC name=bethebeast-ingress namespace=default
time="2017-08-18T17:51:51Z" level=info msg="no cert request needed" context="ingress_tls" name=bethebeast-ingress namespace=default
time="2017-08-19T01:51:51Z" level=info msg="Periodically check certificates at 2017-08-19 01:51:51.449344157 +0000 UTC" context=kubelego
time="2017-08-19T01:51:51Z" level=info msg="ignoring as has no annotation 'kubernetes.io/tls-acme'" context=ingress name=kube-lego-nginx namespace=kube-lego
time="2017-08-19T01:51:51Z" level=info msg="process certificate requests for ingresses" context=kubelego
time="2017-08-19T01:51:51Z" level=info msg="cert expires in 63.9 days, no renewal needed" context="ingress_tls" expire_time=2017-10-21 23:31:00 +0000 UTC name=bethebeast-ingress namespace=default
time="2017-08-19T01:51:51Z" level=info msg="no cert request needed" context="ingress_tls" name=bethebeast-ingress namespace=default

creating kops cluster

Kops (Kubernetes Operations) is an awesome tool for setting up Kubernetes clusters on AWS/GCP/vSphere, and it supports various options like multi-master, multi-node, spreading over AZs, etc. Here's the official repo link. Let's use AWS, as it's the most stable path and easily available to everyone. You will need:
– an AWS account
– a Route53 DNS domain – that's the part where you'll have to shell out some money; I think the best choice is a cheap $9 .be domain – simply buy it under your chosen name and let AWS create the hosted zone for you
– the kops client (binary) – available from the repo

Here we go…

# set up new IAM account for KOPS (using master IAM)

#!/bin/bash

USER=kops-btb
GROUP=kops-btb
CLUSTER=bethebeast.be

function decho
{
  string=$1
  echo "[$( date +'%H:%M:%S' )] ${string}"
}

decho "Make bucket for KOPS state store..."
aws s3 mb s3://kops-${CLUSTER}

decho "Make IAM group for KOPS..."
aws iam create-group --group-name ${GROUP}

export arns="
arn:aws:iam::aws:policy/AmazonEC2FullAccess
arn:aws:iam::aws:policy/AmazonRoute53FullAccess
arn:aws:iam::aws:policy/AmazonS3FullAccess
arn:aws:iam::aws:policy/IAMFullAccess
arn:aws:iam::aws:policy/AmazonVPCFullAccess"

decho "Attach right policies to IAM group..."
for arn in $arns; do aws iam attach-group-policy --policy-arn "$arn" --group-name ${GROUP}; done

decho "Create IAM user..."
aws iam create-user --user-name ${USER}

decho "Add IAM user to KOPS group..."
aws iam add-user-to-group --user-name ${USER} --group-name ${GROUP}

# this will print out credentials, save them!

decho "Create Access Key for IAM user..."
aws iam create-access-key --user-name ${USER}

# switch to new IAM account

#!/bin/bash

unset AWS_ACCESS_KEY_ID
unset AWS_SECRET_ACCESS_KEY
unset KOPS_CLUSTER
unset CLUSTER
unset KOPS_STATE_STORE

export AWS_ACCESS_KEY_ID=SECRET
export AWS_SECRET_ACCESS_KEY=SECRET

export CLUSTER=k8s.cluster.me
export KOPS_STATE_STORE=s3://kops-cluster.me/
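
kops also picks up an SSH public key for node access (by default ~/.ssh/id_rsa.pub); generate one first if you don't have it, for example:

ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa -N ""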

# generate config

I chose t2.small as it's cheap (strictly speaking only t2.micro is in the free tier), so running a 3-node cluster for a couple of hours costs next to nothing and you can destroy and recreate it later. The setup below will create a 1-master, 2-node cluster spread across two AZs. Remember, whenever you do any operation on the cluster, you need to add --yes to actually apply it, otherwise you will only preview the changes.

13:03:19 ~/bethebeast.be [~d47zm3@w0rk~] » kops create cluster --state=${KOPS_STATE_STORE} --cloud aws --zones us-east-1a,us-east-1b --node-count 2 --node-size t2.small --master-size t2.small ${CLUSTER}

13:15:19 ~/bethebeast.be [~d47zm3@w0rk~] » kops update cluster --state ${KOPS_STATE_STORE} ${CLUSTER} --yes
I0804 13:15:28.790042   47435 executor.go:91] Tasks: 0 done / 65 total; 34 can run
I0804 13:15:29.851813   47435 vfs_castore.go:422] Issuing new certificate: "master"
I0804 13:15:29.949330   47435 vfs_castore.go:422] Issuing new certificate: "kube-proxy"
I0804 13:15:30.044879   47435 vfs_castore.go:422] Issuing new certificate: "kube-controller-manager"
I0804 13:15:30.107885   47435 vfs_castore.go:422] Issuing new certificate: "kube-scheduler"
I0804 13:15:30.185555   47435 vfs_castore.go:422] Issuing new certificate: "kubecfg"
I0804 13:15:30.205744   47435 vfs_castore.go:422] Issuing new certificate: "kubelet"
I0804 13:15:30.497677   47435 vfs_castore.go:422] Issuing new certificate: "kops"
I0804 13:15:32.361043   47435 executor.go:91] Tasks: 34 done / 65 total; 13 can run
I0804 13:15:34.415232   47435 executor.go:91] Tasks: 47 done / 65 total; 16 can run
I0804 13:15:35.986796   47435 launchconfiguration.go:320] waiting for IAM instance profile "masters.k8s.bethebeast.be" to be ready
I0804 13:15:36.006589   47435 launchconfiguration.go:320] waiting for IAM instance profile "nodes.k8s.bethebeast.be" to be ready
I0804 13:15:46.881689   47435 executor.go:91] Tasks: 63 done / 65 total; 2 can run
I0804 13:15:47.901885   47435 executor.go:91] Tasks: 65 done / 65 total; 0 can run
I0804 13:15:47.901946   47435 dns.go:152] Pre-creating DNS records
I0804 13:15:49.166337   47435 update_cluster.go:229] Exporting kubecfg for cluster
Kops has set your kubectl context to k8s.bethebeast.be

Cluster is starting.  It should be ready in a few minutes.

Suggestions:
 * validate cluster: kops validate cluster
 * list nodes: kubectl get nodes --show-labels
 * ssh to the master: ssh -i ~/.ssh/id_rsa admin@api.k8s.bethebeast.be
The admin user is specific to Debian. If not using Debian please use the appropriate user based on your OS.
 * read about installing addons: https://github.com/kubernetes/kops/blob/master/docs/addons.md

Let's check it after a few minutes

13:20:01 ~/bethebeast.be [~d47zm3@w0rk~] » kops validate cluster
Using cluster from kubectl context: k8s.bethebeast.be

Validating cluster k8s.bethebeast.be

INSTANCE GROUPS
NAME                    ROLE    MACHINETYPE     MIN     MAX     SUBNETS
master-us-east-1a       Master  t2.small        1       1       us-east-1a
nodes                   Node    t2.small        2       2       us-east-1a,us-east-1b

NODE STATUS
NAME                            ROLE    READY
ip-172-20-49-65.ec2.internal    node    True
ip-172-20-50-77.ec2.internal    master  True
ip-172-20-93-24.ec2.internal    node    True

Your cluster k8s.bethebeast.be is ready
13:20:57 ~/bethebeast.be [~d47zm3@w0rk~] » kubectl get nodes
NAME                           STATUS    AGE       VERSION
ip-172-20-49-65.ec2.internal   Ready     1m        v1.6.2
ip-172-20-50-77.ec2.internal   Ready     2m        v1.6.2
ip-172-20-93-24.ec2.internal   Ready     1m        v1.6.2
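
To confirm the cluster actually schedules workloads, a quick smoke test (the deployment name and image are just examples):

kubectl run test-nginx --image=nginx --port=80
kubectl get pods -o wide
kubectl delete deployment test-nginx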

# delete cluster

13:33:44 ~/bethebeast.be [~d47zm3@w0rk~] » kops delete cluster k8s.bethebeast.be --yes
TYPE                    NAME                                                                            ID
autoscaling-config      master-us-east-1a.masters.k8s.bethebeast.be-20170804111534                      master-us-east-1a.masters.k8s.bethebeast.be-20170804111534
autoscaling-config      nodes.k8s.bethebeast.be-20170804111534                                          nodes.k8s.bethebeast.be-20170804111534
autoscaling-group       master-us-east-1a.masters.k8s.bethebeast.be                                     master-us-east-1a.masters.k8s.bethebeast.be
autoscaling-group       nodes.k8s.bethebeast.be                                                         nodes.k8s.bethebeast.be
dhcp-options            k8s.bethebeast.be                                                               dopt-930850f5
iam-instance-profile    masters.k8s.bethebeast.be                                                       masters.k8s.bethebeast.be
iam-instance-profile    nodes.k8s.bethebeast.be                                                         nodes.k8s.bethebeast.be
iam-role                masters.k8s.bethebeast.be                                                       masters.k8s.bethebeast.be
iam-role                nodes.k8s.bethebeast.be                                                         nodes.k8s.bethebeast.be
instance                master-us-east-1a.masters.k8s.bethebeast.be                                     i-00773cabb05d3b48a
instance                nodes.k8s.bethebeast.be                                                         i-05c3c6e5dd9eafcb2
instance                nodes.k8s.bethebeast.be                                                         i-0d693abcfba63bf08
internet-gateway        k8s.bethebeast.be                                                               igw-2875594e
keypair                 kubernetes.k8s.bethebeast.be-17:04:b1:25:95:28:8a:6d:38:80:08:ec:7c:d3:96:70    kubernetes.k8s.bethebeast.be-17:04:b1:25:95:28:8a:6d:38:80:08:ec:7c:d3:96:70
route-table             k8s.bethebeast.be                                                               rtb-eee9ff96
route53-record          api.internal.k8s.bethebeast.be.                                                 Z1SS02MFP8BKBH/api.internal.k8s.bethebeast.be.
route53-record          api.k8s.bethebeast.be.                                                          Z1SS02MFP8BKBH/api.k8s.bethebeast.be.
route53-record          etcd-a.internal.k8s.bethebeast.be.                                              Z1SS02MFP8BKBH/etcd-a.internal.k8s.bethebeast.be.
route53-record          etcd-events-a.internal.k8s.bethebeast.be.                                       Z1SS02MFP8BKBH/etcd-events-a.internal.k8s.bethebeast.be.
security-group          masters.k8s.bethebeast.be                                                       sg-623cd312
security-group          nodes.k8s.bethebeast.be                                                         sg-b424cbc4
subnet                  us-east-1a.k8s.bethebeast.be                                                    subnet-0f117b23
subnet                  us-east-1b.k8s.bethebeast.be                                                    subnet-6de6ca25
volume                  a.etcd-events.k8s.bethebeast.be                                                 vol-09f97238f26eb8ad0
volume                  a.etcd-main.k8s.bethebeast.be                                                   vol-03e62dca7968d86c4
vpc                     k8s.bethebeast.be                                                               vpc-d5e69dac

route53-record:Z1SS02MFP8BKBH/etcd-events-a.internal.k8s.bethebeast.be. ok
keypair:kubernetes.k8s.bethebeast.be-17:04:b1:25:95:28:8a:6d:38:80:08:ec:7c:d3:96:70    ok
autoscaling-group:master-us-east-1a.masters.k8s.bethebeast.be   ok
instance:i-05c3c6e5dd9eafcb2    ok
iam-instance-profile:nodes.k8s.bethebeast.be    ok
autoscaling-group:nodes.k8s.bethebeast.be       ok
instance:i-0d693abcfba63bf08    ok
instance:i-00773cabb05d3b48a    ok
internet-gateway:igw-2875594e   still has dependencies, will retry
iam-instance-profile:masters.k8s.bethebeast.be  ok
iam-role:nodes.k8s.bethebeast.be        ok
iam-role:masters.k8s.bethebeast.be      ok
subnet:subnet-6de6ca25  still has dependencies, will retry
autoscaling-config:master-us-east-1a.masters.k8s.bethebeast.be-20170804111534   ok
autoscaling-config:nodes.k8s.bethebeast.be-20170804111534       ok
volume:vol-09f97238f26eb8ad0    still has dependencies, will retry
subnet:subnet-0f117b23  still has dependencies, will retry
volume:vol-03e62dca7968d86c4    still has dependencies, will retry
security-group:sg-b424cbc4      still has dependencies, will retry
security-group:sg-623cd312      still has dependencies, will retry
Not all resources deleted; waiting before reattempting deletion
        route-table:rtb-eee9ff96
        subnet:subnet-0f117b23
        subnet:subnet-6de6ca25
        security-group:sg-b424cbc4
        volume:vol-03e62dca7968d86c4
        security-group:sg-623cd312
        volume:vol-09f97238f26eb8ad0
        internet-gateway:igw-2875594e
        vpc:vpc-d5e69dac
        dhcp-options:dopt-930850f5
volume:vol-03e62dca7968d86c4    still has dependencies, will retry
volume:vol-09f97238f26eb8ad0    still has dependencies, will retry
subnet:subnet-6de6ca25  still has dependencies, will retry

# upgrade cluster to newer kubernetes

14:34:23 ~/bethebeast.be [~d47zm3@w0rk~] » kops edit cluster ${CLUSTER}

Set the KubernetesVersion to the target version (e.g. v1.6.4)
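# i.e. change the kubernetesVersion field in the cluster spec that opens in the editor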

14:35:34 ~/bethebeast.be [~d47zm3@w0rk~] » kops rolling-update cluster --name ${CLUSTER} --state ${KOPS_STATE_STORE}
NAME                    STATUS  NEEDUPDATE      READY   MIN     MAX     NODES
master-us-east-1a       Ready   0               1       1       1       1
nodes                   Ready   0               2       2       2       2

14:36:04 ~/bethebeast.be [~d47zm3@w0rk~] » kops update cluster ${CLUSTER}
I0804 14:36:23.050578   57526 executor.go:91] Tasks: 0 done / 65 total; 34 can run
I0804 14:36:24.051297   57526 executor.go:91] Tasks: 34 done / 65 total; 13 can run
I0804 14:36:24.755574   57526 executor.go:91] Tasks: 47 done / 65 total; 16 can run
I0804 14:36:26.166693   57526 executor.go:91] Tasks: 63 done / 65 total; 2 can run
I0804 14:36:26.315541   57526 executor.go:91] Tasks: 65 done / 65 total; 0 can run
Will modify resources:
  LaunchConfiguration/master-us-east-1a.masters.k8s.bethebeast.be
        UserData
                                ...
                                  cat > kube_env.yaml << __EOF_KUBE_ENV
                                  Assets:
                                + - 950077deba04d297916e1d9add7d073a1d8a540b@https://storage.googleapis.com/kubernetes-release/release/v1.6.4/bin/linux/amd64/kubelet
                                - - 57afca200aa6cec74fcc3072cae12385014f59c0@https://storage.googleapis.com/kubernetes-release/release/v1.6.2/bin/linux/amd64/kubelet
                                + - d7814fce2c929866bc7880950c907797e10110a0@https://storage.googleapis.com/kubernetes-release/release/v1.6.4/bin/linux/amd64/kubectl
                                - - 984095cd0fe8a8172ab92e2ee0add49dfc46e0c2@https://storage.googleapis.com/kubernetes-release/release/v1.6.2/bin/linux/amd64/kubectl
                                  - 1d9788b0f5420e1a219aad2cb8681823fc515e7c@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff.tar.gz
                                  - e783785020d85426e1d12a7f78aaacc511ffaf0e@https://kubeupv2.s3.amazonaws.com/kops/1.6.2/linux/amd64/utils.tar.gz
                                ...


  LaunchConfiguration/nodes.k8s.bethebeast.be
        UserData
                                ...
                                  cat > kube_env.yaml << __EOF_KUBE_ENV
                                  Assets:
                                + - 950077deba04d297916e1d9add7d073a1d8a540b@https://storage.googleapis.com/kubernetes-release/release/v1.6.4/bin/linux/amd64/kubelet
                                - - 57afca200aa6cec74fcc3072cae12385014f59c0@https://storage.googleapis.com/kubernetes-release/release/v1.6.2/bin/linux/amd64/kubelet
                                + - d7814fce2c929866bc7880950c907797e10110a0@https://storage.googleapis.com/kubernetes-release/release/v1.6.4/bin/linux/amd64/kubectl
                                - - 984095cd0fe8a8172ab92e2ee0add49dfc46e0c2@https://storage.googleapis.com/kubernetes-release/release/v1.6.2/bin/linux/amd64/kubectl
                                  - 1d9788b0f5420e1a219aad2cb8681823fc515e7c@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff.tar.gz
                                  - e783785020d85426e1d12a7f78aaacc511ffaf0e@https://kubeupv2.s3.amazonaws.com/kops/1.6.2/linux/amd64/utils.tar.gz
                                ...


Must specify --yes to apply changes

14:36:26 ~/bethebeast.be [~d47zm3@w0rk~] » kops update cluster ${CLUSTER} --yes
I0804 14:37:12.862044   57640 executor.go:91] Tasks: 0 done / 65 total; 34 can run
I0804 14:37:13.834998   57640 executor.go:91] Tasks: 34 done / 65 total; 13 can run
I0804 14:37:14.332710   57640 executor.go:91] Tasks: 47 done / 65 total; 16 can run
I0804 14:37:16.375932   57640 executor.go:91] Tasks: 63 done / 65 total; 2 can run
I0804 14:37:16.799119   57640 executor.go:91] Tasks: 65 done / 65 total; 0 can run
I0804 14:37:16.799176   57640 dns.go:152] Pre-creating DNS records
I0804 14:37:17.401472   57640 update_cluster.go:229] Exporting kubecfg for cluster
Kops has set your kubectl context to k8s.bethebeast.be

Cluster changes have been applied to the cloud.
Changes may require instances to restart: kops rolling-update cluster

14:37:18 ~/bethebeast.be [~d47zm3@w0rk~] » kops rolling-update cluster --name ${CLUSTER} --state ${KOPS_STATE_STORE}
NAME                    STATUS          NEEDUPDATE      READY   MIN     MAX     NODES
master-us-east-1a       NeedsUpdate     1               0       1       1       1
nodes                   NeedsUpdate     2               0       2       2       2

Must specify --yes to rolling-update.
14:37:48 ~/bethebeast.be [~d47zm3@w0rk~] » kops rolling-update cluster --name ${CLUSTER} --state ${KOPS_STATE_STORE} --yes
NAME                    STATUS          NEEDUPDATE      READY   MIN     MAX     NODES
master-us-east-1a       NeedsUpdate     1               0       1       1       1
nodes                   NeedsUpdate     2               0       2       2       2
I0804 14:38:11.927780   57793 instancegroups.go:347] Stopping instance "i-0e84a13baadcc6c21", node "ip-172-20-49-150.ec2.internal", in AWS ASG "master-us-east-1a.masters.k8s.bethebeast.be".
I0804 14:43:12.312770   57793 instancegroups.go:347] Stopping instance "i-01c93d8b05ef09c55", node "ip-172-20-66-31.ec2.internal", in AWS ASG "nodes.k8s.bethebeast.be".
I0804 14:45:13.373851   57793 instancegroups.go:347] Stopping instance "i-0bfdf8e1e818f2af1", node "ip-172-20-50-193.ec2.internal", in AWS ASG "nodes.k8s.bethebeast.be".
I0804 14:47:14.065970   57793 rollingupdate.go:174] Rolling update completed!


14:58:29 ~/bethebeast.be [~d47zm3@w0rk~] » kubectl get nodes
NAME                            STATUS    AGE       VERSION
ip-172-20-35-1.ec2.internal     Ready     17m       v1.6.4
ip-172-20-49-207.ec2.internal   Ready     11m       v1.6.4
ip-172-20-89-38.ec2.internal    Ready     13m       v1.6.4

# edit instance group - choose another OS, for example

15:01:50 ~/bethebeast.be [~d47zm3@w0rk~] » kops get ig
Using cluster from kubectl context: k8s.bethebeast.be

NAME                    ROLE    MACHINETYPE     MIN     MAX     SUBNETS
master-us-east-1a       Master  t2.small        1       1       us-east-1a
nodes                   Node    t2.small        2       2       us-east-1a,us-east-1b

# taken from docs on GitHub - kops or CoreOS, can't remember
15:05:16 ~/bethebeast.be [~d47zm3@w0rk~] » aws ec2 describe-images --region=us-east-1 --owner=595879546273 \
    --filters "Name=virtualization-type,Values=hvm" "Name=name,Values=CoreOS-stable*" \
    --query 'sort_by(Images,&CreationDate)[-1].{id:ImageLocation}'
{
    "id": "595879546273/CoreOS-stable-1409.7.0-hvm"
}

15:02:21 ~/bethebeast.be [~d47zm3@w0rk~] » kops  edit ig nodes # change ImageID
Using cluster from kubectl context: k8s.bethebeast.be
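# in the instancegroup spec that opens, the ImageID is the image field – set it to 595879546273/CoreOS-stable-1409.7.0-hvm from the query above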

15:06:37 ~/bethebeast.be [~d47zm3@w0rk~] » kops update cluster ${CLUSTER}
I0804 15:07:01.561380   60937 executor.go:91] Tasks: 0 done / 65 total; 34 can run
I0804 15:07:02.657613   60937 executor.go:91] Tasks: 34 done / 65 total; 13 can run
I0804 15:07:03.483410   60937 executor.go:91] Tasks: 47 done / 65 total; 16 can run
I0804 15:07:04.632237   60937 executor.go:91] Tasks: 63 done / 65 total; 2 can run
I0804 15:07:04.787189   60937 executor.go:91] Tasks: 65 done / 65 total; 0 can run
Will modify resources:
  LaunchConfiguration/nodes.k8s.bethebeast.be
        ImageID                  ami-b2137ea4 -> 595879546273/CoreOS-stable-1409.7.0-hvm

Must specify --yes to apply changes

15:07:04 ~/bethebeast.be [~d47zm3@w0rk~] » kops update cluster ${CLUSTER} --yes
I0804 15:07:42.974015   61031 executor.go:91] Tasks: 0 done / 65 total; 34 can run
I0804 15:07:44.001498   61031 executor.go:91] Tasks: 34 done / 65 total; 13 can run
I0804 15:07:44.537271   61031 executor.go:91] Tasks: 47 done / 65 total; 16 can run
I0804 15:07:46.920735   61031 executor.go:91] Tasks: 63 done / 65 total; 2 can run
I0804 15:07:47.239598   61031 executor.go:91] Tasks: 65 done / 65 total; 0 can run
I0804 15:07:47.239655   61031 dns.go:152] Pre-creating DNS records
I0804 15:07:47.862280   61031 update_cluster.go:229] Exporting kubecfg for cluster
Kops has set your kubectl context to k8s.bethebeast.be

Cluster changes have been applied to the cloud.

Changes may require instances to restart: kops rolling-update cluster

15:07:49 ~/bethebeast.be [~d47zm3@w0rk~] » kops rolling-update cluster
Using cluster from kubectl context: k8s.bethebeast.be

NAME                    STATUS          NEEDUPDATE      READY   MIN     MAX     NODES
master-us-east-1a       Ready           0               1       1       1       1
nodes                   NeedsUpdate     2               0       2       2       2

Must specify --yes to rolling-update.

15:08:05 ~/bethebeast.be [~d47zm3@w0rk~] » kops rolling-update cluster --yes
Using cluster from kubectl context: k8s.bethebeast.be

NAME                    STATUS          NEEDUPDATE      READY   MIN     MAX     NODES
master-us-east-1a       Ready           0               1       1       1       1
nodes                   NeedsUpdate     2               0       2       2       2
I0804 15:08:15.865527   61123 instancegroups.go:347] Stopping instance "i-00cca9fc377f7025d", node "ip-172-20-89-38.ec2.internal", in AWS ASG "nodes.k8s.bethebeast.be".
I0804 15:10:16.154601   61123 instancegroups.go:347] Stopping instance "i-0719c8444530c9096", node "ip-172-20-49-207.ec2.internal", in AWS ASG "nodes.k8s.bethebeast.be".
I0804 15:12:16.895029   61123 rollingupdate.go:174] Rolling update completed!
15:12:16 ~/bethebeast.be [~d47zm3@w0rk~] » kubectl get nodes
NAME                            STATUS    AGE       VERSION
ip-172-20-35-1.ec2.internal     Ready     31m       v1.6.4
ip-172-20-61-168.ec2.internal   Ready     47s       v1.6.4
ip-172-20-69-208.ec2.internal   Ready     2m        v1.6.4

15:12:34 ~/bethebeast.be [~d47zm3@w0rk~] » kubectl describe node ip-172-20-61-168.ec2.internal
Name:                   ip-172-20-61-168.ec2.internal
Role:
Labels:                 beta.kubernetes.io/arch=amd64
                        beta.kubernetes.io/instance-type=t2.small
                        beta.kubernetes.io/os=linux
                        failure-domain.beta.kubernetes.io/region=us-east-1
                        failure-domain.beta.kubernetes.io/zone=us-east-1a
                        kubernetes.io/hostname=ip-172-20-61-168.ec2.internal
                        kubernetes.io/role=node
                        node-role.kubernetes.io/node=
Annotations:            node.alpha.kubernetes.io/ttl=0
                        volumes.kubernetes.io/controller-managed-attach-detach=true
Taints:                 
CreationTimestamp:      Fri, 04 Aug 2017 15:11:47 +0200
Conditions:
  Type                  Status  LastHeartbeatTime                       LastTransitionTime                      Reason                          Message
  ----                  ------  -----------------                       ------------------                      ------                          -------
  OutOfDisk             False   Fri, 04 Aug 2017 15:13:08 +0200         Fri, 04 Aug 2017 15:11:47 +0200         KubeletHasSufficientDisk        kubelet has sufficient disk space available
  MemoryPressure        False   Fri, 04 Aug 2017 15:13:08 +0200         Fri, 04 Aug 2017 15:11:47 +0200         KubeletHasSufficientMemory      kubelet has sufficient memory available
  DiskPressure          False   Fri, 04 Aug 2017 15:13:08 +0200         Fri, 04 Aug 2017 15:11:47 +0200         KubeletHasNoDiskPressure        kubelet has no disk pressure
  Ready                 True    Fri, 04 Aug 2017 15:13:08 +0200         Fri, 04 Aug 2017 15:12:07 +0200         KubeletReady                    kubelet is posting ready status
  NetworkUnavailable    False   Fri, 04 Aug 2017 15:11:56 +0200         Fri, 04 Aug 2017 15:11:56 +0200         RouteCreated                    RouteController created a route
Addresses:
  InternalIP:   172.20.61.168
  LegacyHostIP: 172.20.61.168
  ExternalIP:   52.202.6.64
  InternalDNS:  ip-172-20-61-168.ec2.internal
  ExternalDNS:  ec2-52-202-6-64.compute-1.amazonaws.com
  Hostname:     ip-172-20-61-168.ec2.internal
Capacity:
 cpu:           1
 memory:        2050684Ki
 pods:          110
Allocatable:
 cpu:           1
 memory:        1948284Ki
 pods:          110
System Info:
 Machine ID:                    e73bb3850eaa4bcc80b9d5671b038b65
 System UUID:                   EC2AA003-4546-D140-D03B-21C98BCB8EA2
 Boot ID:                       bb0fcdf5-486f-4442-b8fb-3dc2c93c6532
 Kernel Version:                4.11.11-coreos
 OS Image:                      Container Linux by CoreOS 1409.7.0 (Ladybug)
 Operating System:              linux
 Architecture:                  amd64
 Container Runtime Version:     docker://1.12.6
 Kubelet Version:               v1.6.4
 Kube-Proxy Version:            v1.6.4
PodCIDR:                        100.96.7.0/24
ExternalID:                     i-0e2c9e9a0f9f6781f
Non-terminated Pods:            (1 in total)
  Namespace                     Name                                                    CPU Requests    CPU Limits      Memory Requests Memory Limits
  ---------                     ----                                                    ------------    ----------      --------------- -------------
  kube-system                   kube-proxy-ip-172-20-61-168.ec2.internal                100m (10%)      0 (0%)          0 (0%)          0 (0%)
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  CPU Requests  CPU Limits      Memory Requests Memory Limits
  ------------  ----------      --------------- -------------
  100m (10%)    0 (0%)          0 (0%)          0 (0%)
Events:
  FirstSeen     LastSeen        Count   From                                            SubObjectPath   Type            Reason                  Message
  ---------     --------        -----   ----                                            -------------   --------        ------                  -------
  1m            1m              1       kubelet, ip-172-20-61-168.ec2.internal                          Normal          Starting                Starting kubelet.
  1m            1m              1       kubelet, ip-172-20-61-168.ec2.internal                          Warning         ImageGCFailed           unable to find data for container /
  1m            1m              2       kubelet, ip-172-20-61-168.ec2.internal                          Normal          NodeHasSufficientDisk   Node ip-172-20-61-168.ec2.internal status is now: NodeHasSufficientDisk
  1m            1m              2       kubelet, ip-172-20-61-168.ec2.internal                          Normal          NodeHasSufficientMemory Node ip-172-20-61-168.ec2.internal status is now: NodeHasSufficientMemory
  1m            1m              2       kubelet, ip-172-20-61-168.ec2.internal                          Normal          NodeHasNoDiskPressure   Node ip-172-20-61-168.ec2.internal status is now: NodeHasNoDiskPressure
  1m            1m              1       kube-proxy, ip-172-20-61-168.ec2.internal                       Normal          Starting                Starting kube-proxy.
  1m            1m              1       kubelet, ip-172-20-61-168.ec2.internal                          Normal          NodeReady               Node ip-172-20-61-168.ec2.internal status is now: NodeReady

15:13:37 ~/bethebeast.be [~d47zm3@w0rk~] » ssh core@52.202.6.64
The authenticity of host '52.202.6.64 (52.202.6.64)' can't be established.
ECDSA key fingerprint is SHA256:O3ooutITflGeWCW6w6KHK07o+HZFN/1tqrzJ2BrBJc4.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '52.202.6.64' (ECDSA) to the list of known hosts.
Container Linux by CoreOS stable (1409.7.0)
core@ip-172-20-61-168 ~ $

The end for now.

exposing ingress on a kubernetes node

Running a single-node cluster has its limitations… after deploying the ingress controller on the cluster I had to somehow expose it without any kind of load balancer. I did it using this trick:

# edit ingress deployment and add hostNetwork: true
20:21:52 ~ [d47zm3@beast]  » kubectl edit deployment --namespace=kube-system nginx-ingress-controller
...
        ports:
        - containerPort: 80
          hostPort: 80
          protocol: TCP
        - containerPort: 443
          hostPort: 443
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      hostNetwork: true # THIS IS IMPORTANT PART
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: ingress
      serviceAccountName: ingress
      terminationGracePeriodSeconds: 60

Now delete the pod so it gets recreated, and test your ingress again.
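
For example (the controller pod name suffix will differ in your cluster):

kubectl --namespace=kube-system get pods | grep nginx-ingress-controller
kubectl --namespace=kube-system delete pod nginx-ingress-controller-<pod-suffix>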

kube-lego on kubernetes cluster – rbac

This little buddy will work wonders for you… It will automatically request certificates from Let's Encrypt for all your ingress domains… it requires an RBAC role, of course. Let's see how to implement it; I'm using the NGINX ingress controller. You also have the option to use the GKE one (see the kube-lego repository here).

# this is my ingress, please note it requires the kubernetes.io/tls-acme annotation so kube-lego can pick it up
# ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ template "fullname" . }}
  annotations:
    ingress.kubernetes.io/ssl-redirect: "true"
    ingress.kubernetes.io/proxy-body-size: 10m
    kubernetes.io/tls-acme: "true"
spec:
  tls:
  - hosts:
    - bethebeast.pl
  rules:
  - host: bethebeast.pl
    http:
      paths:
      - backend:
          serviceName: {{ $.Release.Name }}-wordpress
          servicePort: 80
        path: /
# configmap with my e-mail, deployment itself, namespace and rbac role
# namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
    name: kube-lego
# deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-lego
  namespace: kube-lego
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: kube-lego
    spec:
      containers:
      - name: kube-lego
        image: jetstack/kube-lego:0.1.5
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
        env:
        - name: LEGO_EMAIL
          valueFrom:
            configMapKeyRef:
              name: kube-lego
              key: lego.email
        - name: LEGO_URL
          valueFrom:
            configMapKeyRef:
              name: kube-lego
              key: lego.url
        - name: LEGO_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LEGO_POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 5
          timeoutSeconds: 1
# configmap, change e-mail to yours
apiVersion: v1
metadata:
  name: kube-lego
  namespace: kube-lego
data:
  # modify this to specify your address
  lego.email: "secret@gmail.com"
  # configure letsencrypt's production api
  lego.url: "https://acme-v01.api.letsencrypt.org/directory"
kind: ConfigMap
# rbac role
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: ingress-secret-admin
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs:
  - get
  - watch
  - list
  - create
  - update
  - patch
- apiGroups: [""]
  resources: ["services"]
  verbs:
  - get
  - create
- apiGroups: ["extensions"]
  resources: ["ingresses"]
  verbs:
  - get
  - watch
  - list
  - create
  - update
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kube-lego
roleRef:
  kind: ClusterRole
  name: ingress-secret-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: default
  namespace: kube-lego

upgrading minor version of kubernetes cluster – centos

Note: always take a backup first; this is not a production case.

19:59:26 ~ [d47zm3@beast]  » kubectl get nodes -o wide
NAME         STATUS    AGE       VERSION   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION
bethebeast   Ready     15d       v1.7.1            CentOS Linux 7 (Core)   3.10.0-327.4.5.el7.x86_64
# run yum update
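# a concrete example of the update step (package names from the kubernetes yum repo):
sudo yum update -y kubelet kubeadm kubectl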
20:04:30 ~ [d47zm3@beast]  » rpm -qa | grep kube
kubelet-1.7.3-1.x86_64
kubeadm-1.7.3-1.x86_64
kubectl-1.7.3-1.x86_64
kubernetes-cni-0.5.1-0.x86_64
20:02:39 ~ [d47zm3@beast]  » sudo systemctl restart kubelet
Warning: kubelet.service changed on disk. Run 'systemctl daemon-reload' to reload units.
20:02:46 ~ [d47zm3@beast]  » sudo systemctl daemon-reload
20:02:48 ~ [d47zm3@beast]  » kubectl get nodes -o wide
NAME         STATUS     AGE       VERSION   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION
bethebeast   NotReady   15d       v1.7.3            CentOS Linux 7 (Core)   3.10.0-327.4.5.el7.x86_64
20:02:52 ~ [d47zm3@beast]  » kubectl get nodes -o wide
NAME         STATUS    AGE       VERSION   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION
bethebeast   Ready     15d       v1.7.3            CentOS Linux 7 (Core)   3.10.0-327.4.5.el7.x86_64

kubernetes – adding tls certificate on ingress / increasing POST body size

For the dreaded error

413 request entity too large

see the annotation below.

You will need a certificate and key (a TLS pair) to create the secret, then

kubectl create secret tls bethebeast-secret --key=bethebeast.pl.key --cert=bethebeast.pl.cert
kubectl get secret

Then use it in the ingress configuration (snippet from a Helm chart)

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ template "fullname" . }}
  annotations:
    ingress.kubernetes.io/ssl-redirect: "true"
    ingress.kubernetes.io/proxy-body-size: 10m # this annotation increases allowed POST body size
spec:
  tls:
  - hosts:
    - bethebeast.pl
    secretName: bethebeast-secret
  rules:
  - host: bethebeast.pl
    http:
      paths:
      - backend:
          serviceName: {{ $.Release.Name }}-wordpress
          servicePort: 80
        path: /
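
Once the ingress serves the certificate you can verify it from outside (host taken from the ingress above):

openssl s_client -connect bethebeast.pl:443 -servername bethebeast.pl < /dev/null 2>/dev/null | openssl x509 -noout -subject -dates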

kubernetes with rbac and tiller server

CREDITS TO THIS GUY: janwillies/kubernetes-rbac-howto

Basically

kubectl create serviceaccount tiller --namespace=kube-system
kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init # if you did it before, that's ok, no need to worry, tiller will be redeployed
kubectl --namespace=kube-system edit deployment tiller-deploy
### add serviceAccount: tiller to the spec-section, e.g.:
...
spec:
  template:
    spec:
      [...]
      restartPolicy: Always
      serviceAccount: tiller
      schedulerName: default-scheduler
      [...]
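
After tiller restarts with the new service account, a quick check that it's healthy (standard helm/kubectl commands, not from the original notes):

kubectl --namespace=kube-system get pods | grep tiller
helm version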