wip: kube2 kubernetes

2024-02-22 14:08:39 +00:00
parent 37d2b0f8a5
commit dc99966881
12 changed files with 886 additions and 8 deletions

group_vars/kube2.yaml Normal file

@@ -0,0 +1 @@
is_k8s_master: true

join-k8s-command Normal file

@@ -0,0 +1 @@
kubeadm join 192.168.122.123:6443 --token HISTORY_PURGED_SECRET --discovery-token-ca-cert-hash sha256:11f5dff0e8e3c0a627720e60c6c4b8ee69169959006571f5b00f064b9a69ff17
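The token above was scrubbed from history (HISTORY_PURGED_SECRET). An equivalent file can be regenerated on the master at any time, which is exactly what the role's "Generate join command" task later in this commit does:

    kubeadm token create --print-join-command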


@@ -34,18 +34,10 @@
  roles:
    - { role : monitoring-master, tags : [ "monitoring-master", "icinga", "grafana" ] }

- hosts: typo3-cms
  roles:
    - { role : typo3-cms, tags : [ "typo3" ] }

- hosts: paperless
  roles:
    - { role : paperless, tags : [ "paperless" ] }

- hosts: vault-pki
  roles:
    - { role : vault-pki, tags : [ "pki_master", "vault" ] }

- hosts: vpn
  roles:
    - { role : openvpn, tags : [ "openvpn", "vpn", "certificate-manager" ] }

@@ -61,3 +53,7 @@
- hosts: nextcloud ths
  roles:
    - { role: nextcloud, tags: ["nextcloud"] }

- hosts: kube2
  roles:
    - { role: kubernetes-base, tags: ["kubernetes"] }


@@ -45,6 +45,11 @@
    name: opendkim
    state: restarted

- name: restart docker
  systemd:
    name: docker
    state: restarted

- name: restart slapd
  systemd:
    name: slapd-custom


@@ -0,0 +1,522 @@
# Calico Version v3.3.7
# https://docs.projectcalico.org/v3.3/releases#v3.3.7
# This manifest includes the following component versions:
#   calico/node:v3.3.7
#   calico/cni:v3.3.7

# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas
  # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is
  # essential.
  typha_service_name: "none"

  # Configure the Calico backend to use.
  calico_backend: "bird"

  # Configure the MTU to use
  veth_mtu: "1440"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
            "type": "host-local",
            "subnet": "usePodCidr"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }
---
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
apiVersion: v1
kind: Service
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  ports:
    - port: 5473
      protocol: TCP
      targetPort: calico-typha
      name: calico-typha
  selector:
    k8s-app: calico-typha
---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
  # typha_service_name variable in the calico-config ConfigMap above.
  #
  # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
  # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
  # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
  replicas: 0
  revisionHistoryLimit: 2
  # Required by apps/v1 Deployments; must match the pod template labels below.
  selector:
    matchLabels:
      k8s-app: calico-typha
  template:
    metadata:
      labels:
        k8s-app: calico-typha
      annotations:
        # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical
        # add-on, ensuring it gets priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
        cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
      # Since Calico can't network a pod until Typha is up, we need to run Typha itself
      # as a host-networked pod.
      serviceAccountName: calico-node
      containers:
        - image: calico/typha:v3.3.7
          name: calico-typha
          ports:
            - containerPort: 5473
              name: calico-typha
              protocol: TCP
          env:
            # Enable "info" logging by default. Can be set to "debug" to increase verbosity.
            - name: TYPHA_LOGSEVERITYSCREEN
              value: "info"
            # Disable logging to file and syslog since those don't make sense in Kubernetes.
            - name: TYPHA_LOGFILEPATH
              value: "none"
            - name: TYPHA_LOGSEVERITYSYS
              value: "none"
            # Monitor the Kubernetes API to find the number of running instances and rebalance
            # connections.
            - name: TYPHA_CONNECTIONREBALANCINGMODE
              value: "kubernetes"
            - name: TYPHA_DATASTORETYPE
              value: "kubernetes"
            - name: TYPHA_HEALTHENABLED
              value: "true"
            # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked,
            # this opens a port on the host, which may need to be secured.
            #- name: TYPHA_PROMETHEUSMETRICSENABLED
            #  value: "true"
            #- name: TYPHA_PROMETHEUSMETRICSPORT
            #  value: "9093"
          livenessProbe:
            exec:
              command:
                - calico-typha
                - check
                - liveness
            periodSeconds: 30
            initialDelaySeconds: 30
          readinessProbe:
            exec:
              command:
                - calico-typha
                - check
                - readiness
            periodSeconds: 10
---
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-typha
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v3.3.7
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Typha support: controlled by the ConfigMap.
            - name: FELIX_TYPHAK8SSERVICENAME
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: typha_service_name
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV4POOL_CIDR
              value: "10.10.0.0/18"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - /bin/calico-node
                - -bird-ready
                - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v3.3.7
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system
---
# Create all the CustomResourceDefinitions needed for
# Calico policy and networking mode.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: felixconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  # apiextensions.k8s.io/v1 requires a versions list with a schema per version;
  # the permissive schema below is the minimal valid form for these CRDs.
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  names:
    kind: FelixConfiguration
    plural: felixconfigurations
    singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: bgppeers.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  names:
    kind: BGPPeer
    plural: bgppeers
    singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: bgpconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  names:
    kind: BGPConfiguration
    plural: bgpconfigurations
    singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  names:
    kind: IPPool
    plural: ippools
    singular: ippool
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  names:
    kind: HostEndpoint
    plural: hostendpoints
    singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: clusterinformations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  names:
    kind: ClusterInformation
    plural: clusterinformations
    singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  names:
    kind: GlobalNetworkSet
    plural: globalnetworksets
    singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: networkpolicies.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
  names:
    kind: NetworkPolicy
    plural: networkpolicies
    singular: networkpolicy
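
For orientation, these CRDs are the objects calico/node stores its state in. A minimal sketch of the default pool the DaemonSet creates on first start, assuming the CALICO_IPV4POOL_* values above (the object name and exact field names follow Calico's v1 CRD schema and can differ between Calico releases):

apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
  name: default-ipv4-ippool
spec:
  cidr: 10.10.0.0/18
  ipipMode: Always
  natOutgoing: true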


@@ -0,0 +1,19 @@
disabled_plugins = []
#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0
#[grpc]
# address = "/run/containerd/containerd.sock"
# uid = 0
# gid = 0
#[debug]
# address = "/run/containerd/debug.sock"
# uid = 0
# gid = 0
# level = "info"
[plugins."io.containerd.grpc.v1.cri"]
systemd_cgroup = true
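# Note: newer containerd releases deprecate the CRI-level systemd_cgroup flag above
# in favour of a per-runtime option; the rough modern equivalent would be:
#   [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
#     SystemdCgroup = true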


@@ -0,0 +1,17 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kube-system
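
Logging in to this dashboard generation needs the admin-user bearer token. A follow-up task could surface it; a minimal sketch (the task name and registered variable are illustrative, not part of this commit — the kubectl recipe is the usual one for the v1.10-era dashboard):

- name: Read admin-user dashboard token
  become: false
  remote_user: "{{ k8s_user }}"
  shell: kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
  register: admin_user_token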


@@ -0,0 +1,92 @@
# Calico Version v3.3.7
# https://docs.projectcalico.org/v3.3/releases#v3.3.7
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  - apiGroups: [""]
    resources:
      - namespaces
      - serviceaccounts
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - services
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - update
      - watch
  - apiGroups: ["extensions"]
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - clusterinformations
      - hostendpoints
    verbs:
      - create
      - get
      - list
      - update
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
  - kind: ServiceAccount
    name: calico-node
    namespace: kube-system


@@ -0,0 +1,2 @@
dependencies:
  - global-handlers


@@ -0,0 +1,155 @@
- name: include services ports
  include_vars: kubernetes.yaml

- name: Configure K8S Master Block
  block:
    - name: Initialise the Kubernetes cluster using kubeadm
      become: true
      command: kubeadm init --apiserver-advertise-address={{ ansible_default_ipv4.address }} --pod-network-cidr={{ k8s_pod_network }}
      args:
        creates: "{{ k8s_admin_config }}"

    - name: Wait for apiserver to become ready
      wait_for:
        port: 6443
        sleep: 10

    - name: Setup kubeconfig for {{ k8s_user }} user
      file:
        path: "{{ k8s_user_home }}/.kube"
        state: directory
        owner: "{{ k8s_user }}"
        group: "{{ k8s_user }}"
        mode: "0750"

    - name: Copy {{ k8s_admin_config }}
      become: true
      copy:
        src: "{{ k8s_admin_config }}"
        dest: "{{ k8s_user_home }}/.kube/config"
        owner: "{{ k8s_user }}"
        group: "{{ k8s_user }}"
        mode: "0640"
        remote_src: yes

    # - name: Copy {{ calico_rbac_config }}
    #   copy:
    #     src: "{{ calico_rbac_config }}"
    #     dest: "{{ k8s_user_home }}/{{ calico_rbac_config }}"
    #     owner: "{{ k8s_user }}"
    #     group: "{{ k8s_user }}"
    #     mode: "0640"
    #
    # - name: Copy {{ calico_net_url }}
    #   copy:
    #     src: "{{ calico_net_config }}"
    #     dest: "{{ k8s_user_home }}/{{ calico_net_config }}"
    #     owner: "{{ k8s_user }}"
    #     group: "{{ k8s_user }}"
    #     mode: "0640"
    #
    # - name: Set CALICO_IPV4POOL_CIDR to {{ k8s_pod_network }}
    #   replace:
    #     path: "{{ k8s_user_home }}/{{ calico_net_config }}"
    #     regexp: "192.168.0.0/16"
    #     replace: "{{ k8s_pod_network }}"

    - name: Download Dashboard
      get_url:
        url: "{{ dashboard_url }}"
        dest: "{{ k8s_user_home }}/{{ dashboard_config }}"
        owner: "{{ k8s_user }}"
        group: "{{ k8s_user }}"
        mode: "0640"

    # - name: Install calico pod network {{ calico_rbac_config }}
    #   become: false
    #   remote_user: "{{ k8s_user }}"
    #   command: kubectl apply -f "{{ k8s_user_home }}/{{ calico_rbac_config }}"
    #
    # - name: Install calico pod network {{ calico_net_config }}
    #   become: false
    #   remote_user: "{{ k8s_user }}"
    #   command: kubectl apply -f "{{ k8s_user_home }}/{{ calico_net_config }}"

    - name: Install K8S dashboard {{ dashboard_config }}
      become: false
      remote_user: "{{ k8s_user }}"
      command: kubectl apply -f "{{ k8s_user_home }}/{{ dashboard_config }}"

    - name: Create service account
      become: false
      remote_user: "{{ k8s_user }}"
      command: kubectl create serviceaccount dashboard -n default
      ignore_errors: yes

    - name: Create cluster role binding dashboard-admin
      become: false
      remote_user: "{{ k8s_user }}"
      command: kubectl create clusterrolebinding dashboard-admin -n default --clusterrole=cluster-admin --serviceaccount=default:dashboard
      ignore_errors: yes

    - name: Create {{ k8s_dashboard_adminuser_config }} for service account
      copy:
        src: "files/{{ k8s_dashboard_adminuser_config }}"
        dest: "{{ k8s_user_home }}/{{ k8s_dashboard_adminuser_config }}"
        owner: "{{ k8s_user }}"
        group: "{{ k8s_user }}"
        mode: "0640"

    - name: Apply {{ k8s_dashboard_adminuser_config }}
      become: false
      remote_user: "{{ k8s_user }}"
      command: kubectl apply -f "{{ k8s_user_home }}/{{ k8s_dashboard_adminuser_config }}"
      ignore_errors: yes

    # NOTE: this grants cluster-admin to unauthenticated users; tolerable on an
    # isolated lab network, dangerous anywhere else.
    - name: Create cluster role binding cluster-system-anonymous
      become: false
      remote_user: "{{ k8s_user }}"
      command: kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
      ignore_errors: yes

    - name: Test K8S dashboard and wait for HTTP 200
      uri:
        url: "{{ k8s_dashboard_url }}"
        status_code: 200
        validate_certs: no
      ignore_errors: yes
      register: result_k8s_dashboard_page
      retries: 10
      delay: 6
      until: result_k8s_dashboard_page is succeeded

    - name: K8S dashboard URL
      debug:
        var: k8s_dashboard_url

    - name: Generate join command
      command: kubeadm token create --print-join-command
      register: join_command

    - name: Copy join command to local file
      become: false
      remote_user: "{{ k8s_user }}"
      copy:
        content: "{{ join_command.stdout_lines[0] }}"
        dest: "{{ k8s_token_file }}"
      delegate_to: localhost
  when: is_k8s_master is defined and is_k8s_master

- name: Configure K8S Node Block
  block:
    - name: Copy {{ k8s_token_file }} to server location
      copy:
        src: "{{ k8s_token_file }}"
        dest: "{{ k8s_user_home }}/{{ k8s_token_file }}.sh"
        owner: "{{ k8s_user }}"
        group: "{{ k8s_user }}"
        mode: "0750"

    - name: Join the node to cluster unless file {{ k8s_kubelet_config }} exists
      become: true
      command: sh "{{ k8s_user_home }}/{{ k8s_token_file }}.sh"
      args:
        creates: "{{ k8s_kubelet_config }}"
  when: is_k8s_node is defined and is_k8s_node
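
A post-join sanity check would round this file out; a minimal sketch, assuming it runs on the master once nodes have joined (the task is illustrative, not part of this commit):

- name: List cluster nodes
  become: false
  remote_user: "{{ k8s_user }}"
  command: kubectl get nodes
  register: k8s_nodes
  changed_when: false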


@@ -0,0 +1,53 @@
- name: Debian | Add GPG Keys
  apt_key:
    url: "https://download.docker.com/linux/debian/gpg"

- name: Debian | Add Repo Source
  apt_repository:
    repo: "deb [arch=amd64] https://download.docker.com/linux/debian bullseye stable"
    update_cache: yes

- name: Debian | Configure Sysctl
  sysctl:
    name: "net.ipv4.ip_forward"
    value: "1"
    state: present

- name: Install Docker prerequisites
  apt:
    state: present
    pkg:
      - docker-ce
      - gpg

- name: Fix CRI Plugin containerd config
  copy:
    src: containerd.toml
    dest: /etc/containerd/containerd.toml
    mode: 0644
  notify: restart docker

- name: Debian | Add GPG Key
  apt_key:
    url: "https://packages.cloud.google.com/apt/doc/apt-key.gpg"

- name: Debian | Add Kubernetes Repository
  apt_repository:
    repo: "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    update_cache: yes

- name: Debian | Install Dependencies
  apt:
    pkg:
      - kubernetes-cni
      - kubelet
    state: present

- name: Debian | Install Kubernetes
  apt:
    pkg:
      - kubeadm
      - kubectl
    state: present

- include_tasks: cluster_setup.yaml
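
The apt.kubernetes.io repository stopped receiving updates in late 2023 and was later switched off, so the repository task above will eventually need migrating to the community-owned pkgs.k8s.io host. A hedged sketch of the replacement (the v1.29 stream and the keyring path are assumptions; match them to the target kubeadm version and wherever the Release.key is de-armored):

- name: Debian | Add Kubernetes Repository (pkgs.k8s.io)
  apt_repository:
    repo: "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /"
    update_cache: yes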

vars/kubernetes.yaml Normal file

@@ -0,0 +1,15 @@
k8s_pod_network: "10.10.0.0/18"
k8s_user: "sheppy"
k8s_user_home: "/home/{{ k8s_user }}"
k8s_token_file: "join-k8s-command"
k8s_admin_config: "/etc/kubernetes/admin.conf"
k8s_dashboard_adminuser_config: "dashboard-adminuser.yaml"
k8s_kubelet_config: "/etc/kubernetes/kubelet.conf"
k8s_dashboard_port: "6443"
k8s_dashboard_url: "https://{{ ansible_default_ipv4.address }}:{{ k8s_dashboard_port }}/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login"
calico_rbac_url: "https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml"
calico_rbac_config: "rbac-kdd.yaml"
calico_net_url: "https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml"
calico_net_config: "calico.yaml"
dashboard_url: "https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml"
dashboard_config: "kubernetes-dashboard.yml"
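
Given the advertise address recorded in join-k8s-command (192.168.122.123), k8s_dashboard_url renders on the master to:

    https://192.168.122.123:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login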