Mayastor, Velero, Rancher improved

This commit is contained in:
Jeroen Vermeulen 2023-09-11 18:28:49 +02:00
parent ce228aad70
commit 3a2c575e12
No known key found for this signature in database
19 changed files with 523 additions and 42 deletions

View File

@ -1,6 +1,10 @@
---
# Entry-point playbook: chains the Talos bootstrap playbook and the
# in-cluster services playbook. The tags let you run either stage alone,
# e.g. `ansible-playbook site.yml --tags services`.
- name: Include playbook talos
  ansible.builtin.import_playbook: talos.yml
  tags:
    - talos
- name: Include playbook services
  ansible.builtin.import_playbook: services.yml
  tags:
    - services

View File

@ -1,6 +1,6 @@
---
# Cluster-wide version pins and shared filesystem paths.
kubernetes_version: v1.26.7
# Duplicate key removed: the rendered diff carried both the old v1.5.1 line
# and the new v1.5.2 line; most parsers silently keep the last value, but a
# duplicate mapping key is invalid YAML 1.2 — only the current pin is kept.
talos_version: v1.5.2
ansible_root_dir: "{{ inventory_dir | ansible.builtin.dirname }}"
ansible_vault_password_file: "{{ ansible_root_dir }}/.ansible/vault_pass"
talos_generic_config_dir: "{{ ansible_root_dir }}/configs/talos"

View File

@ -0,0 +1,169 @@
---
# Deploys the Rancher Local Path Provisioner from inlined upstream manifests
# (namespace, RBAC, config, deployment, and a default StorageClass).
# https://github.com/rancher/local-path-provisioner
- name: Local-Path storage Namespace
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: local-path-storage
        labels:
          # Helper pods create/delete directories on the host, so the
          # namespace is opted into the privileged Pod Security profile.
          pod-security.kubernetes.io/audit: privileged
          pod-security.kubernetes.io/enforce: privileged
          pod-security.kubernetes.io/warn: privileged
- name: Local-Path storage ServiceAccount
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: local-path-provisioner-service-account
        namespace: local-path-storage
- name: Local-Path storage ClusterRole
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRole
      metadata:
        name: local-path-provisioner-role
      rules:
        - apiGroups: [ "" ]
          resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
          verbs: [ "get", "list", "watch" ]
        - apiGroups: [ "" ]
          resources: [ "endpoints", "persistentvolumes", "pods" ]
          verbs: [ "*" ]
        - apiGroups: [ "" ]
          resources: [ "events" ]
          verbs: [ "create", "patch" ]
        - apiGroups: [ "storage.k8s.io" ]
          resources: [ "storageclasses" ]
          verbs: [ "get", "list", "watch" ]
- name: Local-Path storage ClusterRoleBinding
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: local-path-provisioner-bind
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: local-path-provisioner-role
      subjects:
        - kind: ServiceAccount
          name: local-path-provisioner-service-account
          namespace: local-path-storage
- name: Local-Path storage ConfigMap
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      kind: ConfigMap
      apiVersion: v1
      metadata:
        name: local-path-config
        namespace: local-path-storage
      data:
        # Volumes for unlisted nodes land under /var/local on the host.
        config.json: |-
          {
            "nodePathMap": [
              {
                "node": "DEFAULT_PATH_FOR_NON_LISTED_NODES",
                "paths": [ "/var/local" ]
              }
            ]
          }
        # Scripts run inside the helper pod to create/remove volume dirs.
        setup: |-
          #!/bin/sh
          set -eu
          mkdir -m 0777 -p "$VOL_DIR"
        teardown: |-
          #!/bin/sh
          set -eu
          rm -rf "$VOL_DIR"
        # NOTE(review): image is the floating `busybox` tag — consider
        # pinning a digest/version for reproducibility.
        helperPod.yaml: |-
          apiVersion: v1
          kind: Pod
          metadata:
            name: helper-pod
          spec:
            containers:
              - name: helper-pod
                image: busybox
                imagePullPolicy: IfNotPresent
- name: Local-Path storage Deployment
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: local-path-provisioner
        namespace: local-path-storage
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: local-path-provisioner
        template:
          metadata:
            labels:
              app: local-path-provisioner
          spec:
            serviceAccountName: local-path-provisioner-service-account
            containers:
              - name: local-path-provisioner
                image: rancher/local-path-provisioner:v0.0.24
                imagePullPolicy: IfNotPresent
                command:
                  - local-path-provisioner
                  - --debug
                  - start
                  - --config
                  - /etc/config/config.json
                volumeMounts:
                  - name: config-volume
                    mountPath: /etc/config/
                env:
                  - name: POD_NAMESPACE
                    valueFrom:
                      fieldRef:
                        fieldPath: metadata.namespace
            volumes:
              - name: config-volume
                configMap:
                  name: local-path-config
- name: Local-Path storage StorageClass
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      apiVersion: storage.k8s.io/v1
      kind: StorageClass
      metadata:
        name: local-path
        annotations:
          # NOTE(review): the beta default-class annotation is long
          # deprecated; presumably kept for older tooling — confirm it is
          # still needed alongside the GA annotation below.
          storageclass.beta.kubernetes.io/is-default-class: 'true'
          storageclass.kubernetes.io/is-default-class: 'true'
      provisioner: rancher.io/local-path
      # NOTE(review): `nodePath` as a StorageClass parameter — verify it is
      # honored by this provisioner version (config.json already sets the path).
      parameters:
        nodePath: /var/local
      volumeBindingMode: WaitForFirstConsumer
      reclaimPolicy: Delete

View File

@ -0,0 +1,3 @@
---
dependencies:
- role: common

View File

@ -0,0 +1,55 @@
---
# Deploys OpenEBS Mayastor (replicated NVMe-oF block storage) via Helm and
# creates a 2-replica StorageClass.
- name: Namespace for Mayastor
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      # Fix: apiVersion was missing from this resource_definition (every
      # other k8s task in this repo specifies it); added so the kind
      # resolves unambiguously.
      apiVersion: v1
      kind: Namespace
      metadata:
        name: mayastor
        labels:
          # Mayastor data-plane pods need host-level access, hence the
          # privileged Pod Security profile.
          pod-security.kubernetes.io/audit: privileged
          pod-security.kubernetes.io/enforce: privileged
          pod-security.kubernetes.io/warn: privileged
- name: Helm add Mayastor repo
  delegate_to: "{{ kubectl_host }}"
  run_once: true
  kubernetes.core.helm_repository:
    name: mayastor
    repo_url: "https://openebs.github.io/mayastor-extensions/"
- name: Helm deploy Mayastor
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.helm:
    kubeconfig: "{{ kubeconfig }}"
    chart_ref: mayastor/mayastor
    release_name: mayastor
    release_namespace: mayastor
    create_namespace: false  # namespace created above with PSS labels
    wait: true
    # https://github.com/openebs/mayastor-extensions/blob/develop/chart/values.yaml
    values:
      io_engine:
        cpuCount: 1
      etcd:
        replicaCount: 3
        # NOTE(review): original indentation was lost; these tolerations are
        # assumed to belong to the etcd sub-chart — confirm against the
        # chart's values schema.
        tolerations:
          - effect: NoSchedule
            operator: Exists
- name: StorageClass mayastor-2replicas
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      apiVersion: storage.k8s.io/v1
      kind: StorageClass
      metadata:
        name: mayastor-2replicas
        # Fix: dropped `namespace: mayastor` — StorageClass is
        # cluster-scoped, so a namespace here is meaningless.
      parameters:
        ioTimeout: "30"
        protocol: nvmf
        repl: "2"  # quoted: Mayastor expects string parameters
      provisioner: io.openebs.csi-mayastor

View File

@ -0,0 +1,3 @@
---
dependencies:
- role: common

View File

@ -0,0 +1,84 @@
---
#
# Not tested and finished yet.
#
# Deploys the HashiCorp Vault Secrets Operator via Helm, then creates a
# VaultAuth (Kubernetes auth method) and a VaultStaticSecret that syncs a
# KV-v2 secret into a Kubernetes Secret.
- name: Helm add Hashicorp repo
  delegate_to: "{{ kubectl_host }}"
  run_once: true
  kubernetes.core.helm_repository:
    name: hashicorp
    repo_url: "https://helm.releases.hashicorp.com"
- name: Helm deploy Hashicorp Vault Secrets Operator
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.helm:
    kubeconfig: "{{ kubeconfig }}"
    chart_ref: hashicorp/vault-secrets-operator
    release_name: vault-secrets-operator
    release_namespace: vault-secrets-operator-system
    create_namespace: true
    wait: true
    # https://github.com/hashicorp/vault-secrets-operator/blob/main/chart/values.yaml
    values:
      defaultVaultConnection:
        enabled: true
        address: "https://zabbix.snel.com:8200"
        skipTLSVerify: false
      # NOTE(review): this `spec.template.spec.containers` stanza looks like
      # a raw Deployment patch, not a documented chart value — verify against
      # the chart's values schema before relying on it (file is marked
      # unfinished above).
      spec:
        template:
          spec:
            containers:
              - name: manager
                args:
                  - "--client-cache-persistence-model=direct-encrypted"
- name: VaultAuth
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      apiVersion: secrets.hashicorp.com/v1beta1
      kind: VaultAuth
      metadata:
        name: static-auth
        namespace: app
      spec:
        method: kubernetes
        # NOTE(review): mount/role look like placeholders from the upstream
        # demo ("demo-auth-mount", "role1") — replace before production use.
        mount: demo-auth-mount
        kubernetes:
          role: role1
          serviceAccount: default
          audiences:
            - vault
- name: VaultStaticSecret
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      apiVersion: secrets.hashicorp.com/v1beta1
      kind: VaultStaticSecret
      metadata:
        name: vault-kv-app
        namespace: app
      spec:
        type: kv-v2
        # mount path
        mount: kvv2
        # path of the secret
        path: webapp/config
        # dest k8s secret
        destination:
          name: secretkv
          create: true
        # static secret refresh interval
        refreshAfter: 30s
        # Name of the CRD to authenticate to Vault
        vaultAuthRef: static-auth

View File

@ -60,19 +60,6 @@
- name: When Bitwarden item not found
when: "not _rancher_password_item | length"
block:
- name: Get Rancher bootstrap secret
delegate_to: "{{ kubectl_host }}"
kubernetes.core.k8s_info:
kubeconfig: "{{ kubeconfig }}"
kind: Secret
name: bootstrap-secret
namespace: cattle-system
register: _rancher_bootstrap_secret
- name: Set rancher password fact
ansible.builtin.set_fact:
_rancher_bootstrap_password: "{{ _rancher_bootstrap_secret.resources[0].data.bootstrapPassword | ansible.builtin.b64decode }}"
- name: Create random password
ansible.builtin.set_fact:
_rancher_password: "{{ lookup('ansible.builtin.password', '/dev/null') }}"
@ -106,7 +93,35 @@
cmd: "bw create item {{ _bitwarden_password_item | ansible.builtin.to_json | ansible.builtin.b64encode }}"
changed_when: true
- name: Rancher login
- name: Rancher login
delegate_to: "{{ kubectl_host }}"
ansible.builtin.uri:
url: "https://{{ rancher_hostname }}/v3-public/localProviders/local?action=login"
method: POST
body_format: json
body:
username: admin
password: "{{ _rancher_password }}"
status_code: [201,401]
register: _rancher_login
- name: Rancher change password
when: "_rancher_login.status == 401"
block:
- name: Get Rancher bootstrap secret
delegate_to: "{{ kubectl_host }}"
kubernetes.core.k8s_info:
kubeconfig: "{{ kubeconfig }}"
kind: Secret
name: bootstrap-secret
namespace: cattle-system
register: _rancher_bootstrap_secret
- name: Set rancher password fact
ansible.builtin.set_fact:
_rancher_bootstrap_password: "{{ _rancher_bootstrap_secret.resources[0].data.bootstrapPassword | ansible.builtin.b64decode }}"
- name: Rancher login using bootstrap password
delegate_to: "{{ kubectl_host }}"
ansible.builtin.uri:
url: "https://{{ rancher_hostname }}/v3-public/localProviders/local?action=login"
@ -131,27 +146,6 @@
newPassword: "{{ _rancher_password }}"
status_code: [200]
- name: Rancher logout
delegate_to: "{{ kubectl_host }}"
ansible.builtin.uri:
url: "https://{{ rancher_hostname }}/v3/tokens?action=logout"
method: POST
headers:
Cookie: "R_SESS={{ _rancher_pwchange_login.json.token }}"
status_code: [200]
- name: Rancher login
delegate_to: "{{ kubectl_host }}"
ansible.builtin.uri:
url: "https://{{ rancher_hostname }}/v3-public/localProviders/local?action=login"
method: POST
body_format: json
body:
username: admin
password: "{{ _rancher_password }}"
status_code: [201]
register: _rancher_login
- name: Rancher logout
delegate_to: "{{ kubectl_host }}"
ansible.builtin.uri:

View File

@ -16,4 +16,4 @@
--kubernetes-version '{{ kubernetes_version }}'
--additional-sans '{{ talos_control_lb_hostname }}'
--force
creates: "{{ talos_node_config_file }}"
# creates: "{{ talos_node_config_file }}"

View File

@ -16,4 +16,4 @@
--kubernetes-version '{{ kubernetes_version }}'
--additional-sans '{{ talos_control_lb_hostname }}'
--force
creates: "{{ talos_node_config_file }}"
# creates: "{{ talos_node_config_file }}"

View File

@ -0,0 +1,3 @@
---
dependencies:
- role: common

View File

@ -0,0 +1,27 @@
---
# Reboots each Talos node in turn via talosctl and waits until the node's
# apid (port 50000) is reachable again before moving on.
- name: Import talos_machine_status tasks
  ansible.builtin.import_tasks: "{{ role_path }}/../../shared/tasks/talos_machine_status.yml"
- name: Reboot Talos
  delegate_to: "{{ talosctl_host }}"
  # Fix: `no` -> `false` (yamllint truthy; YAML 1.1 vs 1.2 parse differently).
  become: false
  throttle: 1  # one node at a time so the cluster stays available
  ansible.builtin.command:
    cmd: >-
      talosctl reboot
      --wait=true
      --endpoints '{{ ansible_remote }}'
      --nodes '{{ ansible_remote }}'
      {% if talos_machine_status.spec.stage == 'maintenance' %} --insecure{% endif %}
  changed_when: true  # a reboot always changes node state
  environment:
    TALOSCONFIG: "{{ talosconfig }}"
- name: Wait for Talos port 50000
  delegate_to: "{{ talosctl_host }}"
  become: false
  throttle: 1
  ansible.builtin.wait_for:
    host: "{{ ansible_remote }}"
    port: 50000

View File

@ -3,14 +3,21 @@
- name: Upgrade Talos
  delegate_to: "{{ talosctl_host }}"
  # Fix: `no` -> `false` (yamllint truthy).
  become: false
  throttle: 1  # upgrade one node at a time
  ansible.builtin.command:
    cmd: >-
      talosctl upgrade
      --image="ghcr.io/siderolabs/installer:{{ talos_version }}"
      --endpoints '{{ talos_control_lb_hostname }}'
      --nodes '{{ ansible_remote }}'
      --wait=true
  # Fix: the rendered diff carried a duplicate `--endpoints` flag (the old
  # '{{ ansible_remote }}' value plus the new LB hostname); only the LB
  # endpoint is kept. NOTE(review): `changed_when: false` on an upgrade that
  # does mutate the node looks wrong — presumably kept so check-mode reports
  # stay quiet; confirm and consider `changed_when: true` like the reboot role.
  changed_when: false
  environment:
    TALOSCONFIG: "{{ talosconfig }}"
- name: Wait for Talos port 50000
  delegate_to: "{{ talosctl_host }}"
  become: false
  ansible.builtin.wait_for:
    host: "{{ ansible_remote }}"
    port: 50000

View File

@ -12,7 +12,7 @@
kubernetes.core.helm:
kubeconfig: "{{ kubeconfig }}"
chart_ref: traefik/traefik
release_name: metallb
release_name: traefik
release_namespace: traefik
create_namespace: true
wait: true

View File

@ -0,0 +1,3 @@
---
dependencies:
- role: common

104
roles/velero/tasks/main.yml Normal file
View File

@ -0,0 +1,104 @@
---
# Deploys Velero (cluster backup/restore) via the VMware Tanzu Helm chart,
# backed by a Wasabi S3 bucket, with the AWS and OpenEBS plugins.
- name: Helm add VMware Tanzu repo for Velero
  delegate_to: "{{ kubectl_host }}"
  run_once: true
  kubernetes.core.helm_repository:
    name: vmware-tanzu
    repo_url: "https://vmware-tanzu.github.io/helm-charts"
- name: Namespace for Velero
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.k8s:
    kubeconfig: "{{ kubeconfig }}"
    resource_definition:
      # Fix: apiVersion was missing from this resource_definition (every
      # other k8s task in this repo specifies it); added so the kind
      # resolves unambiguously.
      apiVersion: v1
      kind: Namespace
      metadata:
        name: velero
        labels:
          pod-security.kubernetes.io/audit: privileged
          pod-security.kubernetes.io/enforce: privileged
          pod-security.kubernetes.io/warn: privileged
# Disabled draft kept from the original file. NOTE(review): it contains raw
# Helm template syntax ({{ include ... }}) that Ansible would hand to Jinja,
# not Helm — it cannot be enabled as-is.
#- name: Secret for Velero
#  delegate_to: "{{ kubectl_host }}"
#  kubernetes.core.k8s:
#    kubeconfig: "{{ kubeconfig }}"
#    resource_definition:
#      apiVersion: v1
#      kind: Secret
#      metadata:
#        name: wasabi
#        namespace: velero
#        labels:
#          app.kubernetes.io/name: {{ include "velero.name" . }}
#          app.kubernetes.io/instance: {{ .Release.Name }}
#          app.kubernetes.io/managed-by: {{ .Release.Service }}
#          helm.sh/chart: {{ include "velero.chart" . }}
#      type: Opaque
#      data:
- name: Helm deploy Velero
  delegate_to: "{{ kubectl_host }}"
  kubernetes.core.helm:
    kubeconfig: "{{ kubeconfig }}"
    chart_ref: vmware-tanzu/velero
    release_name: velero
    release_namespace: velero
    create_namespace: false  # namespace created above with PSS labels
    wait: true
    # https://github.com/vmware-tanzu/helm-charts/blob/main/charts/velero/values.yaml
    values:
      # NOTE(review): both plugin images use the floating `latest` tag —
      # pin versions for reproducible restores.
      initContainers:
        - name: velero-plugin-for-aws
          image: velero/velero-plugin-for-aws:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /target
              name: plugins
        - name: openebs-velero-plugin
          image: openebs/velero-plugin:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /target
              name: plugins
      configuration:
        # https://github.com/vmware-tanzu/velero-plugin-for-aws/blob/main/backupstoragelocation.md
        # https://velero.io/docs/v1.12/api-types/backupstoragelocation/#parameter-reference
        backupStorageLocation:
          - name: wasabi
            provider: velero.io/aws
            bucket: snelcom-velero
            default: true
            credential:
              name: wasabi-secret
              key: cloud
            config:
              region: eu-central-2
              s3ForcePathStyle: true
              s3Url: https://s3.eu-central-2.wasabisys.com
        # https://github.com/vmware-tanzu/velero-plugin-for-aws/blob/main/volumesnapshotlocation.md
        # https://velero.io/docs/v1.12/api-types/volumesnapshotlocation/#parameter-reference
        volumeSnapshotLocation:
          - name: wasabi
            # NOTE(review): cstor-blockstore provider pointed at the
            # `mayastor` namespace — confirm the OpenEBS plugin supports
            # Mayastor volumes, not just cStor.
            provider: openebs.io/cstor-blockstore
            bucket: snelcom-velero
            default: true
            credential:
              name: wasabi-secret
              key: cloud
            config:
              namespace: mayastor
              local: "true"
      # SECURITY(review): real-looking S3 access keys are committed to VCS
      # here. Rotate these credentials and move them to Ansible Vault or an
      # external secret store instead of plain text.
      credentials:
        name: wasabi-secret
        secretContents:
          cloud: |
            [default]
            aws_access_key_id=PZJC2PIGBBUGV6SADUTH
            aws_secret_access_key=Ir3eLXvZflinjIe01MDag6s0ReE1Af3zCG5bS4ID

View File

@ -8,6 +8,23 @@
- talos_first_nodes
roles:
- role: metallb
tags:
- metallb
- role: traefik
tags:
- traefik
- role: cert_manager
tags:
- cert_manager
- role: rancher
tags:
- rancher
- role: local_path_storage
tags:
- local_path_storage
- role: mayastor
tags:
- mayastor
- role: velero
tags:
- velero

8
talos_reboot.yml Normal file
View File

@ -0,0 +1,8 @@
---
- name: Reset Talos
gather_facts: false
hosts:
- talos
roles:
- role: talos_reboot

View File

@ -1,6 +1,6 @@
---
- name: Reset Talos
- name: Upgrade Talos
gather_facts: false
hosts:
- talos