From 3a2c575e129254158e1250ca478442c10423a65e Mon Sep 17 00:00:00 2001 From: Jeroen Vermeulen Date: Mon, 11 Sep 2023 18:28:49 +0200 Subject: [PATCH] Mayastor, Velero, Rancher improved --- playbook.yml | 4 + roles/common/defaults/main.yml | 2 +- roles/local_path_storage/tasks/main.yml | 169 ++++++++++++++++++ roles/mayastor/meta/main.yml | 3 + roles/mayastor/tasks/main.yml | 55 ++++++ .../not_used/hashicorp_secrets/meta/main.yml | 3 + .../not_used/hashicorp_secrets/tasks/main.yml | 84 +++++++++ roles/rancher/tasks/main.yml | 64 +++---- .../tasks/create_control_configs.yml | 2 +- .../tasks/create_worker_configs.yml | 2 +- roles/talos_reboot/meta/main.yml | 3 + roles/talos_reboot/tasks/main.yml | 27 +++ roles/talos_upgrade/tasks/main.yml | 11 +- roles/traefik/tasks/main.yml | 2 +- roles/velero/meta/main.yml | 3 + roles/velero/tasks/main.yml | 104 +++++++++++ services.yml | 17 ++ talos_reboot.yml | 8 + talos_upgrade.yml | 2 +- 19 files changed, 523 insertions(+), 42 deletions(-) create mode 100644 roles/local_path_storage/tasks/main.yml create mode 100644 roles/mayastor/meta/main.yml create mode 100644 roles/mayastor/tasks/main.yml create mode 100644 roles/not_used/hashicorp_secrets/meta/main.yml create mode 100644 roles/not_used/hashicorp_secrets/tasks/main.yml create mode 100644 roles/talos_reboot/meta/main.yml create mode 100644 roles/talos_reboot/tasks/main.yml create mode 100644 roles/velero/meta/main.yml create mode 100644 roles/velero/tasks/main.yml create mode 100644 talos_reboot.yml diff --git a/playbook.yml b/playbook.yml index 7692b12..56b0768 100644 --- a/playbook.yml +++ b/playbook.yml @@ -1,6 +1,10 @@ --- - name: Include playbook talos ansible.builtin.import_playbook: talos.yml + tags: + - talos - name: Include playbook services ansible.builtin.import_playbook: services.yml + tags: + - services \ No newline at end of file diff --git a/roles/common/defaults/main.yml b/roles/common/defaults/main.yml index 124f526..9818a3f 100644 --- 
a/roles/common/defaults/main.yml +++ b/roles/common/defaults/main.yml @@ -1,6 +1,6 @@ --- kubernetes_version: v1.26.7 -talos_version: v1.5.1 +talos_version: v1.5.2 ansible_root_dir: "{{ inventory_dir | ansible.builtin.dirname }}" ansible_vault_password_file: "{{ ansible_root_dir }}/.ansible/vault_pass" talos_generic_config_dir: "{{ ansible_root_dir }}/configs/talos" diff --git a/roles/local_path_storage/tasks/main.yml b/roles/local_path_storage/tasks/main.yml new file mode 100644 index 0000000..9ae4788 --- /dev/null +++ b/roles/local_path_storage/tasks/main.yml @@ -0,0 +1,169 @@ +--- +# https://github.com/rancher/local-path-provisioner + +- name: Local-Path storage Namespace + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + apiVersion: v1 + kind: Namespace + metadata: + name: local-path-storage + labels: + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/warn: privileged + +- name: Local-Path storage ServiceAccount + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: local-path-provisioner-service-account + namespace: local-path-storage + +- name: Local-Path storage ClusterRole + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: local-path-provisioner-role + rules: + - apiGroups: [ "" ] + resources: [ "nodes", "persistentvolumeclaims", "configmaps" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "endpoints", "persistentvolumes", "pods" ] + verbs: [ "*" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create", "patch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + 
+- name: Local-Path storage ClusterRoleBinding + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: local-path-provisioner-bind + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: local-path-provisioner-role + subjects: + - kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: local-path-storage + +- name: Local-Path storage ConfigMap + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + kind: ConfigMap + apiVersion: v1 + metadata: + name: local-path-config + namespace: local-path-storage + data: + config.json: |- + { + "nodePathMap": [ + { + "node": "DEFAULT_PATH_FOR_NON_LISTED_NODES", + "paths": [ "/var/local" ] + } + ] + } + setup: |- + #!/bin/sh + set -eu + mkdir -m 0777 -p "$VOL_DIR" + teardown: |- + #!/bin/sh + set -eu + rm -rf "$VOL_DIR" + helperPod.yaml: |- + apiVersion: v1 + kind: Pod + metadata: + name: helper-pod + spec: + containers: + - name: helper-pod + image: busybox + imagePullPolicy: IfNotPresent + +- name: Local-Path storage Deployment + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: local-path-provisioner + namespace: local-path-storage + spec: + replicas: 1 + selector: + matchLabels: + app: local-path-provisioner + template: + metadata: + labels: + app: local-path-provisioner + spec: + serviceAccountName: local-path-provisioner-service-account + containers: + - name: local-path-provisioner + image: rancher/local-path-provisioner:v0.0.24 + imagePullPolicy: IfNotPresent + command: + - local-path-provisioner + - --debug + - start + - --config + - /etc/config/config.json + volumeMounts: + - name: config-volume + mountPath: /etc/config/ + env: + - name: 
POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: config-volume + configMap: + name: local-path-config + +- name: Local-Path storage StorageClass + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: local-path + annotations: + storageclass.beta.kubernetes.io/is-default-class: 'true' + storageclass.kubernetes.io/is-default-class: 'true' + provisioner: rancher.io/local-path + parameters: + nodePath: /var/local + volumeBindingMode: WaitForFirstConsumer + reclaimPolicy: Delete diff --git a/roles/mayastor/meta/main.yml b/roles/mayastor/meta/main.yml new file mode 100644 index 0000000..fdda41b --- /dev/null +++ b/roles/mayastor/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common diff --git a/roles/mayastor/tasks/main.yml b/roles/mayastor/tasks/main.yml new file mode 100644 index 0000000..9e6ab1c --- /dev/null +++ b/roles/mayastor/tasks/main.yml @@ -0,0 +1,55 @@ +--- +- name: Namespace for Mayastor + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + kind: Namespace + metadata: + name: mayastor + labels: + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/warn: privileged + +- name: Helm add Mayastor repo + delegate_to: "{{ kubectl_host }}" + run_once: true + kubernetes.core.helm_repository: + name: mayastor + repo_url: "https://openebs.github.io/mayastor-extensions/" + +- name: Helm deploy Mayastor + delegate_to: "{{ kubectl_host }}" + kubernetes.core.helm: + kubeconfig: "{{ kubeconfig }}" + chart_ref: mayastor/mayastor + release_name: mayastor + release_namespace: mayastor + create_namespace: false + wait: true + # https://github.com/openebs/mayastor-extensions/blob/develop/chart/values.yaml + values: + io_engine: + cpuCount: 1 + etcd: + 
replicaCount: 3 + tolerations: + - effect: NoSchedule + operator: Exists + +- name: StorageClass mayastor-2replicas + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: mayastor-2replicas + namespace: mayastor + parameters: + ioTimeout: "30" + protocol: nvmf + repl: "2" + provisioner: io.openebs.csi-mayastor diff --git a/roles/not_used/hashicorp_secrets/meta/main.yml b/roles/not_used/hashicorp_secrets/meta/main.yml new file mode 100644 index 0000000..fdda41b --- /dev/null +++ b/roles/not_used/hashicorp_secrets/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common diff --git a/roles/not_used/hashicorp_secrets/tasks/main.yml b/roles/not_used/hashicorp_secrets/tasks/main.yml new file mode 100644 index 0000000..41719f5 --- /dev/null +++ b/roles/not_used/hashicorp_secrets/tasks/main.yml @@ -0,0 +1,84 @@ +--- + +# +# Not tested and finished yet. +# + +- name: Helm add Hashicorp repo + delegate_to: "{{ kubectl_host }}" + run_once: true + kubernetes.core.helm_repository: + name: hashicorp + repo_url: "https://helm.releases.hashicorp.com" + +- name: Helm deploy Hashicorp Vault Secrets Operator + delegate_to: "{{ kubectl_host }}" + kubernetes.core.helm: + kubeconfig: "{{ kubeconfig }}" + chart_ref: hashicorp/vault-secrets-operator + release_name: vault-secrets-operator + release_namespace: vault-secrets-operator-system + create_namespace: true + wait: true + # https://github.com/hashicorp/vault-secrets-operator/blob/main/chart/values.yaml + values: + defaultVaultConnection: + enabled: true + address: "https://zabbix.snel.com:8200" + skipTLSVerify: false + spec: + template: + spec: + containers: + - name: manager + args: + - "--client-cache-persistence-model=direct-encrypted" + +- name: VaultAuth + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + apiVersion: 
secrets.hashicorp.com/v1beta1 + kind: VaultAuth + metadata: + name: static-auth + namespace: app + spec: + method: kubernetes + mount: demo-auth-mount + kubernetes: + role: role1 + serviceAccount: default + audiences: + - vault + +- name: VaultStaticSecret + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + apiVersion: secrets.hashicorp.com/v1beta1 + kind: VaultStaticSecret + metadata: + name: vault-kv-app + namespace: app + spec: + type: kv-v2 + + # mount path + mount: kvv2 + + # path of the secret + path: webapp/config + + # dest k8s secret + destination: + name: secretkv + create: true + + # static secret refresh interval + refreshAfter: 30s + + # Name of the CRD to authenticate to Vault + vaultAuthRef: static-auth \ No newline at end of file diff --git a/roles/rancher/tasks/main.yml b/roles/rancher/tasks/main.yml index 148503e..0178146 100644 --- a/roles/rancher/tasks/main.yml +++ b/roles/rancher/tasks/main.yml @@ -60,19 +60,6 @@ - name: When Bitwarden item not found when: "not _rancher_password_item | length" block: - - name: Get Rancher bootstrap secret - delegate_to: "{{ kubectl_host }}" - kubernetes.core.k8s_info: - kubeconfig: "{{ kubeconfig }}" - kind: Secret - name: bootstrap-secret - namespace: cattle-system - register: _rancher_bootstrap_secret - - - name: Set rancher password fact - ansible.builtin.set_fact: - _rancher_bootstrap_password: "{{ _rancher_bootstrap_secret.resources[0].data.bootstrapPassword | ansible.builtin.b64decode }}" - - name: Create random password ansible.builtin.set_fact: _rancher_password: "{{ lookup('ansible.builtin.password', '/dev/null') }}" @@ -106,7 +93,35 @@ cmd: "bw create item {{ _bitwarden_password_item | ansible.builtin.to_json | ansible.builtin.b64encode }}" changed_when: true - - name: Rancher login +- name: Rancher login + delegate_to: "{{ kubectl_host }}" + ansible.builtin.uri: + url: "https://{{ rancher_hostname 
}}/v3-public/localProviders/local?action=login" + method: POST + body_format: json + body: + username: admin + password: "{{ _rancher_password }}" + status_code: [201,401] + register: _rancher_login + +- name: Rancher change password + when: "_rancher_login.status == 401" + block: + - name: Get Rancher bootstrap secret + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s_info: + kubeconfig: "{{ kubeconfig }}" + kind: Secret + name: bootstrap-secret + namespace: cattle-system + register: _rancher_bootstrap_secret + + - name: Set rancher password fact + ansible.builtin.set_fact: + _rancher_bootstrap_password: "{{ _rancher_bootstrap_secret.resources[0].data.bootstrapPassword | ansible.builtin.b64decode }}" + + - name: Rancher login using bootstrap password delegate_to: "{{ kubectl_host }}" ansible.builtin.uri: url: "https://{{ rancher_hostname }}/v3-public/localProviders/local?action=login" @@ -131,27 +146,6 @@ newPassword: "{{ _rancher_password }}" status_code: [200] - - name: Rancher logout - delegate_to: "{{ kubectl_host }}" - ansible.builtin.uri: - url: "https://{{ rancher_hostname }}/v3/tokens?action=logout" - method: POST - headers: - Cookie: "R_SESS={{ _rancher_pwchange_login.json.token }}" - status_code: [200] - -- name: Rancher login - delegate_to: "{{ kubectl_host }}" - ansible.builtin.uri: - url: "https://{{ rancher_hostname }}/v3-public/localProviders/local?action=login" - method: POST - body_format: json - body: - username: admin - password: "{{ _rancher_password }}" - status_code: [201] - register: _rancher_login - - name: Rancher logout delegate_to: "{{ kubectl_host }}" ansible.builtin.uri: diff --git a/roles/talos_configs_create/tasks/create_control_configs.yml b/roles/talos_configs_create/tasks/create_control_configs.yml index 66ba9c5..66d92fa 100644 --- a/roles/talos_configs_create/tasks/create_control_configs.yml +++ b/roles/talos_configs_create/tasks/create_control_configs.yml @@ -16,4 +16,4 @@ --kubernetes-version '{{ kubernetes_version }}' 
--additional-sans '{{ talos_control_lb_hostname }}' --force - creates: "{{ talos_node_config_file }}" +# creates: "{{ talos_node_config_file }}" diff --git a/roles/talos_configs_create/tasks/create_worker_configs.yml b/roles/talos_configs_create/tasks/create_worker_configs.yml index e229e25..e875502 100644 --- a/roles/talos_configs_create/tasks/create_worker_configs.yml +++ b/roles/talos_configs_create/tasks/create_worker_configs.yml @@ -16,4 +16,4 @@ --kubernetes-version '{{ kubernetes_version }}' --additional-sans '{{ talos_control_lb_hostname }}' --force - creates: "{{ talos_node_config_file }}" +# creates: "{{ talos_node_config_file }}" diff --git a/roles/talos_reboot/meta/main.yml b/roles/talos_reboot/meta/main.yml new file mode 100644 index 0000000..fdda41b --- /dev/null +++ b/roles/talos_reboot/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common diff --git a/roles/talos_reboot/tasks/main.yml b/roles/talos_reboot/tasks/main.yml new file mode 100644 index 0000000..604bb9c --- /dev/null +++ b/roles/talos_reboot/tasks/main.yml @@ -0,0 +1,27 @@ +--- + +- name: Import talos_machine_status tasks + ansible.builtin.import_tasks: "{{ role_path }}/../../shared/tasks/talos_machine_status.yml" + +- name: Reboot Talos + delegate_to: "{{ talosctl_host }}" + become: no + throttle: 1 + ansible.builtin.command: + cmd: >- + talosctl reboot + --wait=true + --endpoints '{{ ansible_remote }}' + --nodes '{{ ansible_remote }}' + {% if talos_machine_status.spec.stage == 'maintenance' %} --insecure{% endif %} + changed_when: true + environment: + TALOSCONFIG: "{{ talosconfig }}" + +- name: Wait for Talos port 50000 + delegate_to: "{{ talosctl_host }}" + become: no + throttle: 1 + ansible.builtin.wait_for: + host: "{{ ansible_remote }}" + port: 50000 diff --git a/roles/talos_upgrade/tasks/main.yml b/roles/talos_upgrade/tasks/main.yml index c8b5dbc..14a0543 100644 --- a/roles/talos_upgrade/tasks/main.yml +++ b/roles/talos_upgrade/tasks/main.yml @@ -3,14 +3,21 @@ - name: 
Upgrade Talos delegate_to: "{{ talosctl_host }}" become: no + throttle: 1 ansible.builtin.command: cmd: >- talosctl upgrade --image="ghcr.io/siderolabs/installer:{{ talos_version }}" - --endpoints '{{ ansible_remote }}' + --endpoints '{{ talos_control_lb_hostname }}' --nodes '{{ ansible_remote }}' + --wait=true changed_when: false environment: TALOSCONFIG: "{{ talosconfig }}" - +- name: Wait for Talos port 50000 + delegate_to: "{{ talosctl_host }}" + become: no + ansible.builtin.wait_for: + host: "{{ ansible_remote }}" + port: 50000 diff --git a/roles/traefik/tasks/main.yml b/roles/traefik/tasks/main.yml index 7a1da45..88afbe9 100644 --- a/roles/traefik/tasks/main.yml +++ b/roles/traefik/tasks/main.yml @@ -12,7 +12,7 @@ kubernetes.core.helm: kubeconfig: "{{ kubeconfig }}" chart_ref: traefik/traefik - release_name: metallb + release_name: traefik release_namespace: traefik create_namespace: true wait: true diff --git a/roles/velero/meta/main.yml b/roles/velero/meta/main.yml new file mode 100644 index 0000000..fdda41b --- /dev/null +++ b/roles/velero/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common diff --git a/roles/velero/tasks/main.yml b/roles/velero/tasks/main.yml new file mode 100644 index 0000000..52a6f7d --- /dev/null +++ b/roles/velero/tasks/main.yml @@ -0,0 +1,104 @@ +--- + +- name: Helm add VMware Tanzu repo for Velero + delegate_to: "{{ kubectl_host }}" + run_once: true + kubernetes.core.helm_repository: + name: vmware-tanzu + repo_url: "https://vmware-tanzu.github.io/helm-charts" + +- name: Namespace for Velero + delegate_to: "{{ kubectl_host }}" + kubernetes.core.k8s: + kubeconfig: "{{ kubeconfig }}" + resource_definition: + kind: Namespace + metadata: + name: velero + labels: + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/warn: privileged + +#- name: Secret for Velero +# delegate_to: "{{ kubectl_host }}" +# kubernetes.core.k8s: +# kubeconfig: "{{ kubeconfig 
}}" +# resource_definition: +# apiVersion: v1 +# kind: Secret +# metadata: +# name: wasabi +# namespace: velero +# labels: +# app.kubernetes.io/name: {{ include "velero.name" . }} +# app.kubernetes.io/instance: {{ .Release.Name }} +# app.kubernetes.io/managed-by: {{ .Release.Service }} +# helm.sh/chart: {{ include "velero.chart" . }} +# type: Opaque +# data: + +- name: Helm deploy Velero + delegate_to: "{{ kubectl_host }}" + kubernetes.core.helm: + kubeconfig: "{{ kubeconfig }}" + chart_ref: vmware-tanzu/velero + release_name: velero + release_namespace: velero + create_namespace: false + wait: true + # https://github.com/vmware-tanzu/helm-charts/blob/main/charts/velero/values.yaml + values: + initContainers: + - name: velero-plugin-for-aws + image: velero/velero-plugin-for-aws:latest + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /target + name: plugins + - name: openebs-velero-plugin + image: openebs/velero-plugin:latest + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /target + name: plugins + configuration: + # https://github.com/vmware-tanzu/velero-plugin-for-aws/blob/main/backupstoragelocation.md + # https://velero.io/docs/v1.12/api-types/backupstoragelocation/#parameter-reference + backupStorageLocation: + - name: wasabi + provider: velero.io/aws + bucket: snelcom-velero + default: true + credential: + name: wasabi-secret + key: cloud + config: + region: eu-central-2 + s3ForcePathStyle: true + s3Url: https://s3.eu-central-2.wasabisys.com + # https://github.com/vmware-tanzu/velero-plugin-for-aws/blob/main/volumesnapshotlocation.md + # https://velero.io/docs/v1.12/api-types/volumesnapshotlocation/#parameter-reference + volumeSnapshotLocation: + - name: wasabi + provider: openebs.io/cstor-blockstore + bucket: snelcom-velero + default: true + credential: + name: wasabi-secret + key: cloud + config: + namespace: mayastor + local: "true" + credentials: + name: wasabi-secret + secretContents: + cloud: | + [default] + 
aws_access_key_id={{ velero_s3_access_key_id }} + aws_secret_access_key={{ velero_s3_secret_access_key }} + + + + + diff --git a/services.yml b/services.yml index f0077ee..61b454b 100644 --- a/services.yml +++ b/services.yml @@ -8,6 +8,23 @@ - talos_first_nodes roles: - role: metallb + tags: + - metallb - role: traefik + tags: + - traefik - role: cert_manager + tags: + - cert_manager - role: rancher + tags: + - rancher + - role: local_path_storage + tags: + - local_path_storage + - role: mayastor + tags: + - mayastor + - role: velero + tags: + - velero diff --git a/talos_reboot.yml b/talos_reboot.yml new file mode 100644 index 0000000..b84de0e --- /dev/null +++ b/talos_reboot.yml @@ -0,0 +1,8 @@ +--- + +- name: Reboot Talos + gather_facts: false + hosts: + - talos + roles: + - role: talos_reboot diff --git a/talos_upgrade.yml b/talos_upgrade.yml index 3208c56..64d85fd 100644 --- a/talos_upgrade.yml +++ b/talos_upgrade.yml @@ -1,6 +1,6 @@ --- -- name: Reset Talos +- name: Upgrade Talos gather_facts: false hosts: - talos