# cluster-restore playbook
# read the guide: 'op/cluster_restore.md'
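# a typical invocation, assuming the default kubeasz layout (path is illustrative):
#   $ ansible-playbook /etc/ansible/24.restore.yml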
# to restore the CA and related files on the 'deploy' node
- hosts: deploy
tasks:
  - name: Restoring the CA cert directory
    file: name={{ ca_dir }}/ state=directory
  - name: Restoring the CA and related files
copy:
src: "{{ base_dir }}/roles/cluster-backup/files/ca/{{ item }}"
dest: "{{ ca_dir }}/{{ item }}"
with_items:
- ca.pem
- ca-key.pem
- ca.csr
- ca-csr.json
- ca-config.json
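# [optional] a quick sanity check of the restored CA (run on the 'deploy' node;
# in kubeasz, ca_dir is normally /etc/kubernetes/ssl):
#   $ openssl x509 -noout -subject -dates -in /etc/kubernetes/ssl/ca.pem
# to re-run the 'deploy' role with the restored CA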
- hosts: deploy
roles:
- deploy
# pre-tasks on all nodes
- hosts: all
roles:
- prepare
# [optional] only needed by a multi-master cluster
- hosts: lb
roles:
- lb
# to install the etcd cluster
- hosts: etcd
roles:
- etcd
# to install docker
- hosts:
- kube-master
- kube-node
- new-master
- new-node
roles:
- docker
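# [optional] docker can be sanity-checked on any node once this play finishes:
#   $ docker version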
# to set up 'kube-master' nodes
- hosts:
- kube-master
- new-master
roles:
- kube-master
- kube-node
  # post-setup tasks on master nodes
tasks:
- name: Making master nodes SchedulingDisabled
    shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}"
when: DEPLOY_MODE != "allinone"
ignore_errors: true
- name: Setting master role name
shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
ignore_errors: true
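# [optional] cordoned masters should now report 'Ready,SchedulingDisabled' and
# carry the 'master' role label:
#   $ kubectl get nodes --show-labels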
# to set up 'kube-node' nodes
- hosts:
- kube-node
- new-node
roles:
- kube-node
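# [optional] worker nodes should register and turn Ready shortly after this play:
#   $ kubectl get nodes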
# to restore the etcd cluster data
- hosts: etcd
roles:
- cluster-restore
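# [optional] once the restore finishes, etcd health can be checked from any etcd
# node (cert paths below assume the default kubeasz layout):
#   $ ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
#       --cacert=/etc/kubernetes/ssl/ca.pem \
#       --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem \
#       endpoint health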