# teardown.yml (forked from dandi/dandi-hub)
---
#### JupyterHub to Zero: tear down the JupyterHub helm release, the Kubernetes cluster, and the AWS fixtures ####
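# Example invocations (the inventory path below is illustrative, not part of the repo):
#   ansible-playbook -i inventory teardown.yml                      # 'always' tasks only: remove the helm release
#   ansible-playbook -i inventory teardown.yml --tags kubernetes    # also tear down the kops cluster
#   ansible-playbook -i inventory teardown.yml --tags all-fixtures  # cluster plus AWS fixtures (EFS, S3, ...)
# Tasks tagged 'never' run only when one of their other tags is requested explicitly.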
- hosts: all
  user: ec2-user
  environment:
    - AWS_REGION: "{{ aws_region }}"
    - KOPS_STATE_STORE: s3://{{ namespace }}-s3
  tasks:
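    # boto3 is required on the remote host by the AWS modules used later in this
    # play (efs_info, efs, ec2_group, aws_s3, ec2_instance_info).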
    - name: Install boto3
      pip:
        name: boto3
        executable: /usr/bin/pip-3
      become: yes
      tags: always
    ## Fix issue with importing AliasedEventEmitter
    - name: Upgrade awscli
      pip:
        name: awscli
        state: latest
        executable: /usr/bin/pip-3
      become: yes
      tags: always
    ## Idempotently delete the JupyterHub helm release
    - name: Determine if Helm is available
      command: helm version
      failed_when: False
      changed_when: False
      register: helmver_result
      tags: always
    - name: Determine if JH is currently installed
      command: helm status --namespace {{ namespace }} {{ namespace }}-jupyterhub
      register: jh_installed_result
      failed_when: False
      changed_when: False
      tags: always
    # Delete the Helm release. This deletes all resources that were created
    # by Helm for your JupyterHub deployment.
    - name: Delete helm release
      command: helm delete {{ namespace }}-jupyterhub --namespace {{ namespace }}
      when: helmver_result.stdout is defined and not helmver_result.rc and not jh_installed_result.rc
      tags: always
    - name: Check if the nodes yaml file exists
      stat:
        path: /home/ec2-user/{{ namespace }}/cluster-autoscaler-multi-asg.yaml
      register: autoscaler_yaml
      tags: always
    - block:
        # Need to remove the spot group and autoscaler as well
        - name: Detach role policy
          shell: aws iam detach-role-policy --policy-arn arn:aws:iam::278212569472:policy/ig-policy --role-name nodes.{{ namespace }}.k8s.local
          register: aws_result
          until: aws_result.stderr.find('ig-policy was not found') != -1
          retries: 2
          delay: 5
          failed_when:
            - aws_result.rc != 0
            - '"ig-policy was not found" not in aws_result.stderr'
        - name: Delete the nodes spec files
          file:
            path: "{{ item }}"
            state: absent
          with_items:
            - /home/ec2-user/{{ namespace }}/spot-ig.yaml
            - /home/ec2-user/{{ namespace }}/spot-ig-gpu.yaml
            - /home/ec2-user/{{ namespace }}/pod.yaml
            - /home/ec2-user/{{ namespace }}/nodes1.yaml
            - /home/ec2-user/{{ namespace }}/nodes2.yaml
            - /home/ec2-user/{{ namespace }}/nodes3.yaml
            - /home/ec2-user/{{ namespace }}/cluster-autoscaler-multi-asg.yaml
      when: autoscaler_yaml.stat.exists
      tags:
        - never
        - kubernetes
        - all-fixtures
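    # 'kops export kubecfg --admin' writes admin credentials for the cluster into the
    # local kubeconfig so that the kubectl commands below can reach it.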
    - name: Set kubectl context
      command: kops export kubecfg --admin --name {{ namespace }}.k8s.local
      ignore_errors: yes
      tags: always
    - name: Check for kube namespace
      command: kubectl get namespace {{ namespace }}
      failed_when: False
      changed_when: False
      register: get_kube_namespace
      tags:
        - never
        - kubernetes
        - all-fixtures
    # Next, delete the Kubernetes namespace the hub was installed in.
    # This deletes any disks that may have been created to store user's
    # data, and any IP addresses that may have been provisioned.
    - name: Delete kubernetes namespace
      command: kubectl delete namespace {{ namespace }}
      when: not get_kube_namespace.rc
      tags:
        - never
        - kubernetes
        - all-fixtures
    - name: Gather EFS facts
      efs_info:
        name: "{{ namespace }}-efs"
      register: efsresult
      tags:
        - never
        - kubernetes
        - all-fixtures
    - debug: msg="EFS details {{ efsresult['efs'] }}"
      tags:
        - never
        - kubernetes
        - all-fixtures
    - name: Remove EFS mount targets
      command: aws efs delete-mount-target --region {{ aws_region }} --mount-target-id {{ item['mount_target_id'] }}
      with_items:
        "{{ efsresult['efs'][0]['mount_targets'] }}"
      tags:
        - never
        - kubernetes
        - all-fixtures
      when: efsresult['efs'][0] is defined
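    # An EFS file system cannot be deleted while it still has mount targets, so the
    # mount targets are removed before the file system itself.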
    ## Only delete the EFS if the play is run explicitly with tag 'all-fixtures' or 'fixture-efs'
    - name: Delete EFS fs
      efs:
        state: absent
        name: "{{ namespace }}-efs"
      tags:
        - all-fixtures
        - fixture-efs
        - never
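    # e.g. (illustrative): ansible-playbook -i inventory teardown.yml --tags fixture-efs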
    ## Wait for mount targets to be removed
    - name: Sleep for 15 seconds and continue with play
      wait_for: timeout=15
      delegate_to: localhost
      tags:
        - never
        - kubernetes
        - all-fixtures
    # Delete security groups
    - name: Delete NFS Security Group
      ec2_group:
        name: "{{ namespace }}.nfs"
        state: absent
      tags:
        - all-fixtures
        - kubernetes
        - never
    - name: Determine if cluster exists
      shell: kops get clusters
      tags:
        - all-fixtures
        - kubernetes
        - never
      failed_when: False
      changed_when: False
      register: get_cluster
    - name: Delete kubernetes cluster
      shell: kops delete cluster {{ namespace }}.k8s.local --yes
      tags:
        - all-fixtures
        - kubernetes
        - never
      when: not get_cluster.rc
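    # {{ namespace }}-s3 is the kops state store (see KOPS_STATE_STORE above), so it is
    # only removed after the cluster itself has been deleted.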
    - name: Delete S3 bucket
      aws_s3:
        bucket: "{{ namespace }}-s3"
        mode: delete
      register: s3_log
      tags:
        - all-fixtures
        - fixture-s3
        - never
    - name: Gather EC2 Facts
      ec2_instance_info:
        filters:
          "tag:Name": "{{ namespace }}-ci"
      register: ec2_facts
      tags:
        - all-fixtures
        - fixture-ci
        - never