-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathrelease.py
More file actions
403 lines (358 loc) · 16.6 KB
/
release.py
File metadata and controls
403 lines (358 loc) · 16.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
import logging
from datetime import datetime
from django.utils import timezone
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.contrib.auth import get_user_model
from django.db.models import F, Func, Value, JSONField
from api.tasks import run_pipeline
from api.exceptions import DryccException, AlreadyExists
from scheduler import KubeHTTPException
from scheduler.resources.pod import DEFAULT_CONTAINER_PORT
from ..utils import DeployLock
from .base import UuidAuditedModel
from .appsettings import AppSettings
User = get_user_model()
logger = logging.getLogger(__name__)
class Release(UuidAuditedModel):
    """
    Software release deployed by the application platform
    Releases contain a :class:`Build` and a :class:`Config`.
    """
    # Lifecycle states for the release pipeline run.
    STATE_CHOICES = (
        ("created", "Release created but not deployed"),
        ("crashed", "Release pipeline runtime crashed"),
        ("succeed", "Release pipeline runtime succeed"),
    )
    # User who triggered the release; PROTECT keeps the author row around.
    owner = models.ForeignKey(User, on_delete=models.PROTECT)
    # Owning application; releases are deleted together with the app.
    app = models.ForeignKey('App', on_delete=models.CASCADE)
    # Current pipeline state; defaults to "created".
    state = models.TextField(choices=STATE_CHOICES, default=STATE_CHOICES[0][0])
    # Auto-incrementing per-app version (unique together with app, see Meta).
    version = models.PositiveIntegerField()
    # Human-readable change description, composed in save().
    summary = models.TextField(blank=True, null=True)
    # Set to True when the pipeline failed; checked by rollback()/previous().
    failed = models.BooleanField(default=False)
    # Last recorded error text, written by rollback() on failure.
    exception = models.TextField(blank=True, null=True)
    # Append-only list of condition dicts; see add_condition().
    conditions = models.JSONField(default=list)
    config = models.ForeignKey('Config', on_delete=models.CASCADE)
    # null=True: a release can exist without a build (get_deploy_args checks for None).
    build = models.ForeignKey('Build', null=True, on_delete=models.CASCADE)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'version'),)
def __str__(self):
return "{0}-{1}".format(self.app.id, self.version_name)
@property
def procfile_types(self):
if self.build is not None:
return self.build.procfile_types
return []
@property
def version_name(self):
return f'v{self.version}'
    def get_runners(self, procfile_types):
        """Collect pipeline-run specs from the build's dryccfile ``run`` section.

        A run entry is emitted once as soon as any requested container type
        matches its ``when.ptypes`` filter (an empty filter matches every
        type); the inner loop then breaks so each entry appears at most once.

        :param procfile_types: container types to match; a falsy value falls
            back to all of the build's procfile types
        :return: list of dicts with ``image``/``args``/``command``/``timeout``
        """
        results = []
        # fall back to every procfile type when no explicit selection is given
        procfile_types = self.procfile_types if not procfile_types else procfile_types
        for run in self.build.dryccfile.get('run', []):
            for container_type in procfile_types:
                when_ptypes = run.get('when', {}).get('ptypes', [])
                if not when_ptypes or container_type in when_ptypes:
                    # NOTE(review): dict.get evaluates its default eagerly, so
                    # build.get_image(container_type) runs even when 'image'
                    # is present — presumably get_image is side-effect free;
                    # confirm.
                    image = run.get('image', self.build.get_image(container_type))
                    results.append({
                        # resolve the (possibly symbolic) image name again with
                        # itself as the fallback — mirrors get_deploy_image()
                        'image': self.build.get_image(image, default_image=image),
                        'args': run.get('args', []),
                        'command': run.get('command', []),
                        'timeout': run.get('timeout', settings.DRYCC_PILELINE_RUN_TIMEOUT),
                    })
                    break
        return results
def add_condition(self, **kwargs):
if "created" not in kwargs:
kwargs["created"] = datetime.now(timezone.utc).strftime(settings.DRYCC_DATETIME_FORMAT)
type(self).objects.filter(pk=self.pk).update(
conditions=Func(
F("conditions"),
Value(["0"]),
Value(kwargs, JSONField()),
function="jsonb_insert",
)
)
def get_deploy_image(self, container_type):
"""
In the deploy phase of dryccfile
Return the kubernetes "container image" to be sent off to the scheduler.
"""
image = self.build.dryccfile.get('deploy', {}).get(container_type, {}).get(
'image', self.build.get_image(container_type))
return self.build.get_image(image, default_image=image)
def get_deploy_args(self, container_type):
"""
In the deploy phase of dryccfile
Return the kubernetes "container arguments" to be sent off to the scheduler.
"""
if self.build is not None:
if self.build.dryccfile:
return self.build.dryccfile['deploy'].get(container_type, {}).get('args', [])
else:
# dockerfile or container image
if self.build.dockerfile or not self.build.sha:
# has profile
if self.build.procfile and container_type in self.build.procfile:
args = self.build.procfile[container_type]
return args.split()
return []
def get_deploy_command(self, container_type):
"""
In the deploy phase of dryccfile
Return the kubernetes "container command" to be sent off to the scheduler.
"""
return self.build.dryccfile.get(
'deploy', {}).get(container_type, {}).get('command', [])
def log(self, message, level=logging.INFO):
"""Logs a message in the context of this application.
This prefixes log messages with an application "tag" that the customized
drycc-logspout will be on the lookout for. When it's seen, the message-- usually
an application event of some sort like releasing or scaling, will be considered
as "belonging" to the application instead of the controller and will be handled
accordingly.
"""
logger.log(level, "[{}]: {}".format(self.app.id, message))
def new(self, user, config, build, summary=None):
"""
Create a new application release using the provided Build and Config
on behalf of a user.
Releases start at v1 and auto-increment.
"""
# construct fully-qualified target image
new_version = self.app.release_set.latest().version + 1
# create new release and auto-increment version
return Release.objects.create(
owner=user, app=self.app, config=config,
build=build, version=new_version, summary=summary
)
def get_port(self, procfile_type):
"""
Get application port for a given release. If pulling from private registry
then use default port or read from ENV var, otherwise attempt to pull from
the container image
"""
return int(self.config.typed_values.get(
procfile_type, {}).get(
'PORT', self.config.values.get('PORT', DEFAULT_CONTAINER_PORT)))
def deploy(self, procfile_types=None, force_deploy=False):
lock = DeployLock(self.app.pk)
if not lock.acquire(procfile_types, force=force_deploy):
raise DryccException('there is an executing pipeline, please wait or force deploy')
run_pipeline.delay(self, procfile_types, force_deploy)
def previous(self):
"""
Return the previous Release to this one.
:return: the previous :class:`Release`, or None
"""
releases = self.app.release_set
if self.pk:
releases = releases.exclude(pk=self.pk)
q = Q(failed=False, state="succeed")
try:
app_settings = self.app.appsettings_set.latest()
if app_settings.autorollback is False:
q = Q(state__in=["crashed", "succeed"])
except AppSettings.DoesNotExist:
pass
try:
# Get the Release previous to this one
prev_release = releases.filter(q).latest()
except Release.DoesNotExist:
prev_release = None
return prev_release
def rollback(self, user, procfile_types=None, version=None):
try:
# if no version is provided then grab version from object
version = (self.version - 1) if version is None else int(version)
if version < 1:
raise DryccException('version cannot be below 0')
elif version == 1:
raise DryccException('Cannot roll back to initial release.')
prev = self.app.release_set.get(version=version)
if prev.failed:
raise DryccException('Cannot roll back to failed release.')
latest_version = self.app.release_set.latest().version
new_release = self.new(
user,
build=prev.build,
config=prev.config,
summary="{} rolled back to v{}".format(user, version),
)
if self.build is not None:
new_release.deploy(procfile_types, force_deploy=True)
return new_release
except Exception as e:
# check if the exception is during create or publish
if ('new_release' not in locals() and 'latest_version' in locals() and
self.app.release_set.latest().version == latest_version+1):
new_release = self.app.release_set.latest()
new_release.state = "crashed"
new_release.failed = True
if new_release.summary:
new_release.summary += " "
new_release.summary += "{} performed roll back to a release that failed".format(
self.owner)
# Get the exception that has occured
new_release.exception = "error: {}".format(str(e))
# avoid overwriting other fields
new_release.save(update_fields=["state", "failed", "summary", "exception"])
raise DryccException(str(e)) from e
def cleanup_old(self, procfile_types=None):
"""
Cleanup any old resources from Kubernetes
This includes any RSs that are no longer considered the latest release (just a safety net)
Secrets no longer tied to any ReplicaSet
Stray pods no longer relevant to the latest release
"""
self.log(
'Cleaning up RSs for releases older than {} (latest)'.format(self.version_name),
level=logging.DEBUG
)
# base labels
labels = {'heritage': 'drycc'}
if procfile_types is not None:
labels['type__in'] = procfile_types
# Cleanup controllers
replica_sets_removal = []
replica_sets = self.scheduler().rs.get(self.app.id, labels=labels).json()['items']
if not replica_sets:
replica_sets = []
for replica_set in replica_sets:
current_version_name = replica_set['metadata']['labels']['version']
# skip the latest release
if current_version_name == self.version_name:
continue
# aggregate versions together to removal all at once
if current_version_name not in replica_sets_removal:
replica_sets_removal.append(current_version_name)
if replica_sets_removal:
self.log(
'Found the following versions to cleanup: {}'.format(
', '.join(replica_sets_removal)),
level=logging.DEBUG
)
# this is RC related
for version_name in replica_sets_removal:
self._delete_release_in_scheduler(self.app.id, procfile_types, version_name)
# handle Deployments specific cleanups
self._cleanup_deployment_secrets_and_configs(self.app.id, procfile_types)
# Remove stray pods
self._cleanup_stray_pods(self.app.id, procfile_types, self.version_name)
    def _cleanup_stray_pods(self, namespace, procfile_types, latest_version_name):
        """Delete leftover pods belonging to releases other than the latest.

        :param namespace: k8s namespace (the app id)
        :param procfile_types: optional list used to narrow pods by type label
        :param latest_version_name: version label of the release to keep
        """
        labels = {'heritage': 'drycc'}
        if procfile_types is not None:
            labels['type__in'] = procfile_types
        pods = self.scheduler().pod.get(namespace, labels=labels).json()['items']
        if not pods:
            pods = []
        for pod in pods:
            # skip pods that are already being terminated
            if self.scheduler().pod.deleted(pod):
                continue
            current_version_name = pod['metadata']['labels']['version']
            # skip the latest release
            if current_version_name == latest_version_name:
                continue
            try:
                self.scheduler().pod.delete(namespace, pod['metadata']['name'])
            except KubeHTTPException as e:
                # Sometimes k8s will manage to remove the pod from under us
                if e.response.status_code == 404:
                    continue
                # NOTE(review): non-404 errors are silently swallowed here,
                # unlike _delete_release_in_scheduler which re-raises —
                # confirm whether this best-effort behavior is intentional.
def _cleanup_deployment_secrets_and_configs(self, namespace, procfile_types=None):
"""
Clean up any environment secrets (and in the future ConfigMaps) that
are tied to a release Deployments no longer track
This is done by checking the available ReplicaSets and only removing
objects not attached to anything. This will allow releases done outside
of Drycc Controller
"""
# Find all ReplicaSets
version_names = [self.version_name, ]
labels = {'heritage': 'drycc', 'app': namespace}
replicasets = self.scheduler().rs.get(namespace, labels=labels).json()['items']
if not replicasets:
replicasets = []
for replicaset in replicasets:
if (
'version' not in replicaset['metadata']['labels'] or
replicaset['metadata']['labels']['version'] in version_names
):
continue
version_names.append(replicaset['metadata']['labels']['version'])
# find all env secrets not owned by any replicaset
labels = {
'heritage': 'drycc',
'app': namespace,
'class': 'env',
# http://kubernetes.io/docs/user-guide/labels/#set-based-requirement
'version__notin': version_names
}
if procfile_types is not None:
labels['type__in'] = procfile_types
self.log('Cleaning up orphaned env var secrets for application {}'.format(namespace), level=logging.DEBUG) # noqa
secrets = self.scheduler().secret.get(namespace, labels=labels).json()['items']
if not secrets:
secrets = []
for secret in secrets:
self.scheduler().secret.delete(namespace, secret['metadata']['name'])
def _delete_release_in_scheduler(self, namespace, procfile_types, version_name):
"""
Deletes a specific release in k8s based on ReplicationController
Scale RSs to 0 then delete RSs and the version specific
secret that container the env var
"""
labels = {
'heritage': 'drycc',
'app': namespace,
'version': version_name
}
if procfile_types is not None:
labels['type__in'] = procfile_types
# see if the app config has deploy timeout preference, otherwise use global
timeout = self.config.values.get('DRYCC_DEPLOY_TIMEOUT', settings.DRYCC_DEPLOY_TIMEOUT)
replica_sets = self.scheduler().rs.get(namespace, labels=labels).json()['items']
if not replica_sets:
replica_sets = []
for replica_set in replica_sets:
# Deployment takes care of this in the API, RS does not
# Have the RS scale down pods and delete itself
try:
self.scheduler().rs.scale(namespace, replica_set['metadata']['name'], 0, timeout)
self.scheduler().rs.delete(namespace, replica_set['metadata']['name'])
except KubeHTTPException as e:
if e.response.status_code != 404:
raise
    def save(self, *args, **kwargs):  # noqa
        """Compose a human-readable summary before persisting the release.

        Compares this release's build/config against the previous release and
        appends a description of what changed and who changed it.

        :raises AlreadyExists: when nothing changed, so empty releases are
            never stored
        """
        if not self.summary:
            self.summary = ''
        prev_release = self.previous()
        # compare this build to the previous build
        old_build = prev_release.build if prev_release else None
        old_config = prev_release.config if prev_release else None
        # if the build changed, log it and who pushed it
        if self.version == 1:
            self.summary += "{} created initial release".format(self.app.owner)
        elif self.build != old_build:
            if self.build.sha:
                self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
            else:
                self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
        elif self.config != old_config:
            # describe every changed config field, e.g. "added values FOO"
            for field, diff in self.config.diff(old_config).items():
                diff_list = []
                for diff_type, values in diff.items():
                    diff_list.append(f'{diff_type} {field} {", ".join(values.keys())}')
                if diff_list:
                    changes = ', '.join(diff_list)
                    self.summary += "{} {}".format(self.config.owner, changes)
        if not self.summary:
            # NOTE(review): the version == 1 branch above always sets a
            # summary, so this inner case looks unreachable — confirm
            # before removing.
            if self.version == 1:
                self.summary = "{} created the initial release".format(self.owner)
            else:
                # There were no changes to this release
                raise AlreadyExists("{} changed nothing - release stopped".format(self.owner))
        super(Release, self).save(*args, **kwargs)