-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathrelease.py
More file actions
497 lines (426 loc) · 20.8 KB
/
release.py
File metadata and controls
497 lines (426 loc) · 20.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
import logging
from django.conf import settings
from django.db import models
from registry import publish_release, get_port as docker_get_port, RegistryException
from api.utils import dict_diff
from api.models import UuidAuditedModel
from api.exceptions import DeisException, AlreadyExists
from scheduler import KubeHTTPException
logger = logging.getLogger(__name__)
class Release(UuidAuditedModel):
    """
    Software release deployed by the application platform

    Releases contain a :class:`Build` and a :class:`Config`. Releases are
    immutable snapshots: every build or config change creates a new row with
    an auto-incremented ``version`` (see :meth:`new` and :meth:`save`).
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)
    app = models.ForeignKey('App', on_delete=models.CASCADE)
    version = models.PositiveIntegerField()
    summary = models.TextField(blank=True, null=True)
    config = models.ForeignKey('Config', on_delete=models.CASCADE)
    build = models.ForeignKey('Build', null=True, on_delete=models.CASCADE)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'version'),)

    def __str__(self):
        return "{0}-v{1}".format(self.app.id, self.version)

    @property
    def image(self):
        """
        Return the image reference to deploy for this release.

        Depending on where the build came from, this is either the build's
        own image string (private registry / buildpack) or a reference into
        the local registry (``REGISTRY_URL``).
        """
        # Builder pushes to internal registry, exclude SHA based images from being returned
        registry = self.config.registry
        if (
            registry.get('username', None) and
            registry.get('password', None) and
            # SHA means it came from a git push (builder)
            not self.build.sha and
            # hostname tells Builder where to push images
            not registry.get('hostname', None)
        ):
            return self.build.image

        # return image if it is already in a registry, test host and then host + port
        if (
            self.build.image.startswith(settings.REGISTRY_HOST) or
            self.build.image.startswith(settings.REGISTRY_URL)
        ):
            # strip registry information off first
            image = self.build.image.replace('{}/'.format(settings.REGISTRY_URL), '')
            return image.replace('{}/'.format(settings.REGISTRY_HOST), '')

        # Sort out image information based on build type
        if self.build.type == 'dockerfile':
            # DockerFile
            return '{}/{}:git-{}'.format(settings.REGISTRY_URL, self.app.id, str(self.build.sha))
        elif self.build.type == 'image':
            # Deis Pull, docker image in local registry
            return '{}/{}:v{}'.format(settings.REGISTRY_URL, self.app.id, str(self.version))
        elif self.build.type == 'buildpack':
            # Build Pack - Registry URL not prepended since slugrunner image will download slug
            return self.build.image
        # NOTE(review): an unknown build type falls through and returns None —
        # presumably Build.type is constrained upstream; confirm.

    def new(self, user, config, build, summary=None, source_version='latest'):
        """
        Create a new application release using the provided Build and Config
        on behalf of a user.

        Releases start at v1 and auto-increment.

        :raises DeisException: if publishing the image to the registry fails
        """
        # construct fully-qualified target image
        new_version = self.version + 1

        # create new release and auto-increment version
        release = Release.objects.create(
            owner=user, app=self.app, config=config,
            build=build, version=new_version, summary=summary
        )

        try:
            release.publish()
        except DeisException as e:
            # If we cannot publish this app, just log and carry on
            self.app.log(e)
        except RegistryException as e:
            # Registry failures are fatal for the release
            self.app.log(e)
            raise DeisException(str(e)) from e

        return release

    def publish(self):
        """
        Push this release's image to the target registry if it is not
        already available there.

        :raises DeisException: if this release has no build attached
        """
        if self.build is None:
            raise DeisException('No build associated with this release to publish')

        # If the build has a SHA, assume it's from deis-builder and in the deis-registry already
        if self.build.source_based:
            return

        # Builder pushes to internal registry, exclude SHA based images from being returned early
        registry = self.config.registry
        if (
            registry.get('username', None) and
            registry.get('password', None) and
            # SHA means it came from a git push (builder)
            not self.build.sha and
            # hostname tells Builder where to push images
            not registry.get('hostname', None)
        ):
            self.app.log('{} exists in the target registry. Using image for release {} of app {}'.format(self.build.image, self.version, self.app))  # noqa
            return

        # return image if it is already in the registry, test host and then host + port
        if (
            self.build.image.startswith(settings.REGISTRY_HOST) or
            self.build.image.startswith(settings.REGISTRY_URL)
        ):
            self.app.log('{} exists in the target registry. Using image for release {} of app {}'.format(self.build.image, self.version, self.app))  # noqa
            return

        # add tag if it was not provided
        source_image = self.build.image
        if ':' not in source_image:
            source_image = "{}:{}".format(source_image, self.build.version)

        # if build is source based then it was pushed into the deis registry
        deis_registry = bool(self.build.source_based)
        publish_release(source_image, self.image, deis_registry, self.get_registry_auth())

    def get_port(self):
        """
        Get application port for a given release. If pulling from private registry
        then use default port or read from ENV var, otherwise attempt to pull from
        the docker image

        :raises DeisException: if no port can be determined for a routable app
        """
        try:
            deis_registry = bool(self.build.source_based)
            envs = self.config.values
            creds = self.get_registry_auth()

            if self.build.type == "buildpack":
                self.app.log('buildpack type detected. Defaulting to $PORT 5000')
                return 5000

            # application has registry auth - $PORT is required
            if creds is not None:
                if envs.get('PORT', None) is None:
                    self.app.log('Private registry detected but no $PORT defined. Defaulting to $PORT 5000', logging.WARNING)  # noqa
                    return 5000

                # User provided PORT
                return int(envs.get('PORT'))

            # If the user provides PORT
            if envs.get('PORT', None):
                return int(envs.get('PORT'))

            # discover port from docker image
            port = docker_get_port(self.image, deis_registry, creds)
            if port is None:
                msg = "Expose a port or make the app non routable by changing the process type"
                self.app.log(msg, logging.ERROR)
                raise DeisException(msg)

            return port
        except Exception as e:
            raise DeisException(str(e)) from e

    def get_registry_auth(self):
        """
        Gather login information for private registry if needed

        :return: a credentials dict (username / password / email) or None
        """
        auth = None
        registry = self.config.registry
        if registry.get('username', None):
            auth = {
                'username': registry.get('username', None),
                'password': registry.get('password', None),
                'email': self.owner.email
            }

        return auth

    def previous(self):
        """
        Return the previous Release to this one.

        :return: the previous :class:`Release`, or None
        """
        releases = self.app.release_set
        if self.pk:
            releases = releases.exclude(pk=self.pk)

        try:
            # Get the Release previous to this one
            prev_release = releases.latest()
        except Release.DoesNotExist:
            prev_release = None

        return prev_release

    def rollback(self, user, version=None):
        """
        Roll the application back to a previous release's build and config
        by creating (and deploying) a brand new release.

        :param user: the user performing the rollback (becomes release owner)
        :param version: target version to roll back to; defaults to the
            release immediately preceding this one
        :raises DeisException: for invalid versions or any deploy failure
        """
        try:
            # if no version is provided then grab version from object
            version = (self.version - 1) if version is None else int(version)
            if version < 1:
                raise DeisException('version cannot be below 1')
            elif version == 1:
                raise DeisException('Cannot roll back to initial release.')

            prev = self.app.release_set.get(version=version)
            new_release = self.new(
                user,
                build=prev.build,
                config=prev.config,
                summary="{} rolled back to v{}".format(user, version),
                source_version='v{}'.format(version)
            )

            if self.build is not None:
                self.app.deploy(new_release, force_deploy=True)
            return new_release
        except Exception as e:
            # remove the release we just created if the deploy failed
            if 'new_release' in locals():
                new_release.delete()
            raise DeisException(str(e)) from e

    def delete(self, *args, **kwargs):
        """Delete release DB record and any RCs from the affect release"""
        try:
            self._delete_release_in_scheduler(self.app.id, self.version)
        except KubeHTTPException as e:
            # 404 means they were already cleaned up
            # NOTE: must use != here - `is not` compares object identity and is
            # unreliable for ints outside CPython's small-int cache
            if e.response.status_code != 404:
                # Another problem came up
                message = 'Could not cleanup RCs for release {}'.format(self.version)
                self.app.log(message, level=logging.WARNING)
                logger.warning(message + ' - ' + str(e))
        finally:
            # always delete the DB record, even if scheduler cleanup failed
            super(Release, self).delete(*args, **kwargs)

    def cleanup_old(self, deployment=False):
        """Cleanup all but the latest release from Kubernetes

        :param deployment: also prune secrets / configs no longer referenced
            by any ReplicaSet (Deployments-based apps)
        """
        latest_version = 'v{}'.format(self.version)
        self.app.log(
            'Cleaning up RCs for releases older than {} (latest)'.format(latest_version),
            level=logging.DEBUG
        )

        # Cleanup controllers
        labels = {'heritage': 'deis'}
        controller_removal = []
        controllers = self._scheduler.get_rcs(self.app.id, labels=labels).json()
        for controller in controllers['items']:
            current_version = controller['metadata']['labels']['version']
            # skip the latest release
            if current_version == latest_version:
                continue

            # aggregate versions together to removal all at once
            if current_version not in controller_removal:
                controller_removal.append(current_version)

        if controller_removal:
            self.app.log(
                'Found the following versions to cleanup: {}'.format(', '.join(controller_removal)),  # noqa
                level=logging.DEBUG
            )

        for version in controller_removal:
            self._delete_release_in_scheduler(self.app.id, version)

        # find stray env secrets to remove that may have been missed
        self.app.log('Cleaning up orphaned environment var secrets', level=logging.DEBUG)
        labels = {
            'heritage': 'deis',
            'app': self.app.id,
            'type': 'env',
        }
        secrets = self._scheduler.get_secrets(self.app.id, labels=labels).json()
        for secret in secrets['items']:
            current_version = secret['metadata']['labels']['version']
            # skip the latest release
            if current_version == latest_version:
                continue

            self._scheduler.delete_secret(self.app.id, secret['metadata']['name'])

        # Remove stray pods
        labels = {'heritage': 'deis'}
        pods = self._scheduler.get_pods(self.app.id, labels=labels).json()
        for pod in pods['items']:
            if self._scheduler.pod_deleted(pod):
                continue

            current_version = pod['metadata']['labels']['version']
            # skip the latest release
            if current_version == latest_version:
                continue

            self._scheduler.delete_pod(self.app.id, pod['metadata']['name'])

        if deployment:
            self._cleanup_deployment_secrets_and_configs(self.app.id)

    def _cleanup_deployment_secrets_and_configs(self, namespace):
        """
        Clean up any environment secrets (and in the future ConfigMaps) that
        are tied to a release Deployments no longer track

        This is done by checking the available ReplicaSets and only removing
        objects not attached to anything. This will allow releases done outside
        of Deis Controller
        """
        # Find all ReplicaSets
        versions = []
        labels = {'heritage': 'deis', 'app': namespace}
        replicasets = self._scheduler.get_replicasets(namespace, labels=labels).json()
        for replicaset in replicasets['items']:
            if (
                'version' not in replicaset['metadata']['labels'] or
                replicaset['metadata']['labels']['version'] in versions
            ):
                continue

            versions.append(replicaset['metadata']['labels']['version'])

        # find all env secrets not owned by any replicaset
        labels = {
            'heritage': 'deis',
            'app': namespace,
            'type': 'env',
            # http://kubernetes.io/docs/user-guide/labels/#set-based-requirement
            'version__notin': versions
        }
        self.app.log('Cleaning up orphaned env var secrets for application {}'.format(namespace), level=logging.DEBUG)  # noqa
        secrets = self._scheduler.get_secrets(namespace, labels=labels).json()
        for secret in secrets['items']:
            self._scheduler.delete_secret(namespace, secret['metadata']['name'])

    def _delete_release_in_scheduler(self, namespace, version):
        """
        Deletes a specific release in k8s based on ReplicationController

        Scale RCs to 0 then delete RCs / Deployments and the version specific
        secret that container the env var
        """
        labels = {
            'heritage': 'deis',
            'app': namespace,
            'version': version
        }

        # see if the app config has deploy timeout preference, otherwise use global
        deploy_timeout = self.config.values.get('DEIS_DEPLOY_TIMEOUT', settings.DEIS_DEPLOY_TIMEOUT)  # noqa
        controllers = self._scheduler.get_rcs(namespace, labels=labels).json()
        for controller in controllers['items']:
            self._scheduler.cleanup_release(namespace, controller, deploy_timeout)

        # remove secret that contains env vars for the release
        try:
            secret_name = "{}-{}-env".format(namespace, version)
            self._scheduler.delete_secret(namespace, secret_name)
        except KubeHTTPException:
            # secret is already gone - nothing to do
            pass

    def deployment_in_progress(self, namespace, name):
        """
        Return True if a Kubernetes Deployment rollout is still underway
        for the given object, False otherwise.

        :raises KubeHTTPException: on scheduler errors other than 404
        """
        # see if there is a global or app specific setting to specify Deployments usage
        deployments = bool(self.config.values.get('DEIS_KUBERNETES_DEPLOYMENTS', settings.DEIS_KUBERNETES_DEPLOYMENTS))  # noqa
        if not deployments:
            return False

        try:
            ready, _ = self._scheduler.are_deployment_replicas_ready(namespace, name)
            return not ready
        except KubeHTTPException as e:
            # Deployment doesn't exist
            if e.response.status_code == 404:
                return False
            # any other scheduler error was previously swallowed (implicit
            # None return) - surface it instead of masking the failure
            raise

    def save(self, *args, **kwargs):  # noqa
        """
        Build a human-readable summary of what changed since the previous
        release (build, env vars, limits, tags, registry, healthcheck)
        before persisting.

        :raises AlreadyExists: if nothing changed compared to the previous
            release (prevents empty releases)
        """
        if not self.summary:
            self.summary = ''
            prev_release = self.previous()
            # compare this build to the previous build
            old_build = prev_release.build if prev_release else None
            old_config = prev_release.config if prev_release else None

            # if the build changed, log it and who pushed it
            if self.version == 1:
                self.summary += "{} created initial release".format(self.app.owner)
            elif self.build != old_build:
                if self.build.sha:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
                else:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.image)

            # if the config data changed, log the dict diff
            if self.config != old_config:
                # if env vars change, log the dict diff
                dict1 = self.config.values
                dict2 = old_config.values if old_config else {}
                diff = dict_diff(dict1, dict2)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)

                # if the limits changed (memory or cpu), log the dict diff
                changes = []
                old_mem = old_config.memory if old_config else {}
                diff = dict_diff(self.config.memory, old_mem)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('memory')

                old_cpu = old_config.cpu if old_config else {}
                diff = dict_diff(self.config.cpu, old_cpu)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('cpu')

                if changes:
                    changes = 'changed limits for '+', '.join(changes)
                    self.summary += "{} {}".format(self.config.owner, changes)

                # if the tags changed, log the dict diff
                changes = []
                old_tags = old_config.tags if old_config else {}
                diff = dict_diff(self.config.tags, old_tags)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added tag ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed tag ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted tag ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)

                # if the registry information changed, log the dict diff
                changes = []
                old_registry = old_config.registry if old_config else {}
                diff = dict_diff(self.config.registry, old_registry)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added registry info ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed registry info ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted registry info ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)

                # if the healthcheck information changed, log the dict diff
                changes = []
                old_healthcheck = old_config.healthcheck if old_config else {}
                diff = dict_diff(self.config.healthcheck, old_healthcheck)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added healthcheck info ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed healthcheck info ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted healthcheck info ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)

            if not self.summary:
                if self.version == 1:
                    self.summary = "{} created the initial release".format(self.owner)
                else:
                    # There were no changes to this release
                    raise AlreadyExists("{} changed nothing - release stopped".format(self.owner))

        super(Release, self).save(*args, **kwargs)