-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathresource.py
More file actions
256 lines (235 loc) · 10.1 KB
/
resource.py
File metadata and controls
256 lines (235 loc) · 10.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
import logging
from functools import cmp_to_key
from django.db import models, transaction
from django.contrib.auth import get_user_model
from api.exceptions import DryccException, AlreadyExists, ServiceUnavailable
from api.utils import validate_label
from scheduler import KubeException
from .base import UuidAuditedModel
User = get_user_model()
logger = logging.getLogger(__name__)
class Resource(UuidAuditedModel):
    """A Service Catalog resource (ServiceInstance) owned by an App.

    Mirrors a Kubernetes Service Catalog instance: ``attach``/``detach``
    provision and deprovision the instance, ``bind``/``unbind`` manage its
    ServiceBinding, and ``retrieve`` syncs remote state (status, binding,
    secret data) back into this database row.
    """

    owner = models.ForeignKey(User, on_delete=models.PROTECT)
    app = models.ForeignKey('App', on_delete=models.CASCADE)
    # DNS-label style name, unique within the app (see Meta.unique_together).
    name = models.CharField(max_length=63, validators=[validate_label])
    # "<serviceclass>:<plan>" selector, split in attach()/attach_update().
    plan = models.CharField(max_length=128)
    # Secret payload copied from the binding's Kubernetes Secret.
    data = models.JSONField(default=dict, blank=True)
    # Last observed instance condition state (e.g. "Ready", "Provisioning").
    status = models.TextField(blank=True, null=True)
    # Last observed binding condition state (e.g. "Ready", "Binding").
    binding = models.TextField(blank=True, null=True)
    # Provisioning parameters forwarded to the service broker.
    options = models.JSONField(default=dict, blank=True)

    class Meta:
        get_latest_by = 'created'
        unique_together = (('app', 'name'),)
        ordering = ['-created']

    def __str__(self):
        return self.name

    @transaction.atomic
    def save(self, *args, **kwargs):
        """Persist the resource, provisioning the k8s instance on first save."""
        # created == updated only before the first successful save.
        if self.created == self.updated:
            self.attach(*args, **kwargs)
        return super(Resource, self).save(*args, **kwargs)

    @classmethod
    def services(cls):
        """Return available service classes as [{id, name, updateable}], sorted by name."""
        services = [
            {
                "id": serviceclass["spec"]["externalID"],
                "name": serviceclass["spec"]["externalName"],
                "updateable": serviceclass["spec"]["planUpdatable"],
            }
            for serviceclass in
            cls.scheduler().svcat.get_serviceclasses().json()["items"]
        ]
        services.sort(key=lambda service: service["name"])
        return services

    @classmethod
    def plans(cls, serviceclass_name):
        """Return the plans of the named service class, shortest name first.

        Returns an empty list when the service class is unknown.
        """
        serviceclass_id = None
        for service in cls.services():
            if service["name"] == serviceclass_name:
                serviceclass_id = service["id"]
                break
        plans = []
        if serviceclass_id is not None:
            for serviceplan in cls.scheduler().svcat.get_serviceplans().json()["items"]:
                if serviceplan["spec"]["clusterServiceClassRef"]["name"] == serviceclass_id:
                    plans.append({
                        "id": serviceplan["spec"]["externalID"],
                        "name": serviceplan["spec"]["externalName"],
                        "description": serviceplan["spec"]["description"],
                    })
        # Order by name length, then lexicographically — equivalent to the
        # former cmp_to_key comparator, expressed as a plain tuple key.
        plans.sort(key=lambda plan: (len(plan["name"]), plan["name"]))
        return plans

    def attach(self, *args, **kwargs):
        """Create the ServiceInstance in k8s.

        Raises:
            AlreadyExists: if an instance with this name already exists.
            ServiceUnavailable: if the create request fails.
        """
        try:
            self.scheduler().svcat.get_instance(self.app.id, self.name)
            err = "Resource {} already exists in this namespace".format(self.name)  # noqa
            self.log(err, logging.INFO)
            raise AlreadyExists(err)
        except KubeException as e:
            # Instance not found — expected path, go ahead and create it.
            logger.info(e)
            try:
                # plan is "<class>:<plan>"; the plan part may itself contain ':'.
                instance_class, _, instance_plan = self.plan.partition(":")
                # Renamed from `kwargs` to avoid shadowing the method parameter.
                params = {
                    "instance_class": instance_class,
                    "instance_plan": instance_plan,
                    "parameters": self.options,
                }
                self.scheduler().svcat.create_instance(
                    self.app.id, self.name, **params
                )
            except KubeException as e:
                msg = 'There was a problem creating the resource ' \
                      '{} for {}'.format(self.name, self.app_id)
                raise ServiceUnavailable(msg) from e

    @transaction.atomic
    def delete(self, *args, **kwargs):
        """Deprovision the k8s instance, then delete the DB row.

        Raises:
            DryccException: while a binding exists or provisioning is in flight.
        """
        if self.binding == "Ready":
            raise DryccException("the resource instance is still binding")
        if self.status == "Provisioning":
            raise DryccException("the resource instance is provisioning")
        # Detach ServiceInstance, updates k8s
        self.detach(*args, **kwargs)
        return super(Resource, self).delete(*args, **kwargs)

    def detach(self, *args, **kwargs):
        """Delete the ServiceInstance from k8s if it still exists."""
        try:
            resp = self.scheduler().svcat.get_instance(
                self.app.id, self.name, ignore_exception=True)
            # 404 means it is already gone; nothing to delete.
            if resp.status_code != 404:
                self.scheduler().svcat.delete_instance(self.app.id, self.name)
        except KubeException as e:
            raise ServiceUnavailable("Could not delete resource {} for application {}".format(self.name, self.app_id)) from e  # noqa

    def log(self, message, level=logging.INFO):
        """Logs a message in the context of this service.

        This prefixes log messages with an application "tag" that the customized
        drycc-logspout will be on the lookout for. When it's seen, the message--
        usually an application event of some sort like releasing or scaling,
        will be considered as "belonging" to the application instead of the
        controller and will be handled accordingly.
        """
        logger.log(level, "[{}]: {}".format(self.app.id, message))

    def bind(self, *args, **kwargs):
        """Create a ServiceBinding for a Ready instance.

        Raises:
            DryccException: if the instance is not ready or already bound.
            AlreadyExists: if a binding already exists in k8s.
            ServiceUnavailable: if the create request fails.
        """
        if self.status != "Ready":
            raise DryccException("the resource instance is not ready")
        if self.binding == "Ready":
            raise DryccException("the resource instance is binding")
        # Mark as in-progress before touching k8s so concurrent binds bail out.
        self.binding = "Binding"
        self.save()
        try:
            self.scheduler().svcat.get_binding(self.app.id, self.name)
            err = "Resource {} is binding".format(self.name)
            self.log(err, logging.INFO)
            raise AlreadyExists(err)
        except KubeException as e:
            # Binding not found — expected path, create it.
            logger.info(e)
            try:
                self.scheduler().svcat.create_binding(
                    self.app.id, self.name, **kwargs)
            except KubeException as e:
                msg = 'There was a problem binding the resource ' \
                      '{} for {}'.format(self.name, self.app_id)
                raise ServiceUnavailable(msg) from e

    def unbind(self, *args, **kwargs):
        """Delete the ServiceBinding and clear the cached secret data."""
        if not self.binding:
            raise DryccException("the resource instance is not binding")
        try:
            # We raise an exception when a resource doesn't exist
            self.scheduler().svcat.get_binding(self.app.id, self.name)
            self.scheduler().svcat.delete_binding(self.app.id, self.name)
            self.binding = None
            self.data = {}
            self.save()
        except KubeException as e:
            raise ServiceUnavailable("Could not unbind resource {} for application {}".format(self.name, self.app_id)) from e  # noqa

    def attach_update(self, *args, **kwargs):
        """Patch the existing ServiceInstance with the current plan/options.

        Raises:
            DryccException: if the instance does not exist in k8s.
            ServiceUnavailable: if the patch request fails.
        """
        try:
            data = self.scheduler().svcat.get_instance(
                self.app.id, self.name).json()
        except KubeException as e:
            logger.debug(e)
            # BUG FIX: the original called self.DryccException(...) (an
            # AttributeError on the model) instead of raising, and then fell
            # through to use the undefined `data` below.
            raise DryccException(
                "resource {} does not exist".format(self.name)) from e
        try:
            # resourceVersion is required for an optimistic-concurrency patch.
            version = data["metadata"]["resourceVersion"]
            instance_class, _, instance_plan = self.plan.partition(":")
            params = {
                "instance_class": instance_class,
                "instance_plan": instance_plan,
                "parameters": self.options,
                "external_id": data["spec"]["externalID"],
            }
            self.scheduler().svcat.patch_instance(
                self.app.id, self.name, version, **params
            )
        except KubeException as e:
            msg = 'There was a problem update the resource ' \
                  '{} for {}'.format(self.name, self.app_id)
            raise ServiceUnavailable(msg) from e

    @property
    def message(self):
        """Latest condition message from the k8s instance, or "" on failure."""
        try:
            resp = self.scheduler().svcat.get_instance(
                self.app.id, self.name)
            # BUG FIX: the original assigned "" on non-200 and then
            # unconditionally overwrote it by parsing the body anyway;
            # return early instead of risking a parse of an error body.
            if resp.status_code != 200:
                return ""
            conditions = resp.json().get("status", {}).get("conditions", [{}])
            return conditions[-1].get("message", "")
        except KubeException as e:
            logger.info("retrieve instance info error: {}".format(e))
            return ""

    def retrieve(self, *args, **kwargs):
        """Sync remote instance/binding state into this row.

        Returns True when both the instance and its binding are "Ready".
        """
        # BUG FIX: `status or binding` short-circuited, so the binding (and
        # secret data) were never refreshed whenever the status had changed,
        # leaving the readiness check below comparing stale state.
        status_changed = self._retrieve_status()
        binding_changed = self._retrieve_binding()
        if status_changed or binding_changed:
            self.save()
        return self.status == self.binding == "Ready"

    def to_measurements(self, timestamp: float):
        """Return a one-item usage-measurement list for metering this resource."""
        return [{
            "app_id": str(self.app_id),
            "owner": self.owner_id,
            "name": self.plan,
            "type": "resource",
            "unit": "number",
            "usage": 1,
            "kwargs": {
                "name": self.name,
            },
            "timestamp": int(timestamp)
        }]

    def _retrieve_status(self):
        """Refresh status/options from the k8s instance; return True if changed."""
        changed = False
        try:
            response = self.scheduler().svcat.get_instance(
                self.app.id, self.name).json()
            status = response.get('status', {}).get('lastConditionState')
            options = response.get('spec', {}).get('parameters', {})
            if self.status != status:
                self.status = status
                changed = True
            if self.options != options:
                self.options = options
                changed = True
        except KubeException as e:
            # Best-effort sync: leave local state untouched on API failure.
            logger.info("retrieve instance info error: {}".format(e))
        return changed

    def _retrieve_binding(self):
        """Refresh binding state and secret data; return True if changed."""
        changed = False
        try:
            # We raise an exception when a resource doesn't exist
            response = self.scheduler().svcat.get_binding(self.app.id, self.name).json()
            binding = response.get('status', {}).get('lastConditionState')
            secret_name = response.get('spec', {}).get('secretName')
            if self.binding != binding:
                self.binding = binding
                changed = True
            if secret_name:
                response = self.scheduler().secret.get(self.app.id, secret_name).json()
                data = response.get('data', {})
                if self.data != data:
                    self.data = data
                    changed = True
        except KubeException as e:
            # Best-effort sync: leave local state untouched on API failure.
            logger.info("retrieve binding info error: {}".format(e))
        return changed