-
Notifications
You must be signed in to change notification settings - Fork 112
Expand file tree
/
Copy path: chef.py
More file actions
207 lines (166 loc) · 6.38 KB
/
chef.py
File metadata and controls
207 lines (166 loc) · 6.38 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
from __future__ import unicode_literals
import json
import os.path
import re
import subprocess
import time
from celery.canvas import group
from api.ssh import exec_ssh, connect_ssh
from cm.chef_api import ChefAPI
# Defaults baked into every node's chef bootstrap config
CHEF_CONFIG_PATH = '/etc/chef'
CHEF_INSTALL_TYPE = 'gems'
CHEF_RUBY_VERSION = '1.9.1'
CHEF_ENVIRONMENT = '_default'
CHEF_CLIENT_VERSION = '11.4.4'
# load chef config using CHEF_CONFIG_PATH
try:
    # parse controller's chef config for server_url and client_name
    _client_cfg_path = os.path.join(CHEF_CONFIG_PATH, 'client.rb')
    if not os.path.exists(_client_cfg_path):
        raise EnvironmentError('Could not find {}'.format(_client_cfg_path))
    with open(_client_cfg_path) as f:
        _data = f.read()
    # construct a dict from the ruby client.rb
    # (each setting line looks like: key<whitespace>'value')
    _d = {}
    for m in re.findall(r'''^([a-zA-Z0-9_]+)[ \t]+(.*)$''',
                        _data, re.MULTILINE):
        # strip surrounding single or double quotes from the ruby value
        _d[m[0]] = m[1].strip("'").strip('"')
    # set global variables from client.rb
    CHEF_SERVER_URL = _d['chef_server_url']
    CHEF_NODE_NAME = _d['node_name']
    CHEF_CLIENT_NAME = _d['node_name']
    CHEF_VALIDATION_NAME = _d['validation_client_name']
    # read the client key (root-owned, hence sudo cat)
    _client_pem_path = os.path.join(CHEF_CONFIG_PATH, 'client.pem')
    # NOTE(review): check_output returns bytes on Python 3, so .strip('\n')
    # only works on Python 2 -- consistent with the unicode_literals import,
    # but confirm before porting.
    CHEF_CLIENT_KEY = subprocess.check_output(
        ['sudo', '/bin/cat', _client_pem_path]).strip('\n')
    # read the validation key (root-owned, hence sudo cat)
    _valid_pem_path = os.path.join(CHEF_CONFIG_PATH, 'validation.pem')
    CHEF_VALIDATION_KEY = subprocess.check_output(
        ['sudo', '/bin/cat', _valid_pem_path]).strip('\n')
except Exception as e:
    # fail fast at import time: nothing in this module works without
    # a valid controller chef configuration
    raise EnvironmentError('Failed to auto-configure Chef -- {}'.format(e))
def _get_client():
    """Return a fresh ChefAPI client built from the controller's config."""
    credentials = (CHEF_SERVER_URL, CHEF_CLIENT_NAME, CHEF_CLIENT_KEY)
    return ChefAPI(*credentials)
def configure_node(node):
    """
    Return a copy of the node's layer config with a cloud-init 'chef'
    section filled in for bootstrapping this node.
    """
    layer_config = node.layer.config
    config = layer_config.copy()
    # http://cloudinit.readthedocs.org/en/latest/topics/examples.html#install-and-run-chef-recipes
    chef_section = {}
    config['chef'] = chef_section
    chef_section['node_name'] = node.id
    # honor an explicit run_list from the layer config (assumes csv)
    csv_run_list = layer_config.get('run_list', [])
    if csv_run_list:
        chef_section['run_list'] = csv_run_list.split(',')
    else:
        # otherwise derive the run_list from the layer's runtime/proxy flags
        derived = ['recipe[deis]']
        if node.layer.runtime is True:
            derived.append('recipe[deis::runtime]')
        if node.layer.proxy is True:
            derived.append('recipe[deis::proxy]')
        chef_section['run_list'] = derived
    initial_attrs = layer_config.get('initial_attributes')
    if initial_attrs:
        chef_section['initial_attributes'] = initial_attrs
    # fold in the controller-wide chef settings
    chef_section['version'] = CHEF_CLIENT_VERSION
    chef_section['ruby_version'] = CHEF_RUBY_VERSION
    chef_section['server_url'] = CHEF_SERVER_URL
    chef_section['install_type'] = CHEF_INSTALL_TYPE
    chef_section['environment'] = CHEF_ENVIRONMENT
    chef_section['validation_name'] = CHEF_VALIDATION_NAME
    chef_section['validation_key'] = CHEF_VALIDATION_KEY
    return config
def bootstrap_node(node):
    """
    Poll the Chef server until *node* finishes its chef bootstrap,
    then return the node.

    If chef bootstrapping fails, the node never completes registration
    and this loop keeps polling.
    """
    while True:
        # reinstantiate the client on each poll attempt
        # to avoid disconnect errors
        api = _get_client()
        resp, status = api.get_node(node.id)
        if status == 200:
            node_data = json.loads(resp)
            # a non-null ohai idletime means the node has registered
            if node_data.get('automatic', {}).get('idletime'):
                return node
        time.sleep(5)
def destroy_node(node):
    """
    Purge the Node & Client records from Chef Server
    """
    api = _get_client()
    for purge in (api.delete_node, api.delete_client):
        purge(node.id)
    return node
def update_user(user):
    """Write the user's calculated state into the deis-users data bag."""
    _get_client().update_databag_item(
        'deis-users', user.username, user.calculate())
def update_app(app):
    """Write the app's calculated state into the deis-apps data bag."""
    _get_client().update_databag_item('deis-apps', app.id, app.calculate())
def update_formation(formation, client=None):
    """
    Write the formation's calculated state into the deis-formations data bag.

    :param formation: object exposing an ``id`` and a ``calculate()`` method
    :param client: optional Chef API client; when omitted one is created,
        making this consistent with the other ``update_*`` helpers, which
        build their own client (callers passing a client are unaffected)
    """
    if client is None:
        client = _get_client()
    client.update_databag_item('deis-formations', formation.id, formation.calculate())
def converge_controller():
    """
    Run chef-client on the controller with only the gitosis recipe.

    NOTE: converging the controller can overwrite any in-place
    changes to application code
    """
    cmd = ['sudo', 'chef-client', '--override-runlist', 'recipe[deis::gitosis]']
    try:
        return subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        # surface the failing command's output before re-raising
        print(e)
        print(e.output)
        raise e
def converge_node(node):
    """
    Run chef-client on *node* over SSH; return its (output, rc) pair.
    """
    conn = connect_ssh(node.layer.ssh_username,
                       node.fqdn, 22,
                       node.layer.ssh_private_key)
    return exec_ssh(conn, 'sudo chef-client')
def converge_formation(formation):
    """
    Converge every node in *formation* in parallel via a celery group,
    blocking until all subtasks complete.
    """
    nodes = formation.node_set.all()
    subtasks = []
    for n in nodes:
        # NOTE(review): `.s(...)` assumes converge_node is a celery task
        # taking (id, username, fqdn, key), but the converge_node defined in
        # this module is a plain function taking a single node object --
        # confirm which converge_node this is meant to resolve to.
        # NOTE(review): ssh credentials are read from n.layer.flavor here but
        # from node.layer directly in converge_node -- verify which is right.
        subtask = converge_node.s(n.id,
                                  n.layer.flavor.ssh_username,
                                  n.fqdn,
                                  n.layer.flavor.ssh_private_key)
        subtasks.append(subtask)
    job = group(*subtasks)
    # join() blocks until every node in the group has converged
    return job.apply_async().join()
def publish_user(username, data):
    """Upsert *data* into the deis-users data bag; return *username*."""
    _publish('deis-users', username, data)
    return username
def publish_app(app_id, data):
    """Upsert *data* into the deis-apps data bag; return *app_id*."""
    _publish('deis-apps', app_id, data)
    return app_id
def purge_app(app_id):
    """Remove *app_id* from the deis-apps data bag; return *app_id*."""
    _purge('deis-apps', app_id)
    return app_id
def publish_formation(formation_id, data):
    """Upsert *data* into the deis-formations data bag; return the id."""
    _publish('deis-formations', formation_id, data)
    return formation_id
def purge_formation(formation_id):
    """Remove *formation_id* from the deis-formations data bag; return the id."""
    _purge('deis-formations', formation_id)
    return formation_id
def _publish(data_bag, item_name, item_value):
    """
    Upsert *item_value* into *data_bag* under *item_name*.

    Attempts an update first; on any non-200 status falls back to
    creating the item. Raises RuntimeError if the create does not
    return 201. Returns the (body, status) of the successful call.
    """
    client = _get_client()
    body, status = client.update_databag_item(data_bag, item_name, item_value)
    if status == 200:
        return body, status
    body, status = client.create_databag_item(data_bag, item_name, item_value)
    if status == 201:
        return body, status
    raise RuntimeError('Could not publish {item_name}: {body}'.format(**locals()))
def _purge(databag_name, item_name):
    """
    Delete *item_name* from *databag_name*.

    A 404 (already absent) counts as success; any other non-200 status
    raises RuntimeError. Returns the (body, status) pair on success.
    """
    client = _get_client()
    body, status = client.delete_databag_item(databag_name, item_name)
    if status not in (200, 404):
        raise RuntimeError('Could not purge {item_name}: {body}'.format(**locals()))
    return body, status