# Copyright ClusterHQ Inc. See LICENSE file for details.
# -*- python -*-
# ex: set syntax=python:
import sys
from os.path import dirname
from tempfile import NamedTemporaryFile
from buildbot.buildslave import BuildSlave
from buildbot.manhole import AuthorizedKeysManhole
from buildbot.status.web import authz
from buildbot.status.web.auth import BasicAuth
import jinja2
from twisted.internet import reactor
from twisted.python.filepath import FilePath
from twisted.python.util import sibpath
sys.path.insert(0, dirname(__file__))
del sys, dirname
from flocker_bb import privateData
from flocker_bb.boxes import FlockerWebStatus as WebStatus
from flocker_bb.builders import flocker, maint, flocker_acceptance
from flocker_bb.ec2 import rackspace_slave, ec2_slave
from flocker_bb.github import createGithubStatus
from flocker_bb.monitoring import Monitor
from flocker_bb.password import generate_password
from flocker_bb.zulip import createZulip
from flocker_bb.zulip_status import createZulipStatus
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
# Some credentials
USER = privateData['auth']['user'].encode("utf-8")
PASSWORD = privateData['auth']['password'].encode("utf-8")
if 'zulip' in privateData:
ZULIP_BOT = privateData['zulip']['user']
ZULIP_KEY = privateData['zulip']['password']
zulip = createZulip(reactor, ZULIP_BOT, ZULIP_KEY)
def maybeAddManhole(config, privateData):
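    """
    If the private configuration has a ``manhole`` section, write its
    authorized SSH keys to a temporary file and add an
    ``AuthorizedKeysManhole`` to ``config`` so operators can log in to a
    Python shell inside the running buildmaster.
    """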
try:
manhole = privateData['manhole']
except KeyError:
return
with NamedTemporaryFile(delete=False) as authorized_keys:
authorized_keys.write(
"\n".join(manhole['authorized_keys']) + "\n"
)
    config['manhole'] = AuthorizedKeysManhole(
        manhole['port'], authorized_keys.name)
maybeAddManhole(c, privateData)
# 'slavePortnum' defines the TCP port to listen on for connections from slaves.
# This must match the value configured into the buildslaves (with their
# --master option)
c['slavePortnum'] = 9989
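
# The cloud-init script used to bootstrap on-demand buildslaves.  It is a
# template; get_cloud_init() below fills in the slave-specific values.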
cloudInit = FilePath(__file__).sibling("slave").child(
"cloud-init.sh").getContent()
c['slaves'] = []
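
# Maps each base image name (for example 'aws/centos-7') to the list of
# buildslave names created from it.  The builder modules below are given this
# mapping so they can assign builders to the slaves built from a particular
# image.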
SLAVENAMES = {}
def get_cloud_init(name, base, password, provider, privateData, slavePortnum):
"""
:param bytes name: The name of the buildslave.
    :param bytes base: The name of the image used for the buildslave.
:param bytes password: The password that the buildslave will use to
authenticate with the buildmaster.
    :param bytes provider: The cloud provider hosting the buildslave.  This is
        added to an environment variable so that provider-specific tests know
        which cloud authentication plugin to load and which credentials to
        load from the credentials file.
:param dict privateData: The non-slave specific keys and values from the
buildbot ``config.yml`` file.
    :param int slavePortnum: The TCP port number on the buildmaster to which
the buildslave will connect.
:returns: The ``bytes`` of a ``cloud-init.sh`` script which can be supplied
as ``userdata`` when creating an on-demand buildslave.
"""
return cloudInit % {
"github_token": privateData['github']['token'],
"name": name,
"base": base,
"password": password,
"FLOCKER_FUNCTIONAL_TEST_CLOUD_PROVIDER": provider,
'buildmaster_host': privateData['buildmaster']['host'],
'buildmaster_port': slavePortnum,
'acceptance.yml': privateData['acceptance'].get('config', ''),
'acceptance-ssh-key': privateData['acceptance'].get('ssh-key', ''),
}
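
# Create the buildslaves described by the 'slaves' section of the private
# configuration.  Depending on the keys present, each entry becomes either an
# on-demand Rackspace (OpenStack) slave, one or more on-demand EC2 slaves, or
# one or more statically configured slaves that authenticate with pre-shared
# passwords.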
for base, slaveConfig in privateData['slaves'].items():
if "openstack-image" in slaveConfig:
# Give this multi-slave support like the EC2 implementation below.
# FLOC-1907
password = generate_password(32)
SLAVENAMES[base] = []
SLAVENAMES[base].append(base)
# Factor the repetition out of this section and the ec2_slave call
# below. Maybe something like ondemand_slave(rackspace_driver, ...)
# FLOC-1908
slave = rackspace_slave(
name=base,
password=password,
config=slaveConfig,
credentials=privateData['rackspace'],
user_data=get_cloud_init(
base, base, password,
provider="openstack",
privateData=privateData,
slavePortnum=c['slavePortnum'],
),
build_wait_timeout=50*60,
keepalive_interval=60,
buildmaster=privateData['buildmaster']['host'],
max_builds=slaveConfig.get('max_builds'),
)
c['slaves'].append(slave)
elif 'ami' in slaveConfig:
if base not in SLAVENAMES:
SLAVENAMES[base] = []
for index in range(slaveConfig['slaves']):
name = '%s/%d' % (base, index)
password = generate_password(32)
SLAVENAMES[base].append(name)
slave = ec2_slave(
name=name,
password=password,
config=slaveConfig,
credentials=privateData['aws'],
user_data=get_cloud_init(
name, base, password,
provider="aws",
privateData=privateData,
slavePortnum=c['slavePortnum'],
),
region='us-west-2',
keypair_name='hybrid-master',
security_name='ssh',
build_wait_timeout=50*60,
keepalive_interval=60,
buildmaster=privateData['buildmaster']['host'],
max_builds=slaveConfig.get('max_builds'),
)
c['slaves'].append(slave)
else:
for index, password in enumerate(slaveConfig['passwords']):
name = '%s/%d' % (base, index)
            if 'aws/rhel-7.2' in base:
                # Use the AWS RHEL buildslave as the AWS CentOS buildslave.
                SLAVENAMES.setdefault('aws/centos-7', []).append(name)
            else:
                SLAVENAMES.setdefault(base, []).append(name)
c['slaves'].append(BuildSlave(
name, password=password,
max_builds=slaveConfig.get('max_builds'),
))
# A codebase generator synthesizes an internal identifier from a ... change.
# The codebase identifier lets parts of the build configuration more easily
# inspect attributes of the change without ambiguity when there are potentially
# multiple change sources (e.g., multiple repositories being fetched for a
# single build).
FLOCKER_REPOSITORY = "[email protected]:ClusterHQ/flocker.git"
CODEBASES = {
FLOCKER_REPOSITORY: "flocker",
"https://github.com/ClusterHQ/flocker": "flocker",
}
c['codebaseGenerator'] = lambda change: CODEBASES[change["repository"]]
c['change_source'] = []
c['builders'] = []
c['schedulers'] = []
def addBuilderModule(module):
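    """
    Add the builders and schedulers defined by ``module`` to the master
    configuration.
    """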
c['builders'].extend(module.getBuilders(SLAVENAMES))
c['schedulers'].extend(module.getSchedulers())
addBuilderModule(flocker)
addBuilderModule(flocker_acceptance)
addBuilderModule(maint)
# 'status' is a list of Status Targets. The results of each build will be
# pushed to these targets. buildbot/status/*.py has a variety to choose from,
# including web pages, email senders, and IRC bots.
failing_builders = frozenset(privateData.get('failing_builders', ()))
c['status'] = []
if privateData['github']['report_status']:
c['status'].append(createGithubStatus(
'flocker', token=privateData['github']['token'],
failing_builders=failing_builders,
))
c['status'].append(Monitor())
authz_cfg = authz.Authz(
auth=BasicAuth([(USER, PASSWORD)]),
# The "auth" value lines up with the `auth` keyword argument above!
forceBuild='auth',
forceAllBuilds='auth',
stopBuild='auth',
# Leave all this stuff disabled for now, but maybe enable it with "auth"
# later.
gracefulShutdown=False,
pingBuilder='auth',
stopAllBuilds=False,
cancelPendingBuild='auth',
cancelAllPendingBuilds=False,
)
c['status'].append(WebStatus(
http_port=80, authz=authz_cfg,
public_html=sibpath(__file__, 'public_html'),
jinja_loaders=[jinja2.FileSystemLoader(sibpath(__file__, 'templates'))],
change_hook_dialects={'github': True},
failing_builders=failing_builders,
))
if 'zulip' in privateData:
ZULIP_STREAM = privateData['zulip'].get('stream', u"BuildBot")
CRITICAL_STREAM = privateData['zulip'].get('critical_stream',
u"Engineering")
c['status'].append(createZulipStatus(
zulip, ZULIP_STREAM, CRITICAL_STREAM,
failing_builders=failing_builders,
))
# the 'title' string will appear at the top of this buildbot
# installation's html.WebStatus home page (linked to the
# 'titleURL') and is embedded in the title of the waterfall HTML page.
c['title'] = "ClusterHQ"
c['titleURL'] = "http://www.clusterhq.com/"
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server (usually the html.WebStatus page) is visible. This
# typically uses the port number set in the Waterfall 'status' entry, but
# with an externally-visible host name which the buildbot cannot figure out
# without some help.
c['buildbotURL'] = "http://%s/" % (privateData['buildmaster']['host'],)
# This specifies what database buildbot uses to store change and scheduler
# state. You can leave this at its default for all but the largest
# installations.
c['db_url'] = "sqlite:///" + sibpath(__file__, "data/state.sqlite")
# Keep a number of builds in memory rather than constantly re-reading them
# from disk.
c['buildCacheSize'] = 1000
# Clean up old builds.
c['buildHorizon'] = 1000