diff --git a/runbot/container.py b/runbot/container.py
index 7796e6076..78cae273b 100644
--- a/runbot/container.py
+++ b/runbot/container.py
@@ -14,6 +14,7 @@
import os
import re
import subprocess
+import time
import warnings
# unsolved issue https://github.com/docker/docker-py/issues/2928
@@ -27,7 +28,7 @@
_logger = logging.getLogger(__name__)
-
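+# containers whose stop failed, mapped to the failure timestamp; retried after one hour (see _docker_stop)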
+docker_stop_failures = {}
class Command():
@@ -114,7 +115,6 @@ def _docker_build(build_dir, image_tag):
_logger.error('Build of image %s failed with this BUILD error:', image_tag)
msg = f"{e.msg}\n{''.join(l.get('stream') or '' for l in e.build_log)}"
return (False, msg)
- _logger.info('Dockerfile %s finished build', image_tag)
return (True, None)
@@ -203,6 +203,13 @@ def _docker_stop(container_name, build_dir):
"""Stops the container named container_name"""
container_name = sanitize_container_name(container_name)
_logger.info('Stopping container %s', container_name)
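+    # a container whose stop recently failed is skipped for an hour before we retry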
+ if container_name in docker_stop_failures:
+ if docker_stop_failures[container_name] + 60 * 60 < time.time():
+ _logger.warning('Removing %s from docker_stop_failures', container_name)
+ del docker_stop_failures[container_name]
+ else:
+            _logger.warning('Skipping %s, previous stop failed recently', container_name)
+ return
docker_client = docker.from_env()
if build_dir:
end_file = os.path.join(build_dir, 'end-%s' % container_name)
@@ -212,11 +219,15 @@ def _docker_stop(container_name, build_dir):
try:
container = docker_client.containers.get(container_name)
container.stop(timeout=1)
+ return
except docker.errors.NotFound:
             _logger.error('Cannot stop container %s. Container not found', container_name)
except docker.errors.APIError as e:
             _logger.error('Cannot stop container %s. API Error "%s"', container_name, e)
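+            # remember the failure so the next _docker_stop calls skip this container for a while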
+ docker_stop_failures[container_name] = time.time()
+
+
def docker_state(container_name, build_dir):
container_name = sanitize_container_name(container_name)
exist = os.path.exists(os.path.join(build_dir, 'exist-%s' % container_name))
diff --git a/runbot/models/build.py b/runbot/models/build.py
index 3d3cf6a61..a6451d7ae 100644
--- a/runbot/models/build.py
+++ b/runbot/models/build.py
@@ -109,11 +109,11 @@ def _compute_commit_ids(self):
def create(self, values):
params = self.new(values)
- match = self._find_existing(params.fingerprint)
- if match:
- return match
- values = self._convert_to_write(params._cache)
- return super().create(values)
+ record = self._find_existing(params.fingerprint)
+ if not record:
+ values = self._convert_to_write(params._cache)
+ record = super().create(values)
+ return record
def _find_existing(self, fingerprint):
return self.env['runbot.build.params'].search([('fingerprint', '=', fingerprint)], limit=1)
@@ -156,10 +156,10 @@ class BuildResult(models.Model):
trigger_id = fields.Many2one('runbot.trigger', related='params_id.trigger_id', store=True, index=True)
# state machine
- global_state = fields.Selection(make_selection(state_order), string='Status', compute='_compute_global_state', store=True, recursive=True)
+ global_state = fields.Selection(make_selection(state_order), string='Status', default='pending', required=True)
local_state = fields.Selection(make_selection(state_order), string='Build Status', default='pending', required=True, index=True)
- global_result = fields.Selection(make_selection(result_order), string='Result', compute='_compute_global_result', store=True, recursive=True)
- local_result = fields.Selection(make_selection(result_order), string='Build Result')
+ global_result = fields.Selection(make_selection(result_order), string='Result')
+ local_result = fields.Selection(make_selection(result_order), string='Build Result', default='ok')
triggered_result = fields.Selection(make_selection(result_order), string='Triggered Result') # triggered by db only
requested_action = fields.Selection([('wake_up', 'To wake up'), ('deathrow', 'To kill')], string='Action requested', index=True)
@@ -226,6 +226,7 @@ class BuildResult(models.Model):
killable = fields.Boolean('Killable')
database_ids = fields.One2many('runbot.database', 'build_id')
+ commit_export_ids = fields.One2many('runbot.commit.export', 'build_id')
static_run = fields.Char('Static run URL')
@@ -240,20 +241,6 @@ def _compute_log_list(self): # storing this field because it will be accessed through the repo viewer
build.log_list = ','.join({step.name for step in build.params_id.config_id.step_ids() if step._has_log()})
# TODO replace logic, add log file to list when executed (avoid 404, link log on docker start, avoid fake is_docker_step)
- @api.depends('children_ids.global_state', 'local_state')
- def _compute_global_state(self):
- for record in self:
- waiting_score = record._get_state_score('waiting')
- children_ids = [child for child in record.children_ids if not child.orphan_result]
- if record._get_state_score(record.local_state) > waiting_score and children_ids: # if finish, check children
- children_state = record._get_youngest_state([child.global_state for child in children_ids])
- if record._get_state_score(children_state) > waiting_score:
- record.global_state = record.local_state
- else:
- record.global_state = 'waiting'
- else:
- record.global_state = record.local_state
-
@api.depends('gc_delay', 'job_end')
def _compute_gc_date(self):
icp = self.env['ir.config_parameter'].sudo()
@@ -285,22 +272,6 @@ def _get_youngest_state(self, states):
def _get_state_score(self, result):
return state_order.index(result)
- @api.depends('children_ids.global_result', 'local_result', 'children_ids.orphan_result')
- def _compute_global_result(self):
- for record in self:
- if record.local_result and record._get_result_score(record.local_result) >= record._get_result_score('ko'):
- record.global_result = record.local_result
- else:
- children_ids = [child for child in record.children_ids if not child.orphan_result]
- if children_ids:
- children_result = record._get_worst_result([child.global_result for child in children_ids], max_res='ko')
- if record.local_result:
- record.global_result = record._get_worst_result([record.local_result, children_result])
- else:
- record.global_result = children_result
- else:
- record.global_result = record.local_result
-
def _get_worst_result(self, results, max_res=False):
results = [result for result in results if result] # filter Falsy values
index = max([self._get_result_score(result) for result in results]) if results else 0
@@ -326,20 +297,49 @@ def copy_data(self, default=None):
})
return [values]
+ @api.model_create_multi
+ def create(self, vals_list):
+ for values in vals_list:
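+            # a freshly created build has no children: global state/result start as mirrors of the local values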
+ if 'local_state' in values:
+ values['global_state'] = values['local_state']
+ if 'local_result' in values:
+ values['global_result'] = values['local_result']
+ records = super().create(vals_list)
+ if records.parent_id:
+ records.parent_id._update_global_state()
+ records.parent_id._update_global_result()
+ return records
+
def write(self, values):
# some validation to ensure db consistency
if 'local_state' in values:
if values['local_state'] == 'done':
- self.env['runbot.commit.export'].search([('build_id', 'in', self.ids)]).unlink()
- local_result = values.get('local_result')
- for build in self:
- if local_result and local_result != self._get_worst_result([build.local_result, local_result]): # dont write ok on a warn/error build
- if len(self) == 1:
- values.pop('local_result')
- else:
- raise ValidationError('Local result cannot be set to a less critical level')
- res = super(BuildResult, self).write(values)
- return res
+ self.filtered(lambda b: b.local_state != 'done').commit_export_ids.unlink()
+
+ # don't update if the value doesn't change to avoid triggering concurrent updates
+ def minimal_update(records, field_name):
+ updated = self.browse()
+ if field_name in values:
+ field_result = values.pop(field_name)
+ updated = records.filtered(lambda b: (b[field_name] != field_result))
+ if updated:
+ super(BuildResult, updated).write({field_name: field_result})
+ return updated
+
+        # local_result is a special case: skip the update not only when the value is unchanged,
+        # but also when the new value is less critical than the current one
+        # example: don't write 'ok' on a build whose result is already 'ko' or 'warn'
+ if 'local_result' in values:
+ to_update = self.filtered(lambda b: (self._get_result_score(values['local_result']) > self._get_result_score(b.local_result)))
+ updated = minimal_update(to_update, 'local_result')
+ updated._update_global_result()
+ minimal_update(self, 'global_result').parent_id._update_global_result()
+ minimal_update(self, 'local_state')._update_global_state()
+ minimal_update(self, 'global_state').parent_id._update_global_state()
+ minimal_update(self, 'orphan_result').parent_id._update_global_result()
+
+ if values:
+ super().write(values)
+ return True
def _add_child(self, param_values, orphan=False, description=False, additionnal_commit_links=False):
@@ -373,11 +373,6 @@ def result_multi(self):
return 'warning'
return 'ko' # ?
- def update_build_end(self):
- for build in self:
- build.build_end = now()
- if build.parent_id and build.parent_id.local_state in ('running', 'done'):
- build.parent_id.update_build_end()
@api.depends('params_id.version_id.name')
def _compute_dest(self):
@@ -451,8 +446,6 @@ def _rebuild(self, message=None):
self.orphan_result = True
new_build = self.create(values)
- if self.parent_id:
- new_build._github_status()
user = request.env.user if request else self.env.user
new_build._log('rebuild', 'Rebuild initiated by %s%s' % (user.name, (' :%s' % message) if message else ''))
@@ -590,34 +583,21 @@ def _get_docker_name(self):
self.ensure_one()
return '%s_%s' % (self.dest, self.active_step.name)
- def _init_pendings(self, host):
- for build in self:
- if build.local_state != 'pending':
- raise UserError("Build %s is not pending" % build.id)
- if build.host != host.name:
- raise UserError("Build %s does not have correct host" % build.id)
- # allocate port and schedule first job
- values = {
- 'port': self._find_port(),
- 'job_start': now(),
- 'build_start': now(),
- 'job_end': False,
- }
- values.update(build._next_job_values())
- build.write(values)
- if not build.active_step:
- build._log('_schedule', 'No job in config, doing nothing')
- build.local_result = 'warn'
- continue
- try:
- build._log('_schedule', 'Init build environment with config %s ' % build.params_id.config_id.name)
- os.makedirs(build._path('logs'), exist_ok=True)
- except Exception:
- _logger.exception('Failed initiating build %s', build.dest)
- build._log('_schedule', 'Failed initiating build')
- build._kill(result='ko')
- continue
- build._run_job()
+ def _init_pendings(self):
+ self.ensure_one()
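+        # now called per-build from _schedule(): allocate a port and set up the first job for this pending build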
+ build = self
+ build.port = self._find_port()
+ build.job_start = now()
+ build.build_start = now()
+ build.job_end = False
+        try:
+            build._log('_schedule', 'Init build environment with config %s ' % build.params_id.config_id.name)
+ os.makedirs(build._path('logs'), exist_ok=True)
+ except Exception:
+ _logger.exception('Failed initiating build %s', build.dest)
+ build._log('_schedule', 'Failed initiating build')
+ build._kill(result='ko')
def _process_requested_actions(self):
for build in self:
@@ -629,12 +609,15 @@ def _process_requested_actions(self):
continue
if build.requested_action == 'wake_up':
- if docker_state(build._get_docker_name(), build._path()) == 'RUNNING':
- build.write({'requested_action': False, 'local_state': 'running'})
- build._log('wake_up', 'Waking up failed, **docker is already running**', log_type='markdown', level='SEPARATOR')
+ if build.local_state != 'done':
+ build.requested_action = False
+ build._log('wake_up', 'Impossible to wake-up, build is not done', log_type='markdown', level='SEPARATOR')
elif not os.path.exists(build._path()):
- build.write({'requested_action': False, 'local_state': 'done'})
+ build.requested_action = False
                build._log('wake_up', 'Impossible to wake-up, **build dir does not exist anymore**', log_type='markdown', level='SEPARATOR')
+ elif docker_state(build._get_docker_name(), build._path()) == 'RUNNING':
+ build.write({'requested_action': False, 'local_state': 'running'})
+ build._log('wake_up', 'Waking up failed, **docker is already running**', log_type='markdown', level='SEPARATOR')
else:
try:
log_path = build._path('logs', 'wake_up.txt')
@@ -654,7 +637,7 @@ def _process_requested_actions(self):
run_step = step_ids[-1]
else:
run_step = self.env.ref('runbot.runbot_build_config_step_run')
- run_step._run_step(build, log_path, force=True)
+ run_step._run_step(build, log_path, force=True)()
# reload_nginx will be triggered by _run_run_odoo
except Exception:
_logger.exception('Failed to wake up build %s', build.dest)
@@ -662,48 +645,76 @@ def _process_requested_actions(self):
build.write({'requested_action': False, 'local_state': 'done'})
continue
+ def _update_global_result(self):
+ for record in self:
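+            # children flagged orphan_result are ignored: their result is superseded by their replacement build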
+ children = record.children_ids.filtered(lambda child: not child.orphan_result)
+ global_result = record.local_result
+ if children:
+ child_result = record._get_worst_result(children.mapped('global_result'), max_res='ko')
+ global_result = record._get_worst_result([record.local_result, child_result])
+ if global_result != record.global_result:
+ record.global_result = global_result
+ if not record.parent_id:
+ record._github_status() # failfast
+
+ def _update_global_state(self):
+ for record in self:
+ children = record.children_ids.filtered(lambda child: not child.orphan_result)
+ init_state = record.global_state
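+            # the build stays 'waiting' while any non-orphan child is neither running nor done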
+ testing_children = any(child.global_state not in ('running', 'done') for child in children)
+ global_state = record.local_state
+ if testing_children:
+ child_state = 'waiting'
+ global_state = record._get_youngest_state([record.local_state, child_state])
+
+ if global_state != record.global_state:
+ record.global_state = global_state
+
+ ending_build = init_state not in ('done', 'running') and record.global_state in ('done', 'running')
+
+ if ending_build:
+ if not record.local_result: # Set 'ok' result if no result set (no tests job on build)
+ record.local_result = 'ok'
+ record.build_end = now()
+ if not record.parent_id:
+ record._github_status()
+
def _schedule(self):
"""schedule the build"""
icp = self.env['ir.config_parameter'].sudo()
- hosts_by_name = {h.name: h for h in self.env['runbot.host'].search([('name', 'in', self.mapped('host'))])}
- hosts_by_build = {b.id: hosts_by_name[b.host] for b in self}
- for build in self:
- if build.local_state not in ['testing', 'running']:
- raise UserError("Build %s is not testing/running: %s" % (build.id, build.local_state))
- if build.local_state == 'testing':
- # failfast in case of docker error (triggered in database)
- if build.triggered_result and not build.active_step.ignore_triggered_result:
- worst_result = self._get_worst_result([build.triggered_result, build.local_result])
- if worst_result != build.local_result:
- build.local_result = build.triggered_result
- build._github_status() # failfast
- # check if current job is finished
+ self.ensure_one()
+ build = self
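+        # _schedule now handles one build at a time, pending ones included; the scheduler loop commits between builds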
+ if build.local_state not in ['testing', 'running', 'pending']:
+ return False
+ # check if current job is finished
+ if build.local_state == 'pending':
+ build._init_pendings()
+ else:
_docker_state = docker_state(build._get_docker_name(), build._path())
if _docker_state == 'RUNNING':
timeout = min(build.active_step.cpu_limit, int(icp.get_param('runbot.runbot_timeout', default=10000)))
if build.local_state != 'running' and build.job_time > timeout:
build._log('_schedule', '%s time exceeded (%ss)' % (build.active_step.name if build.active_step else "?", build.job_time))
build._kill(result='killed')
- continue
+ return False
elif _docker_state in ('UNKNOWN', 'GHOST') and (build.local_state == 'running' or build.active_step._is_docker_step()): # todo replace with docker_start
docker_time = time.time() - dt2time(build.docker_start or build.job_start)
if docker_time < 5:
- continue
+ return False
elif docker_time < 60:
_logger.info('container "%s" seems too take a while to start :%s' % (build.job_time, build._get_docker_name()))
- continue
+ return False
else:
build._log('_schedule', 'Docker with state %s not started after 60 seconds, skipping' % _docker_state, level='ERROR')
- if hosts_by_build[build.id]._fetch_local_logs(build_ids=build.ids):
- continue # avoid to make results with remaining logs
+ if self.env['runbot.host']._fetch_local_logs(build_ids=build.ids):
+                return True # avoid making results while local logs remain to be processed
# No job running, make result and select next job
- build_values = {
- 'job_end': now(),
- 'docker_start': False,
- }
+
+ build.job_end = now()
+ build.docker_start = False
# make result of previous job
try:
- results = build.active_step._make_results(build)
+ build.active_step._make_results(build)
except Exception as e:
if isinstance(e, RunbotException):
message = e.args[0][:300000]
@@ -711,50 +722,70 @@ def _schedule(self):
                message = 'An error occurred while computing results of %s:\n %s' % (build.job, str(e).replace('\\n', '\n').replace("\\'", "'")[:10000])
_logger.exception(message)
build._log('_make_results', message, level='ERROR')
- results = {'local_result': 'ko'}
-
- build_values.update(results)
+ build.local_result = 'ko'
# compute statistics before starting next job
build.active_step._make_stats(build)
-
build.active_step.log_end(build)
- build_values.update(build._next_job_values()) # find next active_step or set to done
+ step_ids = self.params_id.config_id.step_ids()
+        if not step_ids: # no job to do, build is done
+            self.active_step = False
+            self.local_state = 'done'
+            build._log('_schedule', 'No job in config, doing nothing')
+            build.local_result = 'warn'
+            return False
+        if not self.active_step and self.local_state != 'pending':
+            # means that a step has been run manually without using config (wakeup)
+            build.active_step = False
+            build.local_state = 'done'
+            return False
- ending_build = build.local_state not in ('done', 'running') and build_values.get('local_state') in ('done', 'running')
- if ending_build:
- build.update_build_end()
+ if not self.active_step:
+ next_index = 0
+ else:
+ if self.active_step not in step_ids:
+                self._log('run', 'Config was modified and current step does not exist anymore, skipping.', level='ERROR')
+ self.active_step = False
+ self.local_state = 'done'
+ self.local_result = 'ko'
+ return False
+ next_index = step_ids.index(self.active_step) + 1
- build.write(build_values)
- if ending_build:
- if not build.local_result: # Set 'ok' result if no result set (no tests job on build)
- build.local_result = 'ok'
- build._logger("No result set, setting ok by default")
- build._github_status()
- build._run_job()
+ while True:
+ if next_index >= len(step_ids): # final job, build is done
+ self.active_step = False
+ self.local_state = 'done'
+ return False
+ new_step = step_ids[next_index] # job to do, state is job_state (testing or running)
+ if new_step.domain_filter and not self.filtered_domain(safe_eval(new_step.domain_filter)):
+ self._log('run', '**Skipping** step ~~%s~~ from config **%s**' % (new_step.name, self.params_id.config_id.name), log_type='markdown', level='SEPARATOR')
+ next_index += 1
+ continue
+ break
+ build.active_step = new_step.id
+ build.local_state = new_step._step_state()
+ return build._run_job()
def _run_job(self):
- # run job
- for build in self:
- if build.local_state != 'done':
- build._logger('running %s', build.active_step.name)
- os.makedirs(build._path('logs'), exist_ok=True)
- os.makedirs(build._path('datadir'), exist_ok=True)
- try:
- build.active_step._run(build) # run should be on build?
- except TransactionRollbackError:
- raise
- except Exception as e:
- if isinstance(e, RunbotException):
- message = e.args[0]
- else:
- message = '%s failed running step %s:\n %s' % (build.dest, build.job, str(e).replace('\\n', '\n').replace("\\'", "'"))
- _logger.exception(message)
- build._log("run", message, level='ERROR')
- build._kill(result='ko')
+ self.ensure_one()
+ build = self
+ if build.local_state != 'done':
+ build._logger('running %s', build.active_step.name)
+ os.makedirs(build._path('logs'), exist_ok=True)
+ os.makedirs(build._path('datadir'), exist_ok=True)
+ try:
+ return build.active_step._run(build) # run should be on build?
+ except TransactionRollbackError:
+ raise
+ except Exception as e:
+ if isinstance(e, RunbotException):
+ message = e.args[0]
+ else:
+ message = '%s failed running step %s:\n %s' % (build.dest, build.job, str(e).replace('\\n', '\n').replace("\\'", "'"))
+ _logger.exception(message)
+ build._log("run", message, level='ERROR')
+ build._kill(result='ko')
def _docker_run(self, cmd=None, ro_volumes=None, **kwargs):
self.ensure_one()
@@ -926,7 +957,6 @@ def _kill(self, result=None):
v['local_result'] = result
build.write(v)
self.env.cr.commit()
- build._github_status()
self.invalidate_cache()
def _ask_kill(self, lock=True, message=None):
@@ -1024,8 +1054,9 @@ def _cmd(self, python_params=None, py_version=None, local_only=True, sub_command
elif grep(config_path, "--xmlrpc-interface"):
command.add_config_tuple("xmlrpc_interface", "127.0.0.1")
+ log_db = self.env['ir.config_parameter'].get_param('runbot.logdb_name')
if grep(config_path, "log-db"):
- command.add_config_tuple("log_db", "runbot_logs")
+ command.add_config_tuple("log_db", log_db)
if grep(config_path, 'log-db-level'):
command.add_config_tuple("log_db_level", '25')
@@ -1051,35 +1082,7 @@ def _cmd_check(self, cmd):
'build_id': self.id
})
- def _next_job_values(self):
- self.ensure_one()
- step_ids = self.params_id.config_id.step_ids()
- if not step_ids: # no job to do, build is done
- return {'active_step': False, 'local_state': 'done'}
-
- if not self.active_step and self.local_state != 'pending':
- # means that a step has been run manually without using config
- return {'active_step': False, 'local_state': 'done'}
-
- if not self.active_step:
- next_index = 0
- else:
- if self.active_step not in step_ids:
- self._log('run', 'Config was modified and current step does not exists anymore, skipping.', level='ERROR')
- return {'active_step': False, 'local_state': 'done', 'local_result': self._get_worst_result([self.local_result, 'ko'])}
- next_index = step_ids.index(self.active_step) + 1
-
- while True:
- if next_index >= len(step_ids): # final job, build is done
- return {'active_step': False, 'local_state': 'done'}
- new_step = step_ids[next_index] # job to do, state is job_state (testing or running)
- if new_step.domain_filter and not self.filtered_domain(safe_eval(new_step.domain_filter)):
-
- self._log('run', '**Skipping** step ~~%s~~ from config **%s**' % (new_step.name, self.params_id.config_id.name), log_type='markdown', level='SEPARATOR')
- next_index += 1
- continue
- break
- return {'active_step': new_step.id, 'local_state': new_step._step_state()}
+
def _get_py_version(self):
"""return the python name to use from build batch"""
diff --git a/runbot/models/build_config.py b/runbot/models/build_config.py
index f87c9f8b9..2e9d2328c 100644
--- a/runbot/models/build_config.py
+++ b/runbot/models/build_config.py
@@ -166,7 +166,6 @@ class ConfigStep(models.Model):
# python
python_code = fields.Text('Python code', tracking=True, default=PYTHON_DEFAULT)
python_result_code = fields.Text('Python code for result', tracking=True, default=PYTHON_DEFAULT)
- ignore_triggered_result = fields.Boolean('Ignore error triggered in logs', tracking=True, default=False)
running_job = fields.Boolean('Job final state is running', default=False, help="Docker won't be killed if checked")
# create_build
create_config_ids = fields.Many2many('runbot.build.config', 'runbot_build_config_step_ids_create_config_ids_rel', string='New Build Configs', tracking=True, index=True)
@@ -275,14 +274,16 @@ def _run(self, build):
url = f"{log_url}/runbot/static/build/{build.dest}/logs/{self.name}.txt"
log_link = f'[@icon-file-text]({url})'
build._log('run', 'Starting step **%s** from config **%s** %s' % (self.name, build.params_id.config_id.name, log_link), log_type='markdown', level='SEPARATOR')
- self._run_step(build, log_path)
+ return self._run_step(build, log_path)
def _run_step(self, build, log_path, **kwargs):
build.log_counter = self.env['ir.config_parameter'].sudo().get_param('runbot.runbot_maxlogs', 100)
run_method = getattr(self, '_run_%s' % self.job_type)
docker_params = run_method(build, log_path, **kwargs)
if docker_params:
- build._docker_run(**docker_params)
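+            # defer the docker start: callers commit the transaction first, then invoke the returned closure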
+ def start_docker():
+ build._docker_run(**docker_params)
+ return start_docker
def _run_create_build(self, build, log_path):
count = 0
@@ -909,23 +910,20 @@ def _coverage_params(self, build, modules_to_install):
return ['--omit', ','.join(pattern_to_omit)]
def _make_results(self, build):
- build_values = {}
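+        # results are now written directly on the build as they are computed instead of being returned as a dict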
log_time = self._get_log_last_write(build)
if log_time:
- build_values['job_end'] = log_time
+ build.job_end = log_time
if self.job_type == 'python' and self.python_result_code and self.python_result_code != PYTHON_DEFAULT:
- build_values.update(self._make_python_results(build))
+ build.write(self._make_python_results(build))
elif self.job_type in ['install_odoo', 'python']:
if self.coverage:
- build_values.update(self._make_coverage_results(build))
+ build.write(self._make_coverage_results(build))
if self.test_enable or self.test_tags:
- build_values.update(self._make_tests_results(build))
+ build.write(self._make_tests_results(build))
elif self.job_type == 'test_upgrade':
- build_values.update(self._make_upgrade_results(build))
+ build.write(self._make_upgrade_results(build))
elif self.job_type == 'restore':
- build_values.update(self._make_restore_results(build))
-
- return build_values
+ build.write(self._make_restore_results(build))
def _make_python_results(self, build):
eval_ctx = self.make_python_ctx(build)
diff --git a/runbot/models/event.py b/runbot/models/event.py
index a771ce47b..d4c18233a 100644
--- a/runbot/models/event.py
+++ b/runbot/models/event.py
@@ -36,10 +36,11 @@ def create(self, vals_list):
build_logs = logs_by_build_id[build.id]
for ir_log in build_logs:
ir_log['active_step_id'] = build.active_step.id
- if ir_log['level'].upper() == 'WARNING':
- build.triggered_result = 'warn'
- elif ir_log['level'].upper() == 'ERROR':
- build.triggered_result = 'ko'
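+            # only logs emitted while the build is testing may degrade its result; logs from a running build are informational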
+ if build.local_state != 'running':
+ if ir_log['level'].upper() == 'WARNING':
+ build.local_result = 'warn'
+ elif ir_log['level'].upper() == 'ERROR':
+ build.local_result = 'ko'
return super().create(vals_list)
def _markdown(self):
diff --git a/runbot/models/host.py b/runbot/models/host.py
index cd84e6ea3..19e72a738 100644
--- a/runbot/models/host.py
+++ b/runbot/models/host.py
@@ -1,5 +1,6 @@
import logging
import getpass
+import time
from collections import defaultdict
@@ -112,9 +113,11 @@ def _docker_build(self):
self.clear_caches() # needed to ensure that content is updated on all hosts
for dockerfile in self.env['runbot.dockerfile'].search([('to_build', '=', True)]):
self._docker_build_dockerfile(dockerfile, static_path)
+        _logger.info('Done building docker images')
def _docker_build_dockerfile(self, dockerfile, workdir):
- _logger.info('Building %s, %s', dockerfile.name, hash(str(dockerfile.dockerfile)))
+ start = time.time()
+ # _logger.info('Building %s, %s', dockerfile.name, hash(str(dockerfile.dockerfile)))
docker_build_path = os.path.join(workdir, 'docker', dockerfile.image_tag)
os.makedirs(docker_build_path, exist_ok=True)
@@ -137,6 +140,10 @@ def _docker_build_dockerfile(self, dockerfile, workdir):
dockerfile.to_build = False
dockerfile.message_post(body=f'Build failure:\n{msg}')
self.env['runbot.runbot'].warning(f'Dockerfile build "{dockerfile.image_tag}" failed on host {self.name}')
+ else:
+ duration = time.time() - start
+ if duration > 1:
+ _logger.info('Dockerfile %s finished build in %s', dockerfile.image_tag, duration)
def _get_work_path(self):
return os.path.abspath(os.path.join(os.path.dirname(__file__), '../static'))
@@ -244,3 +251,10 @@ def process_logs(self, build_ids=None):
logs_db_name = self.env['ir.config_parameter'].get_param('runbot.logdb_name')
with local_pg_cursor(logs_db_name) as local_cr:
local_cr.execute("DELETE FROM ir_logging WHERE id in %s", [tuple(local_log_ids)])
+
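+    # helpers to search the builds assigned to this host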
+ def get_build_domain(self, domain=None):
+ domain = domain or []
+ return [('host', '=', self.name)] + domain
+
+ def get_builds(self, domain, order=None):
+ return self.env['runbot.build'].search(self.get_build_domain(domain), order=order)
diff --git a/runbot/models/runbot.py b/runbot/models/runbot.py
index 679dfc1ff..26999d36b 100644
--- a/runbot/models/runbot.py
+++ b/runbot/models/runbot.py
@@ -39,78 +39,79 @@ def _root(self):
def _scheduler(self, host):
self._gc_testing(host)
self._commit()
- for build in self._get_builds_with_requested_actions(host):
- build = build.browse(build.id) # remove preftech ids, manage build one by one
+ processed = 0
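+        # builds handled during this turn; a non-zero count makes the scheduler loop sleep only briefly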
+ for build in host.get_builds([('requested_action', 'in', ['wake_up', 'deathrow'])]):
+ processed += 1
build._process_requested_actions()
self._commit()
host.process_logs()
self._commit()
- for build in self._get_builds_to_schedule(host):
+ for build in host.get_builds([('local_state', 'in', ['testing', 'running'])]) | self._get_builds_to_init(host):
            build = build.browse(build.id) # remove prefetch ids, manage builds one by one
- build._schedule()
+ result = build._schedule()
+ if result:
+ processed += 1
self._commit()
- self._assign_pending_builds(host, host.nb_worker, [('build_type', '!=', 'scheduled')])
+ if callable(result):
+ result() # start docker
+ processed += self._assign_pending_builds(host, host.nb_worker, [('build_type', '!=', 'scheduled')])
self._commit()
- self._assign_pending_builds(host, host.nb_worker - 1 or host.nb_worker)
+ processed += self._assign_pending_builds(host, host.nb_worker - 1 or host.nb_worker)
self._commit()
- self._assign_pending_builds(host, host.nb_worker and host.nb_worker + 1, [('build_type', '=', 'priority')])
+ processed += self._assign_pending_builds(host, host.nb_worker and host.nb_worker + 1, [('build_type', '=', 'priority')])
self._commit()
- for build in self._get_builds_to_init(host):
- build = build.browse(build.id) # remove preftech ids, manage build one by one
- build._init_pendings(host)
- self._commit()
self._gc_running(host)
self._commit()
self._reload_nginx()
-
- def build_domain_host(self, host, domain=None):
- domain = domain or []
- return [('host', '=', host.name)] + domain
-
- def _get_builds_with_requested_actions(self, host):
- return self.env['runbot.build'].search(self.build_domain_host(host, [('requested_action', 'in', ['wake_up', 'deathrow'])]))
-
- def _get_builds_to_schedule(self, host):
- return self.env['runbot.build'].search(self.build_domain_host(host, [('local_state', 'in', ['testing', 'running'])]))
+ self._commit()
+ return processed
def _assign_pending_builds(self, host, nb_worker, domain=None):
if host.assigned_only or nb_worker <= 0:
- return
- domain_host = self.build_domain_host(host)
- reserved_slots = self.env['runbot.build'].search_count(domain_host + [('local_state', 'in', ('testing', 'pending'))])
+ return 0
+ reserved_slots = len(host.get_builds([('local_state', 'in', ('testing', 'pending'))]))
assignable_slots = (nb_worker - reserved_slots)
if assignable_slots > 0:
allocated = self._allocate_builds(host, assignable_slots, domain)
if allocated:
                _logger.info('Builds %s were allocated to runbot', allocated)
+ return len(allocated)
+ return 0
def _get_builds_to_init(self, host):
- domain_host = self.build_domain_host(host)
- used_slots = self.env['runbot.build'].search_count(domain_host + [('local_state', '=', 'testing')])
+ domain_host = host.get_build_domain()
+ used_slots = len(host.get_builds([('local_state', '=', 'testing')]))
available_slots = host.nb_worker - used_slots
- if available_slots <= 0:
- return self.env['runbot.build']
- return self.env['runbot.build'].search(domain_host + [('local_state', '=', 'pending')], limit=available_slots)
+ build_to_init = self.env['runbot.build']
+ if available_slots > 0:
+ build_to_init |= self.env['runbot.build'].search(domain_host + [('local_state', '=', 'pending')], limit=available_slots)
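+        # keep one extra slot usable by priority builds even when the regular slots are exhausted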
+ if available_slots + 1 > 0:
+ build_to_init |= self.env['runbot.build'].search(domain_host + [('local_state', '=', 'pending'), ('build_type', '=', 'priority')], limit=1)
+ return build_to_init
def _gc_running(self, host):
running_max = host.get_running_max()
- domain_host = self.build_domain_host(host)
+ cannot_be_killed_ids = host.get_builds([('keep_running', '=', True)]).ids
Build = self.env['runbot.build']
- cannot_be_killed_ids = Build.search(domain_host + [('keep_running', '=', True)]).ids
sticky_bundles = self.env['runbot.bundle'].search([('sticky', '=', True), ('project_id.keep_sticky_running', '=', True)])
cannot_be_killed_ids += [
build.id
for build in sticky_bundles.mapped('last_batchs.slot_ids.build_id')
if build.host == host.name
][:running_max]
- build_ids = Build.search(domain_host + [('local_state', '=', 'running'), ('id', 'not in', cannot_be_killed_ids)], order='job_start desc').ids
+ build_ids = host.get_builds([('local_state', '=', 'running'), ('id', 'not in', cannot_be_killed_ids)], order='job_start desc').ids
Build.browse(build_ids)[running_max:]._kill()
def _gc_testing(self, host):
"""garbage collect builds that could be killed"""
# decide if we need room
Build = self.env['runbot.build']
- domain_host = self.build_domain_host(host)
+ domain_host = host.get_build_domain()
testing_builds = Build.search(domain_host + [('local_state', 'in', ['testing', 'pending']), ('requested_action', '!=', 'deathrow')])
used_slots = len(testing_builds)
available_slots = host.nb_worker - used_slots
@@ -277,14 +278,13 @@ def _fetch_loop_turn(self, host, pull_info_failures, default_sleep=1):
_logger.warning('Removing %s from pull_info_failures', pr_number)
del pull_info_failures[pr_number]
-
return manager.get('sleep', default_sleep)
- def _scheduler_loop_turn(self, host, default_sleep=5):
- _logger.info('Scheduling...')
+ def _scheduler_loop_turn(self, host, sleep=5):
with self.manage_host_exception(host) as manager:
- self._scheduler(host)
- return manager.get('sleep', default_sleep)
+ if self._scheduler(host):
+ sleep = 0.1
+ return manager.get('sleep', sleep)
@contextmanager
def manage_host_exception(self, host):
diff --git a/runbot/tests/common.py b/runbot/tests/common.py
index 67b86c5ae..57324dea1 100644
--- a/runbot/tests/common.py
+++ b/runbot/tests/common.py
@@ -181,7 +181,6 @@ def setUp(self):
self.start_patcher('docker_stop', 'odoo.addons.runbot.container._docker_stop')
self.start_patcher('docker_get_gateway_ip', 'odoo.addons.runbot.models.build_config.docker_get_gateway_ip', None)
- self.start_patcher('cr_commit', 'odoo.sql_db.Cursor.commit', None)
self.start_patcher('repo_commit', 'odoo.addons.runbot.models.runbot.Runbot._commit', None)
self.start_patcher('_local_cleanup_patcher', 'odoo.addons.runbot.models.build.BuildResult._local_cleanup')
self.start_patcher('_local_pg_dropdb_patcher', 'odoo.addons.runbot.models.build.BuildResult._local_pg_dropdb')
@@ -194,6 +193,11 @@ def setUp(self):
self.start_patcher('_get_py_version', 'odoo.addons.runbot.models.build.BuildResult._get_py_version', 3)
+ def no_commit(*_args, **_kwargs):
+ _logger.info('Skipping commit')
+
+ self.patch(self.env.cr, 'commit', no_commit)
+
def start_patcher(self, patcher_name, patcher_path, return_value=DEFAULT, side_effect=DEFAULT, new=DEFAULT):
diff --git a/runbot/tests/test_build.py b/runbot/tests/test_build.py
index d1fa191ad..cab7554f8 100644
--- a/runbot/tests/test_build.py
+++ b/runbot/tests/test_build.py
@@ -170,8 +170,10 @@ def test_base_fields(self):
# test a bulk write, that one cannot change from 'ko' to 'ok'
builds = self.Build.browse([build.id, other.id])
- with self.assertRaises(ValidationError):
- builds.write({'local_result': 'ok'})
+ builds.write({'local_result': 'warn'})
+ self.assertEqual(build.local_result, 'warn')
+ self.assertEqual(other.local_result, 'ko')
+
def test_markdown_description(self):
build = self.Build.create({
@@ -331,6 +333,11 @@ def test_children(self):
build1 = self.Build.create({
'params_id': self.server_params.id,
})
+ self.assertEqual('pending', build1.global_state)
+
+ build1.local_state = 'testing'
+ self.assertEqual('testing', build1.global_state)
+
build1_1 = self.Build.create({
'params_id': self.server_params.id,
'parent_id': build1.id,
@@ -339,6 +346,15 @@ def test_children(self):
'params_id': self.server_params.id,
'parent_id': build1.id,
})
+ self.assertEqual('testing', build1.global_state)
+ self.assertEqual('pending', build1_1.global_state)
+ self.assertEqual('pending', build1_2.global_state)
+
+ build1_1.local_state = 'testing'
+ self.assertEqual('testing', build1.global_state)
+ self.assertEqual('testing', build1_1.global_state)
+ self.assertEqual('pending', build1_2.global_state)
+
build1_1_1 = self.Build.create({
'params_id': self.server_params.id,
'parent_id': build1_1.id,
@@ -348,60 +364,130 @@ def test_children(self):
'parent_id': build1_1.id,
})
- def assert_state(global_state, build):
- self.assertEqual(build.global_state, global_state)
+ self.assertEqual('testing', build1.global_state)
+ self.assertEqual('testing', build1_1.global_state)
+ self.assertEqual('pending', build1_2.global_state)
+ self.assertEqual('pending', build1_1_1.global_state)
+ self.assertEqual('pending', build1_1_2.global_state)
- assert_state('pending', build1)
- assert_state('pending', build1_1)
- assert_state('pending', build1_2)
- assert_state('pending', build1_1_1)
- assert_state('pending', build1_1_2)
+ with self.assertQueries(['''UPDATE "runbot_build" SET "global_state"=%s,"local_state"=%s,"write_date"=%s,"write_uid"=%s WHERE id IN %s''']):
+ build1_2.local_state = "testing"
+
+ self.assertEqual('testing', build1.global_state)
+ self.assertEqual('testing', build1_2.global_state)
+
+ with self.assertQueries([]): # no change should be triggered
+ build1_2.local_state = "testing"
+
+ with self.assertQueries(['''UPDATE "runbot_build" SET "global_state"=%s,"local_state"=%s,"write_date"=%s,"write_uid"=%s WHERE id IN %s''']):
+ build1.local_state = 'done'
+ build1.flush()
+
+ self.assertEqual('waiting', build1.global_state)
+ self.assertEqual('testing', build1_1.global_state)
+
+ with self.assertQueries([]):
+ build1.local_state = 'done' # write the same value, no update should be triggered
+ build1.flush()
- build1.local_state = 'testing'
- build1_1.local_state = 'testing'
- build1.local_state = 'done'
build1_1.local_state = 'done'
- assert_state('waiting', build1)
- assert_state('waiting', build1_1)
- assert_state('pending', build1_2)
- assert_state('pending', build1_1_1)
- assert_state('pending', build1_1_2)
+ self.assertEqual('waiting', build1.global_state)
+ self.assertEqual('waiting', build1_1.global_state)
+ self.assertEqual('testing', build1_2.global_state)
+ self.assertEqual('pending', build1_1_1.global_state)
+ self.assertEqual('pending', build1_1_2.global_state)
build1_1_1.local_state = 'testing'
- assert_state('waiting', build1)
- assert_state('waiting', build1_1)
- assert_state('pending', build1_2)
- assert_state('testing', build1_1_1)
- assert_state('pending', build1_1_2)
+ self.assertEqual('waiting', build1.global_state)
+ self.assertEqual('waiting', build1_1.global_state)
+ self.assertEqual('testing', build1_2.global_state)
+ self.assertEqual('testing', build1_1_1.global_state)
+ self.assertEqual('pending', build1_1_2.global_state)
- build1_2.local_state = 'testing'
+ with self.assertQueries([]):
+ build1_2.local_state = 'testing'
- assert_state('waiting', build1)
- assert_state('waiting', build1_1)
- assert_state('testing', build1_2)
- assert_state('testing', build1_1_1)
- assert_state('pending', build1_1_2)
+ self.assertEqual('waiting', build1.global_state)
+ self.assertEqual('waiting', build1_1.global_state)
+ self.assertEqual('testing', build1_2.global_state)
+ self.assertEqual('testing', build1_1_1.global_state)
+ self.assertEqual('pending', build1_1_2.global_state)
- build1_2.local_state = 'testing' # writing same state a second time
+ build1_2.local_state = 'done'
+ build1_1_1.local_state = 'done'
+ build1_1_2.local_state = 'testing'
- assert_state('waiting', build1)
- assert_state('waiting', build1_1)
- assert_state('testing', build1_2)
- assert_state('testing', build1_1_1)
- assert_state('pending', build1_1_2)
+ self.assertEqual('waiting', build1.global_state)
+ self.assertEqual('waiting', build1_1.global_state)
+ self.assertEqual('done', build1_2.global_state)
+ self.assertEqual('done', build1_1_1.global_state)
+ self.assertEqual('testing', build1_1_2.global_state)
build1_1_2.local_state = 'done'
- build1_1_1.local_state = 'done'
- build1_2.local_state = 'done'
- assert_state('done', build1)
- assert_state('done', build1_1)
- assert_state('done', build1_2)
- assert_state('done', build1_1_1)
- assert_state('done', build1_1_2)
+ self.assertEqual('done', build1.global_state)
+ self.assertEqual('done', build1_1.global_state)
+ self.assertEqual('done', build1_2.global_state)
+ self.assertEqual('done', build1_1_1.global_state)
+ self.assertEqual('done', build1_1_2.global_state)
+
+ def test_rebuild_sub_sub_build(self):
+ build1 = self.Build.create({
+ 'params_id': self.server_params.id,
+ })
+ build1.local_state = 'testing'
+ build1_1 = self.Build.create({
+ 'params_id': self.server_params.id,
+ 'parent_id': build1.id,
+ })
+ build1_1.local_state = 'testing'
+ build1.local_state = 'done'
+ build1_1_1 = self.Build.create({
+ 'params_id': self.server_params.id,
+ 'parent_id': build1_1.id,
+ })
+ build1_1_1.local_state = 'testing'
+ build1_1.local_state = 'done'
+ self.assertEqual('waiting', build1.global_state)
+ self.assertEqual('waiting', build1_1.global_state)
+ self.assertEqual('testing', build1_1_1.global_state)
+ build1_1_1.local_result = 'ko'
+ build1_1_1.local_state = 'done'
+ self.assertEqual('done', build1.global_state)
+ self.assertEqual('done', build1_1.global_state)
+ self.assertEqual('done', build1_1_1.global_state)
+ self.assertEqual('ko', build1.global_result)
+ self.assertEqual('ko', build1_1.global_result)
+ self.assertEqual('ko', build1_1_1.global_result)
+
+ rebuild1_1_1 = self.Build.create({ # this is a rebuild
+ 'params_id': self.server_params.id,
+ 'parent_id': build1_1.id,
+ })
+ build1_1_1.orphan_result = True
+
+ self.assertEqual('ok', build1.global_result)
+ self.assertEqual('ok', build1_1.global_result)
+ self.assertEqual('ko', build1_1_1.global_result)
+ self.assertEqual('waiting', build1.global_state)
+ self.assertEqual('waiting', build1_1.global_state)
+ self.assertEqual('done', build1_1_1.global_state)
+ self.assertEqual('pending', rebuild1_1_1.global_state)
+
+ rebuild1_1_1.local_result = 'ok'
+ rebuild1_1_1.local_state = 'done'
+
+ self.assertEqual('ok', build1.global_result)
+ self.assertEqual('ok', build1_1.global_result)
+ self.assertEqual('ko', build1_1_1.global_result)
+ self.assertEqual(False, rebuild1_1_1.global_result)
+ self.assertEqual('done', build1.global_state)
+ self.assertEqual('done', build1_1.global_state)
+ self.assertEqual('done', build1_1_1.global_state)
+ self.assertEqual('done', rebuild1_1_1.global_state)
class TestGc(RunbotCaseMinimalSetup):
@@ -446,6 +532,7 @@ def test_repo_gc_testing(self):
build_a.write({'local_state': 'testing', 'host': host.name})
build_b.write({'local_state': 'testing', 'host': 'runbot_yyy'})
+
# no room needed, verify that nobody got killed
self.Runbot._gc_testing(host)
self.assertFalse(build_a.requested_action)
diff --git a/runbot/tests/test_build_config_step.py b/runbot/tests/test_build_config_step.py
index 1053a0987..524116c04 100644
--- a/runbot/tests/test_build_config_step.py
+++ b/runbot/tests/test_build_config_step.py
@@ -214,7 +214,6 @@ def setUp(self):
def test_config_step_create_results(self):
""" Test child builds are taken into account"""
-
self.config_step._run_create_build(self.parent_build, '/tmp/essai')
self.assertEqual(len(self.parent_build.children_ids), 2, 'Two sub-builds should have been generated')
@@ -222,8 +221,10 @@ def test_config_step_create_results(self):
for child_build in self.parent_build.children_ids:
self.assertFalse(child_build.orphan_result)
child_build.local_result = 'ko'
+
self.assertEqual(child_build.global_result, 'ko')
+
self.assertEqual(self.parent_build.global_result, 'ko')
def test_config_step_create(self):
@@ -236,6 +237,7 @@ def test_config_step_create(self):
for child_build in self.parent_build.children_ids:
self.assertTrue(child_build.orphan_result, 'An orphan result config step should mark the build as orphan_result')
child_build.local_result = 'ko'
self.assertEqual(self.parent_build.global_result, 'ok')
@@ -455,19 +457,19 @@ def docker_run(cmd, log_path, *args, **kwargs):
self.patchers['docker_run'].side_effect = docker_run
- config_step._run_step(self.parent_build, 'dev/null/logpath')
+ config_step._run_step(self.parent_build, 'dev/null/logpath')()
assert_db_name = 'custom_build'
parent_build_params = self.parent_build.params_id.copy({'config_data': {'db_name': 'custom_build'}})
parent_build = self.parent_build.copy({'params_id': parent_build_params.id})
- config_step._run_step(parent_build, 'dev/null/logpath')
+ config_step._run_step(parent_build, 'dev/null/logpath')()
config_step = self.ConfigStep.create({
'name': 'run_test',
'job_type': 'run_odoo',
'custom_db_name': 'custom',
})
- config_step._run_step(parent_build, 'dev/null/logpath')
+ config_step._run_step(parent_build, 'dev/null/logpath')()
self.assertEqual(call_count, 3)
@@ -489,7 +491,7 @@ def docker_run(cmd, *args, **kwargs):
self.assertIn('-d test_database', run_cmd)
self.patchers['docker_run'].side_effect = docker_run
- config_step._run_step(self.parent_build, 'dev/null/logpath')
+ config_step._run_step(self.parent_build, 'dev/null/logpath')()
self.patchers['docker_run'].assert_called_once()
db = self.env['runbot.database'].search([('name', '=', 'test_database')])
self.assertEqual(db.build_id, self.parent_build)
@@ -525,7 +527,7 @@ def docker_run(cmd, log_path, *args, **kwargs):
call_count += 1
self.patchers['docker_run'].side_effect = docker_run
- config_step._run_step(self.parent_build, 'dev/null/logpath')
+ config_step._run_step(self.parent_build, 'dev/null/logpath')()
self.assertEqual(call_count, 1)
@@ -564,10 +566,14 @@ def _log(func, message, level='INFO', log_type='runbot', path='runbot'):
})
logs = []
with patch('builtins.open', mock_open(read_data=file_content)):
- result = config_step._make_results(build)
- self.assertEqual(result, {'job_end': '1970-01-01 02:00:00', 'local_result': 'ok'})
+ config_step._make_results(build)
+ self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')
self.assertEqual(logs, [('INFO', 'Getting results for build %s' % build.dest)])
+ self.assertEqual(build.local_result, 'ok')
# no shutdown
+ build = self.Build.create({
+ 'params_id': self.base_params.id,
+ })
logs = []
file_content = """
Loading stuff
@@ -575,26 +581,34 @@ def _log(func, message, level='INFO', log_type='runbot', path='runbot'):
Some post install stuff
"""
with patch('builtins.open', mock_open(read_data=file_content)):
- result = config_step._make_results(build)
- self.assertEqual(result, {'job_end': '1970-01-01 02:00:00', 'local_result': 'ko'})
+ config_step._make_results(build)
+ self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')
+ self.assertEqual(build.local_result, 'ko')
self.assertEqual(logs, [
('INFO', 'Getting results for build %s' % build.dest),
('ERROR', 'No "Initiating shutdown" found in logs, maybe because of cpu limit.')
])
# no loaded
+ build = self.Build.create({
+ 'params_id': self.base_params.id,
+ })
logs = []
file_content = """
Loading stuff
"""
with patch('builtins.open', mock_open(read_data=file_content)):
- result = config_step._make_results(build)
- self.assertEqual(result, {'job_end': '1970-01-01 02:00:00', 'local_result': 'ko'})
+ config_step._make_results(build)
+ self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')
+ self.assertEqual(build.local_result, 'ko')
self.assertEqual(logs, [
('INFO', 'Getting results for build %s' % build.dest),
('ERROR', 'Modules loaded not found in logs')
])
# traceback
+ build = self.Build.create({
+ 'params_id': self.base_params.id,
+ })
logs = []
file_content = """
Loading stuff
@@ -607,14 +621,18 @@ def _log(func, message, level='INFO', log_type='runbot', path='runbot'):
Initiating shutdown
"""
with patch('builtins.open', mock_open(read_data=file_content)):
- result = config_step._make_results(build)
- self.assertEqual(result, {'job_end': '1970-01-01 02:00:00', 'local_result': 'ko'})
+ config_step._make_results(build)
+ self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')
+ self.assertEqual(build.local_result, 'ko')
self.assertEqual(logs, [
('INFO', 'Getting results for build %s' % build.dest),
('ERROR', 'Error or traceback found in logs')
])
# warning in logs
+ build = self.Build.create({
+ 'params_id': self.base_params.id,
+ })
logs = []
file_content = """
Loading stuff
@@ -624,8 +642,9 @@ def _log(func, message, level='INFO', log_type='runbot', path='runbot'):
Initiating shutdown
"""
with patch('builtins.open', mock_open(read_data=file_content)):
- result = config_step._make_results(build)
- self.assertEqual(result, {'job_end': '1970-01-01 02:00:00', 'local_result': 'warn'})
+ config_step._make_results(build)
+ self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')
+ self.assertEqual(build.local_result, 'warn')
self.assertEqual(logs, [
('INFO', 'Getting results for build %s' % build.dest),
('WARNING', 'Warning found in logs')
@@ -634,15 +653,18 @@ def _log(func, message, level='INFO', log_type='runbot', path='runbot'):
# no log file
logs = []
self.patchers['isfile'].return_value = False
- result = config_step._make_results(build)
+ config_step._make_results(build)
- self.assertEqual(result, {'local_result': 'ko'})
+ self.assertEqual(build.local_result, 'ko')
self.assertEqual(logs, [
('INFO', 'Getting results for build %s' % build.dest),
('ERROR', 'Log file not found at the end of test job')
])
# no error but build was already in warn
+ build = self.Build.create({
+ 'params_id': self.base_params.id,
+ })
logs = []
file_content = """
Loading stuff
@@ -653,11 +675,12 @@ def _log(func, message, level='INFO', log_type='runbot', path='runbot'):
self.patchers['isfile'].return_value = True
build.local_result = 'warn'
with patch('builtins.open', mock_open(read_data=file_content)):
- result = config_step._make_results(build)
+ config_step._make_results(build)
self.assertEqual(logs, [
('INFO', 'Getting results for build %s' % build.dest)
])
- self.assertEqual(result, {'job_end': '1970-01-01 02:00:00', 'local_result': 'warn'})
+ self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')
+ self.assertEqual(build.local_result, 'warn')
@patch('odoo.addons.runbot.models.build_config.ConfigStep._make_tests_results')
def test_make_python_result(self, mock_make_tests_results):
@@ -672,18 +695,18 @@ def test_make_python_result(self, mock_make_tests_results):
})
build.local_state = 'testing'
self.patchers['isfile'].return_value = False
- result = config_step._make_results(build)
- self.assertEqual(result, {'local_result': 'ok'})
+ config_step._make_results(build)
+ self.assertEqual(build.local_result, 'ok')
# invalid result code (no return_value set)
config_step.python_result_code = """a = 2*5\nr = {'a': 'ok'}\nreturn_value = 'ko'"""
with self.assertRaises(RunbotException):
- result = config_step._make_results(build)
+ config_step._make_results(build)
# no result defined
config_step.python_result_code = ""
- mock_make_tests_results.return_value = {'local_result': 'warning'}
- result = config_step._make_results(build)
- self.assertEqual(result, {'local_result': 'warning'})
+ mock_make_tests_results.return_value = {'local_result': 'warn'}
+ config_step._make_results(build)
+ self.assertEqual(build.local_result, 'warn')
# TODO add generic test to copy_paste _run_* in a python step
diff --git a/runbot/tests/test_build_error.py b/runbot/tests/test_build_error.py
index c7cca2579..0d616ab6e 100644
--- a/runbot/tests/test_build_error.py
+++ b/runbot/tests/test_build_error.py
@@ -33,8 +33,10 @@ def setUp(self):
def test_build_scan(self):
IrLog = self.env['ir.logging']
- ko_build = self.create_test_build({'local_result': 'ko'})
- ok_build = self.create_test_build({'local_result': 'ok'})
+ ko_build = self.create_test_build({'local_result': 'ok', 'local_state': 'testing'})
+ ok_build = self.create_test_build({'local_result': 'ok', 'local_state': 'running'})
+
error_team = self.BuildErrorTeam.create({
'name': 'test-error-team',
@@ -56,6 +58,10 @@ def test_build_scan(self):
IrLog.create(log)
log.update({'build_id': ok_build.id})
IrLog.create(log)
+
+ self.assertEqual(ko_build.local_result, 'ko', 'Testing build should have gone ko after error log')
+ self.assertEqual(ok_build.local_result, 'ok', 'Running build should not have gone ko after error log')
+
ko_build._parse_logs()
ok_build._parse_logs()
build_error = self.BuildError.search([('build_ids', 'in', [ko_build.id])])
diff --git a/runbot/tests/test_host.py b/runbot/tests/test_host.py
index fb839910a..bb47fc59d 100644
--- a/runbot/tests/test_host.py
+++ b/runbot/tests/test_host.py
@@ -88,7 +88,7 @@ def test_build_logs(self):
]),
1,
)
- self.assertEqual(build.triggered_result, 'warn', 'A warning log should sets the build in warn')
+        self.assertEqual(build.local_result, 'warn', 'A warning log should set the build in warn')
# now check that error logs sets the build in ko
logs = fetch_local_logs_return_value(nb_logs=1, build_dest=build.dest, level='ERROR')
@@ -103,7 +103,7 @@ def test_build_logs(self):
]),
1,
)
- self.assertEqual(build.triggered_result, 'ko', 'An error log should sets the build in ko')
+        self.assertEqual(build.local_result, 'ko', 'An error log should set the build in ko')
build.log_counter = 10
# Test log limit
diff --git a/runbot/tests/test_schedule.py b/runbot/tests/test_schedule.py
index ba9dcddb5..0e47f6d7b 100644
--- a/runbot/tests/test_schedule.py
+++ b/runbot/tests/test_schedule.py
@@ -23,6 +23,7 @@ def test_schedule_mark_done(self, mock_docker_state, mock_getmtime):
build = self.Build.create({
'local_state': 'testing',
+ 'global_state': 'testing',
'port': '1234',
'host': host.name,
'job_start': datetime.datetime.now(),
@@ -33,9 +34,10 @@ def test_schedule_mark_done(self, mock_docker_state, mock_getmtime):
self.assertEqual(build.local_state, 'testing')
build._schedule() # too fast, docker not started
self.assertEqual(build.local_state, 'testing')
+ self.assertEqual(build.local_result, 'ok')
self.start_patcher('fetch_local_logs', 'odoo.addons.runbot.models.host.Host._fetch_local_logs', []) # the local logs have to be empty
build.write({'job_start': datetime.datetime.now() - datetime.timedelta(seconds=70)}) # docker never started
build._schedule()
self.assertEqual(build.local_state, 'done')
- self.assertEqual(build.local_result, 'ok')
+ self.assertEqual(build.local_result, 'ko')
diff --git a/runbot/tests/test_upgrade.py b/runbot/tests/test_upgrade.py
index e8c9ce215..0cc52b687 100644
--- a/runbot/tests/test_upgrade.py
+++ b/runbot/tests/test_upgrade.py
@@ -1,6 +1,7 @@
import getpass
import logging
import getpass
+from unittest.mock import patch, mock_open
from odoo.exceptions import UserError
from odoo.tools import mute_logger
from .common import RunbotCase
@@ -272,7 +273,7 @@ def test_all(self):
upgrade_current_build = batch.slot_ids.filtered(lambda slot: slot.trigger_id == self.trigger_upgrade_server).build_id
host = self.env['runbot.host']._get_current()
upgrade_current_build.host = host.name
- upgrade_current_build._init_pendings(host)
+ upgrade_current_build._schedule()
self.start_patcher('fetch_local_logs', 'odoo.addons.runbot.models.host.Host._fetch_local_logs', []) # the local logs have to be empty
upgrade_current_build._schedule()
self.assertEqual(upgrade_current_build.local_state, 'done')
@@ -298,7 +299,7 @@ def assertOk(build, t, f, b_type, db_suffix, trigger):
upgrade_build = batch.slot_ids.filtered(lambda slot: slot.trigger_id == self.trigger_upgrade).build_id
host = self.env['runbot.host']._get_current()
upgrade_build.host = host.name
- upgrade_build._init_pendings(host)
+ upgrade_build._schedule()
upgrade_build._schedule()
self.assertEqual(upgrade_build.local_state, 'done')
self.assertEqual(len(upgrade_build.children_ids), 2)
@@ -339,7 +340,7 @@ def assertOk(build, t, f, b_type, db_suffix, trigger):
upgrade_nightly = batch.slot_ids.filtered(lambda slot: slot.trigger_id == trigger_upgrade_addons_nightly).build_id
host = self.env['runbot.host']._get_current()
upgrade_nightly.host = host.name
- upgrade_nightly._init_pendings(host)
+ upgrade_nightly._schedule()
upgrade_nightly._schedule()
to_version_builds = upgrade_nightly.children_ids
self.assertEqual(upgrade_nightly.local_state, 'done')
@@ -353,9 +353,14 @@ def assertOk(build, t, f, b_type, db_suffix, trigger):
[]
)
to_version_builds.host = host.name
- to_version_builds._init_pendings(host)
- to_version_builds._schedule()
- self.assertEqual(to_version_builds.mapped('local_state'), ['done']*4)
+ for build in to_version_builds:
+ build._schedule() # starts builds
+ self.assertEqual(build.local_state, 'testing')
+ build._schedule() # makes result and end build
+ self.assertEqual(build.local_state, 'done')
+
+            self.assertEqual(to_version_builds.mapped('global_state'), ['done', 'waiting', 'waiting', 'waiting'], 'One build has no children, the others should wait for theirs')
+
from_version_builds = to_version_builds.children_ids
self.assertEqual(
[
@@ -368,9 +373,14 @@ def assertOk(build, t, f, b_type, db_suffix, trigger):
['11.0->12.0', 'saas-11.3->12.0', '12.0->13.0', 'saas-12.3->13.0', '13.0->master', 'saas-13.1->master', 'saas-13.2->master', 'saas-13.3->master']
)
from_version_builds.host = host.name
- from_version_builds._init_pendings(host)
- from_version_builds._schedule()
- self.assertEqual(from_version_builds.mapped('local_state'), ['done']*8)
+ for build in from_version_builds:
+ build._schedule()
+ self.assertEqual(build.local_state, 'testing')
+ build._schedule()
+ self.assertEqual(build.local_state, 'done')
+
+ self.assertEqual(from_version_builds.mapped('global_state'), ['waiting'] * 8)
+
db_builds = from_version_builds.children_ids
self.assertEqual(len(db_builds), 40)
@@ -405,61 +415,77 @@ def assertOk(build, t, f, b_type, db_suffix, trigger):
[b.params_id.dump_db.db_suffix for b in b133_master],
['account', 'l10n_be', 'l10n_ch', 'mail', 'stock'] # is this order ok?
)
+ for current_build in db_builds:
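+                # docker_state is patched to 'END' so _schedule sees each container as finished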
+                self.start_patcher('docker_state', 'odoo.addons.runbot.models.build.docker_state', 'END')
+
+                suffix = current_build.params_id.dump_db.db_suffix
+                source_dest = current_build.params_id.dump_db.build_id.dest
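+                # docker_run side effect checking the exact restore command issued for this build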
+                def docker_run_restore(cmd, *args, **kwargs):
+                    dump_url = f'http://host.runbot.com/runbot/static/build/{source_dest}/logs/{source_dest}-{suffix}.zip'
+                    zip_name = f'{source_dest}-{suffix}.zip'
+                    db_name = f'{current_build.dest}-{suffix}'
+                    self.assertEqual(
+                        str(cmd).split(' && '),
+                        [
+                            'mkdir /data/build/restore',
+                            'cd /data/build/restore',
+                            f'wget {dump_url}',
+                            f'unzip -q {zip_name}',
+                            'echo "### restoring filestore"',
+                            f'mkdir -p /data/build/datadir/filestore/{db_name}',
+                            f'mv filestore/* /data/build/datadir/filestore/{db_name}',
+                            'echo "### restoring db"',
+                            f'psql -q {db_name} < dump.sql',
+                            'cd /data/build',
+                            'echo "### cleaning"',
+                            'rm -r restore',
+                            'echo "### listing modules"',
+                            f'psql {db_name} -c "select name from ir_module_module where state = \'installed\'" -t -A > /data/build/logs/restore_modules_installed.txt',
+                            'echo "### restore" "successful"'
+                        ]
+                    )
+                self.patchers['docker_run'].side_effect = docker_run_restore
+                current_build.host = host.name
+                current_build._schedule()()
+                self.patchers['docker_run'].assert_called()
+
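+                # second docker_run call for this build: the upgrade command itself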
+                def docker_run_upgrade(cmd, *args, ro_volumes=False, **kwargs):
+                    user = getpass.getuser()
+                    self.assertTrue(ro_volumes.pop(f'/home/{user}/.odoorc').startswith('/tmp/runbot_test/static/build/'))
+                    self.assertEqual(
+                        list(ro_volumes.keys()), [
+                            '/data/build/addons',
+                            '/data/build/server',
+                            '/data/build/upgrade',
+                        ],
+                        "other commits should have been added automatically"
+                    )
+                    self.assertEqual(
+                        str(cmd),
+                        'python3 server/server.py {addons_path} --no-xmlrpcs --no-netrpc -u all -d {db_name} --stop-after-init --max-cron-threads=0'.format(
+                            addons_path='--addons-path addons,server/addons,server/core/addons',
+                            db_name=f'{current_build.dest}-{suffix}')
+                    )
+                self.patchers['docker_run'].side_effect = docker_run_upgrade
+                current_build._schedule()()
- first_build = db_builds[0]
-
- self.start_patcher('docker_state', 'odoo.addons.runbot.models.build.docker_state', 'END')
-
- def docker_run_restore(cmd, *args, **kwargs):
- source_dest = first_build.params_id.dump_db.build_id.dest
- dump_url='http://host.runbot.com/runbot/static/build/%s/logs/%s-account.zip' % (source_dest, source_dest)
- zip_name='%s-account.zip' % source_dest
- db_name='%s-master-account' % str(first_build.id).zfill(5)
- self.assertEqual(
- str(cmd).split(' && '),
- [
- 'mkdir /data/build/restore',
- 'cd /data/build/restore',
- f'wget {dump_url}',
- f'unzip -q {zip_name}',
- 'echo "### restoring filestore"',
- f'mkdir -p /data/build/datadir/filestore/{db_name}',
- f'mv filestore/* /data/build/datadir/filestore/{db_name}',
- 'echo "### restoring db"',
- f'psql -q {db_name} < dump.sql',
- 'cd /data/build',
- 'echo "### cleaning"',
- 'rm -r restore',
- 'echo "### listing modules"',
- f'psql {db_name} -c "select name from ir_module_module where state = \'installed\'" -t -A > /data/build/logs/restore_modules_installed.txt',
- 'echo "### restore" "successful"'
- ]
- )
- self.patchers['docker_run'].side_effect = docker_run_restore
- first_build.host = host.name
- first_build._init_pendings(host)
- self.patchers['docker_run'].assert_called()
-
- def docker_run_upgrade(cmd, *args, ro_volumes=False, **kwargs):
- user = getpass.getuser()
- self.assertTrue(ro_volumes.pop(f'/home/{user}/.odoorc').startswith('/tmp/runbot_test/static/build/'))
- self.assertEqual(
- ro_volumes, {
- '/data/build/addons': '/tmp/runbot_test/static/sources/addons/addons120',
- '/data/build/server': '/tmp/runbot_test/static/sources/server/server120',
- '/data/build/upgrade': '/tmp/runbot_test/static/sources/upgrade/123abc789',
- },
- "other commit should have been added automaticaly"
- )
- self.assertEqual(
- str(cmd),
- 'python3 server/server.py {addons_path} --no-xmlrpcs --no-netrpc -u all -d {db_name} --stop-after-init --max-cron-threads=0'.format(
- addons_path='--addons-path addons,server/addons,server/core/addons',
- db_name='%s-master-account' % str(first_build.id).zfill(5))
- )
- self.patchers['docker_run'].side_effect = docker_run_upgrade
- first_build._schedule()
- self.assertEqual(self.patchers['docker_run'].call_count, 2)
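+                # open() is mocked so any file read during this _schedule returns empty content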
+                with patch('builtins.open', mock_open(read_data='')):
+                    current_build._schedule()
+                self.assertEqual(current_build.local_state, 'done')
+
+                self.assertEqual(current_build.global_state, 'done')
+                # self.assertEqual(current_build.global_result, 'ok')
+
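+            # 40 db builds x 2 docker_run calls each (restore + upgrade)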
+ self.assertEqual(self.patchers['docker_run'].call_count, 80)
+
+ self.assertEqual(from_version_builds.mapped('global_state'), ['done'] * 8)
+
+ self.assertEqual(to_version_builds.mapped('global_state'), ['done'] * 4)
# test_build_references
batch = self.master_bundle._force()
@@ -525,7 +551,7 @@ def docker_run_upgrade(cmd, *args, ro_volumes=False, **kwargs):
for db in ['base', 'all', 'no-demo-all']:
upgrade_complement_build_13.database_ids = [(0, 0, {'name': '%s-%s' % (upgrade_complement_build_13.dest, db)})]
- upgrade_complement_build_13._init_pendings(host)
+ upgrade_complement_build_13._schedule()
self.assertEqual(len(upgrade_complement_build_13.children_ids), 5)
master_child = upgrade_complement_build_13.children_ids[0]
diff --git a/runbot/views/build_views.xml b/runbot/views/build_views.xml
index 524999bb9..9b68869b9 100644
--- a/runbot/views/build_views.xml
+++ b/runbot/views/build_views.xml
@@ -68,7 +68,6 @@
-
diff --git a/runbot/views/config_views.xml b/runbot/views/config_views.xml
index d10b644be..20accff74 100644
--- a/runbot/views/config_views.xml
+++ b/runbot/views/config_views.xml
@@ -61,7 +61,6 @@
-
diff --git a/runbot_builder/builder.py b/runbot_builder/builder.py
index 6745f6ad2..062a84741 100755
--- a/runbot_builder/builder.py
+++ b/runbot_builder/builder.py
@@ -20,7 +20,7 @@ def on_start(self):
repo._update(force=True)
def loop_turn(self):
- if self.count == 1: # cleanup at second iteration
+ if self.count == 1: # cleanup at second iteration
self.env['runbot.runbot']._source_cleanup()
self.env['runbot.build']._local_cleanup()
self.env['runbot.runbot']._docker_cleanup()