# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import datetime, logging, os, time


from autotest_lib.client.common_lib import base_job, global_config, log
from autotest_lib.client.common_lib import time_utils
from autotest_lib.client.common_lib.host_queue_entry_states \
    import IntStatus as HqeIntStatus

DEFAULT_POLL_INTERVAL_SECONDS = 10

HQE_MAXIMUM_ABORT_RATE_FLOAT = global_config.global_config.get_config_value(
            'SCHEDULER', 'hqe_maximum_abort_rate_float', type=float,
            default=0.5)


def view_is_relevant(view):
    """
    Indicates whether the view of a given test is meaningful or not.

    @param view: a detailed test 'view' from the TKO DB to look at.
    @return True if this is a test result worth looking at further.
    """
    return not view['test_name'].startswith('CLIENT_JOB')


def view_is_for_suite_prep(view):
    """
    Indicates whether the given test view is the view of Suite prep.

    @param view: a detailed test 'view' from the TKO DB to look at.
    @return True if this is the view of suite preparation.
    """
    return view['test_name'] == 'SERVER_JOB'


def view_is_for_infrastructure_fail(view):
    """
    Indicates whether the given test view is from an infra fail.

    @param view: a detailed test 'view' from the TKO DB to look at.
    @return True if this view indicates an infrastructure-side issue during
            a test.
    """
    return view['test_name'].endswith('SERVER_JOB')


def is_for_infrastructure_fail(status):
    """
    Indicates whether the given Status is from an infra fail.

    @param status: the Status object to look at.
    @return True if this Status indicates an infrastructure-side issue during
            a test.
    """
    return view_is_for_infrastructure_fail({'test_name': status.test_name})


def gather_job_hostnames(afe, job):
    """
    Collate and return names of hosts used in |job|.

    @param afe: an instance of AFE as defined in server/frontend.py.
    @param job: the job to poll on.
    @return iterable of hostnames on which |job| was run, using None as
            placeholders.
    """
    hosts = []
    for e in afe.run('get_host_queue_entries', job=job.id):
        # If the host queue entry has not yet made it into or past the running
        # stage, we should skip it for now.
        if (HqeIntStatus.get_value(e['status']) <
            HqeIntStatus.get_value(HqeIntStatus.RUNNING)):
            hosts.append(None)
        elif not e['host']:
            logging.warning('Job %s (%s) has an entry with no host!',
                            job.name, job.id)
            hosts.append(None)
        else:
            hosts.append(e['host']['hostname'])
    return hosts

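# Illustrative sketch (not part of the original module): hostnames come back
# in host-queue-entry order, with None placeholders for entries that have not
# reached Running yet.  The AFE handle and job id below are assumptions.
#
#   afe = frontend.AFE()
#   job = afe.get_jobs(id=1234)[0]
#   hostnames = gather_job_hostnames(afe, job)
#   # e.g. ['chromeos1-rack2-host3', None] if one entry is still Queued.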

def check_job_abort_status(afe, jobs):
    """
    Checks the abort status of all the jobs in |jobs| and returns True if any
    of them has had too many of its HostQueueEntries aborted.

    A job has too many aborted entries if the fraction of its host queue
    entries that were aborted exceeds HQE_MAXIMUM_ABORT_RATE_FLOAT.

    @param afe: an instance of AFE as defined in server/frontend.py.
    @param jobs: an iterable of Running frontend.Jobs

    @returns True if any job in |jobs| has too many aborted host queue
             entries. False otherwise.
    """
    for job in jobs:
        entries = afe.run('get_host_queue_entries', job=job.id)
        num_aborted = 0
        for hqe in entries:
            if hqe['aborted']:
                num_aborted += 1
        if num_aborted > len(entries) * HQE_MAXIMUM_ABORT_RATE_FLOAT:
            # This job was not successful; return True.
            logging.error('Too many host queue entries were aborted for job: '
                          '%s.', job.id)
            return True
    return False

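# Worked example of the threshold above (illustrative, not from the original
# source): with the default hqe_maximum_abort_rate_float of 0.5, a job with 4
# host queue entries only trips the check once more than half are aborted:
#
#   num_aborted = 3, len(entries) = 4  ->  3 > 4 * 0.5  ->  returns True
#   num_aborted = 2, len(entries) = 4  ->  2 > 4 * 0.5 is False  ->  keeps going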

def _abort_jobs_if_timedout(afe, jobs, start_time, timeout_mins):
    """
    Abort all of the jobs in |jobs| if the running time has passed the timeout.

    @param afe: an instance of AFE as defined in server/frontend.py.
    @param jobs: an iterable of Running frontend.Jobs
    @param start_time: Time to compare to the current time to see if a timeout
                       has occurred.
    @param timeout_mins: Time in minutes to wait before aborting the jobs we
                         are waiting on.

    @returns True if there was a timeout, False if not.
    """
    if datetime.datetime.utcnow() < (start_time +
                                     datetime.timedelta(minutes=timeout_mins)):
        return False
    for job in jobs:
        logging.debug('Job: %s has timed out after %s minutes. Aborting job.',
                      job.id, timeout_mins)
        afe.run('abort_host_queue_entries', job=job.id)
    return True


def wait_for_jobs_to_start(afe, jobs, interval=DEFAULT_POLL_INTERVAL_SECONDS,
                           start_time=None, wait_timeout_mins=None):
    """
    Wait for the jobs specified by each |job.id| to start.

    @param afe: an instance of AFE as defined in server/frontend.py.
    @param jobs: the jobs to poll on.
    @param interval: polling interval in seconds.
    @param start_time: Time to compare to the current time to see if a timeout
                       has occurred.
    @param wait_timeout_mins: Time in minutes to wait before aborting the jobs
                              we are waiting on.

    @returns True if the jobs have started, False if they get aborted.
    """
    if not start_time:
        start_time = datetime.datetime.utcnow()
    job_ids = [j.id for j in jobs]
    while job_ids:
        if wait_timeout_mins and _abort_jobs_if_timedout(afe, jobs, start_time,
                    wait_timeout_mins):
            # The timeout parameter is not None and we have indeed timed out.
            return False
        for job_id in list(job_ids):
            if len(afe.get_jobs(id=job_id, not_yet_run=True)) > 0:
                continue
            job_ids.remove(job_id)
            logging.debug('Re-imaging job %d running.', job_id)
        if job_ids:
            logging.debug('Waiting %ds before polling again.', interval)
            time.sleep(interval)
    return True

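# Hypothetical usage sketch (the AFE handle, the parent job id and the failure
# handler are assumptions, not defined in this module):
#
#   afe = frontend.AFE()
#   jobs = afe.get_jobs(parent_job_id=suite_job_id)
#   if not wait_for_jobs_to_start(afe, jobs, wait_timeout_mins=30):
#       # The jobs were aborted (e.g. by the timeout) before they all started.
#       handle_startup_failure(jobs)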

def wait_for_jobs_to_finish(afe, jobs, interval=DEFAULT_POLL_INTERVAL_SECONDS,
                            start_time=None, wait_timeout_mins=None):
    """
    Wait for the jobs specified by each |job.id| to finish.

    @param afe: an instance of AFE as defined in server/frontend.py.
    @param interval: polling interval in seconds.
    @param jobs: the jobs to poll on.
    @param start_time: Time to compare to the current time to see if a timeout
                       has occurred. Defaults to now.
    @param wait_timeout_mins: Time in minutes to wait before aborting the jobs
                              we are waiting on. Defaults to no timeout.

    @returns True if the jobs have finished, False if they get aborted.
    """
    if not start_time:
        start_time = datetime.datetime.utcnow()
    job_ids = [j.id for j in jobs]
    while job_ids:
        if wait_timeout_mins and _abort_jobs_if_timedout(afe, jobs, start_time,
                    wait_timeout_mins):
            # The timeout parameter is not None and we have indeed timed out.
            return False
        for job_id in list(job_ids):
            if not afe.get_jobs(id=job_id, finished=True):
                continue
            job_ids.remove(job_id)
            logging.debug('Re-imaging job %d finished.', job_id)
        if job_ids:
            logging.debug('Waiting %ds before polling again.', interval)
            time.sleep(interval)
    return True


def wait_for_and_lock_job_hosts(afe, jobs, manager,
                                interval=DEFAULT_POLL_INTERVAL_SECONDS,
                                start_time=None, wait_timeout_mins=None):
    """
    Poll until devices have begun reimaging, locking them as we go.

    Gather the hosts chosen for |jobs| -- which must themselves be in the
    Running state -- and as they each individually come online and begin
    Running, lock them.  Poll until all chosen hosts have gone to Running
    and been locked using |manager|.

    @param afe: an instance of AFE as defined in server/frontend.py.
    @param jobs: an iterable of Running frontend.Jobs
    @param manager: a HostLockManager instance.  Hosts will be added to it
                    as they start Running, and it will be used to lock them.
    @param start_time: Time to compare to the current time to see if a timeout
                       has occurred.
    @param interval: polling interval.
    @param wait_timeout_mins: Time in minutes to wait before aborting the jobs
                              we are waiting on.

    @return iterable of the hosts that were locked; if the jobs time out and
            are aborted, whatever hosts were locked so far are returned.
    """
    def get_all_hosts(my_jobs):
        """
        Returns a list of all hosts for jobs in my_jobs.

        @param my_jobs: a list of all the jobs we need hostnames for.
        @return: a list of hostnames that correspond to my_jobs.
        """
        all_hosts = []
        for job in my_jobs:
            all_hosts.extend(gather_job_hostnames(afe, job))
        return all_hosts

    if not start_time:
        start_time = datetime.datetime.utcnow()
    locked_hosts = set()
    expected_hosts = set(get_all_hosts(jobs))
    logging.debug('Initial expected hosts: %r', expected_hosts)

    while locked_hosts != expected_hosts:
        if wait_timeout_mins and _abort_jobs_if_timedout(afe, jobs, start_time,
                                                         wait_timeout_mins):
            # The timeout parameter is not None and we have timed out.
            return locked_hosts
        hosts_to_check = [e for e in expected_hosts if e]
        if hosts_to_check:
            logging.debug('Checking to see if %r are Running.', hosts_to_check)
            running_hosts = afe.get_hosts(hosts_to_check, status='Running')
            hostnames = [h.hostname for h in running_hosts]
            if set(hostnames) - locked_hosts != set():
                # New hosts to lock!
                logging.debug('Locking %r.', hostnames)
                manager.lock(hostnames)
            locked_hosts = locked_hosts.union(hostnames)
        time.sleep(interval)
        # 'None' in expected_hosts means we had entries in the job with no
        # host yet assigned, or which weren't Running yet.  We need to forget
        # that across loops, though, and remember only hosts we really used.
        expected_hosts = expected_hosts.difference([None])

        # get_all_hosts() returns only hosts that are currently Running a
        # job we care about.  By unioning with other hosts that we already
        # saw, we get the set of all the hosts that have run a job we care
        # about.
        expected_hosts = expected_hosts.union(get_all_hosts(jobs))
        logging.debug('Locked hosts: %r', locked_hosts)
        logging.debug('Expected hosts: %r', expected_hosts)

    return locked_hosts

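# Minimal sketch of the intended flow (hedged: the HostLockManager object and
# the reimage job list are assumptions, not defined in this file):
#
#   manager = host_lock_manager.HostLockManager()
#   locked = wait_for_and_lock_job_hosts(afe, reimage_jobs, manager,
#                                        wait_timeout_mins=60)
#   # ... run against the locked hosts, then release them via the manager.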

def _collate_aborted(current_value, entry):
    """
    reduce() over a list of HostQueueEntries for a job; True if any aborted.

    Functor that can be reduce()d over a list of
    HostQueueEntries for a job.  If any were aborted
    (|entry.aborted| exists and is True), then the reduce() will
    return True.

    Ex:
      entries = AFE.run('get_host_queue_entries', job=job.id)
      reduce(_collate_aborted, entries, False)

    @param current_value: the current accumulator (a boolean).
    @param entry: the current entry under consideration.
    @return the value of |entry.aborted| if it exists, False if not.
    """
    return current_value or ('aborted' in entry and entry['aborted'])


def _status_for_test(status):
    """
    Indicates whether the status of a given test is meaningful or not.

    @param status: frontend.TestStatus object to look at.
    @return True if this is a test result worth looking at further.
    """
    return not (status.test_name.startswith('SERVER_JOB') or
                status.test_name.startswith('CLIENT_JOB'))


def _yield_job_results(afe, tko, job):
    """
    Yields the results of an individual job.

    Yields one Status object per test.

    @param afe: an instance of AFE as defined in server/frontend.py.
    @param tko: an instance of TKO as defined in server/frontend.py.
    @param job: Job object to get results from, as defined in
                server/frontend.py
    @yields an iterator of Statuses, one per test.
    """
    entries = afe.run('get_host_queue_entries', job=job.id)

    # This query uses the job id to search through the tko_test_view_2
    # table, for results of a test with a similar job_tag. The job_tag
    # is used to store results, and takes the form job_id-owner/host.
    # Many times when a job aborts during a test, the job_tag actually
    # exists and the results directory contains valid logs. If the job
    # was aborted prematurely, i.e. before it had a chance to create the
    # job_tag, this query will return no results. When statuses is not
    # empty it will contain frontend.TestStatus' with fields populated
    # using the results of the db query.
    statuses = tko.get_job_test_statuses_from_db(job.id)
    if not statuses:
        yield Status('ABORT', job.name)

    # We only care about the SERVER and CLIENT job failures when there
    # are no test failures.
    contains_test_failure = any(_status_for_test(s) and s.status != 'GOOD'
                                for s in statuses)
    for s in statuses:
        # The TKO parser uniquely identifies a test run by
        # (test_name, subdir). In dynamic suite, we need to emit
        # a subdir for each status and make sure (test_name, subdir)
        # in the suite job's status log is unique.
        # For non-test statuses (i.e. SERVER_JOB, CLIENT_JOB),
        # we use 'job_tag' from tko_test_view_2, which looks like
        # '1246-owner/172.22.33.44'.
        # For normal test statuses, we use 'job_tag/subdir',
        # which looks like '1246-owner/172.22.33.44/my_DummyTest.tag.subdir_tag'.
        if _status_for_test(s):
            yield Status(s.status, s.test_name, s.reason,
                         s.test_started_time, s.test_finished_time,
                         job.id, job.owner, s.hostname, job.name,
                         subdir=os.path.join(s.job_tag, s.subdir))
        else:
            if s.status != 'GOOD' and not contains_test_failure:
                yield Status(s.status,
                             '%s_%s' % (entries[0]['job']['name'],
                                        s.test_name),
                             s.reason, s.test_started_time,
                             s.test_finished_time, job.id,
                             job.owner, s.hostname, job.name,
                             subdir=s.job_tag)


def wait_for_child_results(afe, tko, parent_job_id):
    """
    Wait for results of all tests in jobs with given parent id.

    New jobs could be added by calling send(new_jobs) on the generator.
    Currently polls for results every 5s.  Yields one Status object per test
    as results become available.

    @param afe: an instance of AFE as defined in server/frontend.py.
    @param tko: an instance of TKO as defined in server/frontend.py.
    @param parent_job_id: Parent job id for the jobs to wait on.
    @yields an iterator of Statuses, one per test.
    """
    remaining_child_jobs = set(job.id for job in
                               afe.get_jobs(parent_job_id=parent_job_id))
    while remaining_child_jobs:
        new_finished_jobs = [job for job in
                             afe.get_jobs(parent_job_id=parent_job_id,
                                          finished=True)
                             if job.id in remaining_child_jobs]

        for job in new_finished_jobs:

            remaining_child_jobs.remove(job.id)
            for result in _yield_job_results(afe, tko, job):
                # To figure out what new jobs (like retry jobs) have been
                # created since last iteration, we could re-poll for
                # the set of child jobs in each iteration and
                # calculate the set difference against the set we got in
                # last iteration. As an alternative, we could just make
                # the caller 'send' new jobs to this generator. We go
                # with the latter to avoid unnecessary overhead.
                new_child_jobs = (yield result)
                if new_child_jobs:
                    remaining_child_jobs.update([new_job.id for new_job in
                                                 new_child_jobs])
                    # Return nothing if 'send' is called
                    yield None

        time.sleep(5)

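# Consumption sketch (hypothetical suite-side code, not from this module):
# iterate the generator and record each Status as it arrives; the suite job's
# record_entry callable here is an assumption.
#
#   for test_status in wait_for_child_results(afe, tko, suite_job_id):
#       test_status.record_all(record_entry)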

def wait_for_results(afe, tko, jobs):
    """
    Wait for results of all tests in all jobs in |jobs|.

    New jobs could be added by calling send(new_jobs) on the generator.
    Currently polls for results every 5s.  Yields one Status object per test
    as results become available.

    @param afe: an instance of AFE as defined in server/frontend.py.
    @param tko: an instance of TKO as defined in server/frontend.py.
    @param jobs: a list of Job objects, as defined in server/frontend.py.
    @yields an iterator of Statuses, one per test.
    """
    local_jobs = list(jobs)
    while local_jobs:
        for job in list(local_jobs):
            if not afe.get_jobs(id=job.id, finished=True):
                continue

            local_jobs.remove(job)
            for result in _yield_job_results(afe, tko, job):
                # The caller can 'send' new jobs (i.e. retry jobs)
                # to this generator at any time.
                new_jobs = (yield result)
                if new_jobs:
                    local_jobs.extend(new_jobs)
                    # Return nothing if 'send' is called
                    yield None

        time.sleep(5)

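# Sketch of the send() protocol described above (should_retry and
# create_retry_job are hypothetical helpers, not defined in this file): the
# caller pushes newly created retry jobs back into the generator, and the
# generator answers the send() with its 'yield None' acknowledgement.
#
#   waiter = wait_for_results(afe, tko, jobs)
#   for result in waiter:
#       result.record_all(record_entry)
#       if should_retry(result):
#           # send() returns None, per the 'yield None' above.
#           waiter.send([create_retry_job(result)])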

def gather_per_host_results(afe, tko, jobs, name_prefix=''):
    """
    Gather currently-available results for all |jobs|, aggregated per-host.

    For each job in |jobs|, gather per-host results and summarize into a single
    log entry.  For example, a FAILed SERVER_JOB and successful actual test
    is reported as a FAIL.

    @param afe: an instance of AFE as defined in server/frontend.py.
    @param tko: an instance of TKO as defined in server/frontend.py.
    @param jobs: a list of Job objects, as defined in server/frontend.py.
    @param name_prefix: optional string to prepend to Status object names.
    @return a dict mapping {hostname: Status}, one per host used in a Job.
    """
    to_return = {}
    for job in jobs:
        for s in tko.get_job_test_statuses_from_db(job.id):
            candidate = Status(s.status,
                               name_prefix+s.hostname,
                               s.reason,
                               s.test_started_time,
                               s.test_finished_time)
            if (s.hostname not in to_return or
                candidate.is_worse_than(to_return[s.hostname])):
                to_return[s.hostname] = candidate

        # If we didn't find more specific data above for a host, fill in here.
        # For jobs that didn't even make it to finding a host, just collapse
        # into a single log entry.
        for e in afe.run('get_host_queue_entries', job=job.id):
            host = e['host']['hostname'] if e['host'] else 'hostless' + job.name
            if host not in to_return:
                to_return[host] = Status(Status.STATUS_MAP[e['status']],
                                         job.name,
                                         'Did not run',
                                         begin_time_str=job.created_on)

    return to_return


def check_and_record_reimage_results(per_host_statuses, group, record_entry):
    """
    Record all Statuses in |per_host_statuses| and return True if enough of
    the hosts in |group| reimaged successfully.

    @param per_host_statuses: dict mapping {hostname: Status}, one per host
                              used in a Job.
    @param group: the HostGroup used for the Job whose results we're reporting.
    @param record_entry: a callable to use for logging.
               prototype:
                   record_entry(base_job.status_log_entry)
    @return True if enough of the Statuses are good.
    """
    failures = []
    for hostname, status in per_host_statuses.iteritems():
        if status.is_good():
            group.mark_host_success(hostname)
            status.record_all(record_entry)
        else:
            failures.append(status)

    success = group.enough_hosts_succeeded()
    if success:
        for failure in failures:
            logging.warning("%s failed to reimage.", failure.test_name)
            failure.override_status('WARN')
            failure.record_all(record_entry)
    else:
        for failure in failures:
            # No need to log warnings; the job is failing.
            failure.record_all(record_entry)

    return success

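# Hedged sketch of how the two reimage helpers above compose (the HostGroup
# instance, the reimage job list and the record_entry callable are
# assumptions, not defined in this file):
#
#   statuses = gather_per_host_results(afe, tko, reimage_jobs,
#                                      name_prefix='reimage-')
#   if not check_and_record_reimage_results(statuses, host_group,
#                                           record_entry):
#       ...  # not enough hosts reimaged; the caller should fail the suite here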

class Status(object):
    """
    A class representing a test result.

    Stores all pertinent info about a test result and, given a callable
    to use, can record start, result, and end info appropriately.

    @var _status: status code, e.g. 'INFO', 'FAIL', etc.
    @var _test_name: the name of the test whose result this is.
    @var _reason: message explaining failure, if any.
    @var _begin_timestamp: when test started (int, in seconds since the epoch).
    @var _end_timestamp: when test finished (int, in seconds since the epoch).
    @var _id: the ID of the job that generated this Status.
    @var _owner: the owner of the job that generated this Status.

    @var STATUS_MAP: a dict mapping host queue entry status strings to
                     canonical status codes; e.g. 'Aborted' -> 'ABORT'
    """
    _status = None
    _test_name = None
    _reason = None
    _begin_timestamp = None
    _end_timestamp = None

    # A Queued status can occur if the try job aborted before it finished
    # reimaging all machines; Queued therefore maps to 'ABORT'.
    STATUS_MAP = {'Failed': 'FAIL', 'Aborted': 'ABORT', 'Completed': 'GOOD',
                  'Queued' : 'ABORT'}

    class sle(base_job.status_log_entry):
        """
        Thin wrapper around status_log_entry that supports stringification.
        """
        def __str__(self):
            return self.render()

        def __repr__(self):
            return self.render()


    def __init__(self, status, test_name, reason='', begin_time_str=None,
                 end_time_str=None, job_id=None, owner=None, hostname=None,
                 job_name='', subdir=None):
        """
        Constructor

        @param status: status code, e.g. 'INFO', 'FAIL', etc.
        @param test_name: the name of the test whose result this is.
        @param reason: message explaining failure, if any; Optional.
        @param begin_time_str: when test started (in time_utils.TIME_FMT);
                               now() if None or 'None'.
        @param end_time_str: when test finished (in time_utils.TIME_FMT);
                             now() if None or 'None'.
        @param job_id: the ID of the job that generated this Status.
        @param owner: the owner of the job that generated this Status.
        @param hostname: The name of the host the test that generated this
                         result ran on.
        @param job_name: The job name; Contains the test name with/without the
                         experimental prefix, the tag and the build.
        @param subdir: The result directory of the test. It will be recorded
                       as the subdir in the status.log file.
        """
        self._status = status
        self._test_name = test_name
        self._reason = reason
        self._id = job_id
        self._owner = owner
        self._hostname = hostname
        self._job_name = job_name
        self._subdir = subdir
        # Autoserv drops a keyval of the started time which eventually makes
        # its way here.  Therefore, if we have a starting time, we may assume
        # that the test reached Running and actually began execution on a
        # drone.
        self._test_executed = begin_time_str and begin_time_str != 'None'

        if begin_time_str and begin_time_str != 'None':
            self._begin_timestamp = int(time.mktime(
                datetime.datetime.strptime(
                    begin_time_str, time_utils.TIME_FMT).timetuple()))
        else:
            self._begin_timestamp = int(time.time())

        if end_time_str and end_time_str != 'None':
            self._end_timestamp = int(time.mktime(
                datetime.datetime.strptime(
                    end_time_str, time_utils.TIME_FMT).timetuple()))
        else:
            self._end_timestamp = int(time.time())


    def is_good(self):
        """ Returns true if status is good. """
        return self._status == 'GOOD'


    def is_warn(self):
        """ Returns true if status is warn. """
        return self._status == 'WARN'


    def is_testna(self):
        """ Returns true if status is TEST_NA """
        return self._status == 'TEST_NA'


    def is_worse_than(self, candidate):
        """
        Return whether |self| represents a "worse" failure than |candidate|.

        "Worse" is defined the same as it is for log message purposes in
        common_lib/log.py.  We also consider status with a specific error
        message to represent a "worse" failure than one without.

        @param candidate: a Status instance to compare to this one.
        @return True if |self| is "worse" than |candidate|.
        """
        if self._status != candidate._status:
            return (log.job_statuses.index(self._status) <
                    log.job_statuses.index(candidate._status))
        # else, if the statuses are the same...
        if self._reason and not candidate._reason:
            return True
        return False

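    # Illustrative example of the tie-break above (assumed test names, not
    # from the original source): when two Statuses share the same status code,
    # the one that carries a failure reason is considered "worse".
    #
    #   with_reason = Status('FAIL', 'dummy_Fail', reason='crashed')
    #   without_reason = Status('FAIL', 'dummy_Fail')
    #   with_reason.is_worse_than(without_reason)    # -> True
    #   without_reason.is_worse_than(with_reason)    # -> False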

    def record_start(self, record_entry):
        """
        Use record_entry to log message about start of test.

        @param record_entry: a callable to use for logging.
               prototype:
                   record_entry(base_job.status_log_entry)
        """
        log_entry = Status.sle('START', self._subdir,
                               self._test_name, '',
                               None, self._begin_timestamp)
        record_entry(log_entry, log_in_subdir=False)


    def record_result(self, record_entry):
        """
        Use record_entry to log message about result of test.

        @param record_entry: a callable to use for logging.
               prototype:
                   record_entry(base_job.status_log_entry)
        """
        log_entry = Status.sle(self._status, self._subdir,
                               self._test_name, self._reason, None,
                               self._end_timestamp)
        record_entry(log_entry, log_in_subdir=False)


    def record_end(self, record_entry):
        """
        Use record_entry to log message about end of test.

        @param record_entry: a callable to use for logging.
               prototype:
                   record_entry(base_job.status_log_entry)
        """
        log_entry = Status.sle('END %s' % self._status, self._subdir,
                               self._test_name, '', None, self._end_timestamp)
        record_entry(log_entry, log_in_subdir=False)


    def record_all(self, record_entry):
        """
        Use record_entry to log all messages about test results.

        @param record_entry: a callable to use for logging.
               prototype:
                   record_entry(base_job.status_log_entry)
        """
        self.record_start(record_entry)
        self.record_result(record_entry)
        self.record_end(record_entry)

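    # Usage note (hedged: the record_entry callable is whatever the caller's
    # job provides, which is an assumption, not defined in this file):
    #
    #   status = Status('GOOD', 'dummy_Pass')
    #   status.record_all(record_entry)
    #   # emits a START line, a 'GOOD' result line and an 'END GOOD' line.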

    def override_status(self, override):
        """
        Override the _status field of this Status.

        @param override: value with which to override _status.
        """
        self._status = override


    @property
    def test_name(self):
        """ Name of the test this status corresponds to. """
        return self._test_name


    @test_name.setter
    def test_name(self, value):
        """
        Test name setter.

        @param value: The test name.
        """
        self._test_name = value


    @property
    def id(self):
        """ Id of the job that corresponds to this status. """
        return self._id


    @property
    def owner(self):
        """ Owner of the job that corresponds to this status. """
        return self._owner


    @property
    def hostname(self):
        """ Host the job corresponding to this status ran on. """
        return self._hostname


    @property
    def reason(self):
        """ Reason the job corresponding to this status failed. """
        return self._reason


    @property
    def test_executed(self):
        """ If the test reached running an autoserv instance or not. """
        return self._test_executed

    @property
    def subdir(self):
        """Subdir of test this status corresponds to."""
        return self._subdir