#
# Copyright 2008 Google Inc. All Rights Reserved.

"""
The job module contains the objects and methods used to
manage jobs in Autotest.

The valid actions are:
list:    lists job(s)
create:  create a job
clone:   clone an existing job
abort:   abort job(s)
stat:    detailed listing of job(s)

The common options are:

See topic_common.py for a High Level Design and Algorithm.
"""

# pylint: disable=missing-docstring

import getpass, re
from autotest_lib.cli import topic_common, action_common
from autotest_lib.client.common_lib import control_data
from autotest_lib.client.common_lib import priorities


class job(topic_common.atest):
    """Job class
    atest job [create|clone|list|stat|abort] <options>"""
    usage_action = '[create|clone|list|stat|abort]'
    topic = msg_topic = 'job'
    msg_items = '<job_ids>'


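    # _convert_status() flattens each result's status_counts dict into a
    # sorted, percentage-annotated string. Illustrative example:
    #   {'Completed': 9, 'Failed': 1} -> 'Completed=9(90.0%), Failed=1(10.0%)'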
    def _convert_status(self, results):
        for result in results:
            total = sum(result['status_counts'].values())
            status = ['%s=%s(%.1f%%)' % (key, val, 100.0*float(val)/total)
                      for key, val in result['status_counts'].iteritems()]
            status.sort()
            result['status_counts'] = ', '.join(status)


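    # backward_compatibility() maps the old CLI spelling onto the new one,
    # e.g. action 'create' with argv [..., '--clone', '42'] is rewritten to
    # argv [..., '--id', '42'] and the returned action becomes 'clone'.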
    def backward_compatibility(self, action, argv):
        """ 'job create --clone' became 'job clone --id' """
        if action == 'create':
            for option in ['-l', '--clone']:
                if option in argv:
                    argv[argv.index(option)] = '--id'
                    action = 'clone'
        return action


class job_help(job):
    """Just here to get the atest logic working.
    Usage is set by its parent"""
    pass


class job_list_stat(action_common.atest_list, job):
    def __init__(self):
        super(job_list_stat, self).__init__()

        self.topic_parse_info = topic_common.item_parse_info(
            attribute_name='jobs',
            use_leftover=True)


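    # __split_jobs_between_ids_names() partitions self.jobs on isdigit(),
    # e.g. (illustrative names) ['42', 'kernel_smoke'] ->
    # (['42'], ['kernel_smoke']).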
    def __split_jobs_between_ids_names(self):
        job_ids = []
        job_names = []

        # Sort between job IDs and names
        for job_id in self.jobs:
            if job_id.isdigit():
                job_ids.append(job_id)
            else:
                job_names.append(job_id)
        return (job_ids, job_names)


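    # execute_on_ids_and_names() issues at most two RPCs: one filtered on
    # tag_id for numeric jobs and one on tag_name for named jobs, then
    # concatenates the results. Illustrative: self.jobs == ['42', 'nightly']
    # yields filters {'id__in': ['42']} and {'name__in': ['nightly']}.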
    def execute_on_ids_and_names(self, op, filters={},
                                 check_results={'id__in': 'id',
                                                'name__in': 'id'},
                                 tag_id='id__in', tag_name='name__in'):
        if not self.jobs:
            # Want everything
            return super(job_list_stat, self).execute(op=op, filters=filters)

        all_jobs = []
        (job_ids, job_names) = self.__split_jobs_between_ids_names()

        for items, tag in [(job_ids, tag_id),
                           (job_names, tag_name)]:
            if items:
                new_filters = filters.copy()
                new_filters[tag] = items
                jobs = super(job_list_stat,
                             self).execute(op=op,
                                           filters=new_filters,
                                           check_results=check_results)
                all_jobs.extend(jobs)

        return all_jobs


class job_list(job_list_stat):
    """atest job list [<jobs>] [--all] [--running] [--user <username>]"""
    def __init__(self):
        super(job_list, self).__init__()
        self.parser.add_option('-a', '--all', help='List jobs for all '
                               'users.', action='store_true', default=False)
        self.parser.add_option('-r', '--running', help='List only running '
                               'jobs', action='store_true')
        self.parser.add_option('-u', '--user', help='List jobs for given '
                               'user', type='string')


    def parse(self):
        options, leftover = super(job_list, self).parse()
        self.all = options.all
        self.data['running'] = options.running
        if options.user:
            if options.all:
                self.invalid_syntax('Only specify --all or --user, not both.')
            else:
                self.data['owner'] = options.user
        elif not options.all and not self.jobs:
            self.data['owner'] = getpass.getuser()

        return options, leftover


    def execute(self):
        return self.execute_on_ids_and_names(op='get_jobs_summary',
                                             filters=self.data)


    def output(self, results):
        keys = ['id', 'owner', 'name', 'status_counts']
        if self.verbose:
            keys.extend(['priority', 'control_type', 'created_on'])
        self._convert_status(results)
        super(job_list, self).output(results, keys)


class job_stat(job_list_stat):
    """atest job stat <job>"""
    usage_action = 'stat'

    def __init__(self):
        super(job_stat, self).__init__()
        self.parser.add_option('-f', '--control-file',
                               help='Display the control file',
                               action='store_true', default=False)
        self.parser.add_option('-N', '--list-hosts',
                               help='Display only a list of hosts',
                               action='store_true')
        self.parser.add_option('-s', '--list-hosts-status',
                               help='Display only the hosts in these statuses '
                               'for a job.', action='store')


    def parse(self):
        status_list = topic_common.item_parse_info(
                attribute_name='status_list',
                inline_option='list_hosts_status')
        options, leftover = super(job_stat, self).parse([status_list],
                                                        req_items='jobs')

        if not self.jobs:
            self.invalid_syntax('Must specify at least one job.')

        self.show_control_file = options.control_file
        self.list_hosts = options.list_hosts

        if self.list_hosts and self.status_list:
            self.invalid_syntax('--list-hosts is implicit when using '
                                '--list-hosts-status.')
        if len(self.jobs) > 1 and (self.list_hosts or self.status_list):
            self.invalid_syntax('--list-hosts and --list-hosts-status should '
                                'only be used on a single job.')

        return options, leftover


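    # _merge_results() folds the queue entries into the job summaries.
    # Illustrative: a job whose entries are host1/host2 'Completed' and
    # host3 'Failed' ends up with (status order may vary)
    #   hosts_status == 'Completed="host1 host2", Failed="host3"'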
    def _merge_results(self, summary, qes):
        hosts_status = {}
        for qe in qes:
            if qe['host']:
                job_id = qe['job']['id']
                hostname = qe['host']['hostname']
                hosts_status.setdefault(job_id,
                                        {}).setdefault(qe['status'],
                                                       []).append(hostname)

        for job in summary:
            job_id = job['id']
            if job_id in hosts_status:
                this_job = hosts_status[job_id]
                job['hosts'] = ' '.join(' '.join(host) for host in
                                        this_job.itervalues())
                host_per_status = ['%s="%s"' % (status, ' '.join(host))
                                   for status, host in this_job.iteritems()]
                job['hosts_status'] = ', '.join(host_per_status)
                if self.status_list:
                    statuses = set(s.lower() for s in self.status_list)
                    all_hosts = [s for s in host_per_status if s.split('=',
                                 1)[0].lower() in statuses]
                    job['hosts_selected_status'] = '\n'.join(all_hosts)
            else:
                job['hosts_status'] = ''

            if not job.get('hosts'):
                self.generic_error('Job has unassigned meta-hosts, '
                                   'try again shortly.')

        return summary


    def execute(self):
        summary = self.execute_on_ids_and_names(op='get_jobs_summary')

        # Get the real hostnames
        qes = self.execute_on_ids_and_names(op='get_host_queue_entries',
                                            check_results={},
                                            tag_id='job__in',
                                            tag_name='job__name__in')

        self._convert_status(summary)

        return self._merge_results(summary, qes)


    def output(self, results):
        if self.list_hosts:
            keys = ['hosts']
        elif self.status_list:
            keys = ['hosts_selected_status']
        elif not self.verbose:
            keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status']
        else:
            keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status',
                    'owner', 'control_type', 'synch_count', 'created_on',
                    'run_verify', 'reboot_before', 'reboot_after',
                    'parse_failed_repair']

        if self.show_control_file:
            keys.append('control_file')

        super(job_stat, self).output(results, keys)


class job_create_or_clone(action_common.atest_create, job):
    """Class containing the code common to the job create and clone actions"""
    msg_items = 'job_name'

    def __init__(self):
        super(job_create_or_clone, self).__init__()
        self.hosts = []
        self.data_item_key = 'name'
        self.parser.add_option('-p', '--priority',
                               help='Job priority (int)', type='int',
                               default=priorities.Priority.DEFAULT)
        self.parser.add_option('-b', '--labels',
                               help='Comma separated list of labels '
                               'to get machine list from.', default='')
        self.parser.add_option('-m', '--machine', help='List of machines to '
                               'run on')
        self.parser.add_option('-M', '--mlist',
                               help='File listing machines to use',
                               type='string', metavar='MACHINE_FLIST')
        self.parser.add_option('--one-time-hosts',
                               help='List of one time hosts')
        self.parser.add_option('-e', '--email',
                               help='A comma separated list of '
                               'email addresses to notify of job completion',
                               default='')


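    # Illustrative _parse_hosts() call (hypothetical labels/hostnames):
    #   _parse_hosts(['2*label1', 'host0,host1', '*label2'])
    #   -> (['host0', 'host1'], ['label1', 'label1', 'label2'])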
    def _parse_hosts(self, args):
        """ Parses the arguments to generate a list of hosts and meta_hosts
        A host is a regular name, a meta_host is n*label or *label.
        These can be mixed on the CLI, and separated by either commas or
        spaces, e.g.: 5*Machine_Label host0 5*Machine_Label2,host2 """

        hosts = []
        meta_hosts = []

        for arg in args:
            for host in arg.split(','):
                if re.match(r'^[0-9]+[*]', host):
                    num, host = host.split('*', 1)
                    meta_hosts += int(num) * [host]
                elif re.match(r'^[*](\w*)', host):
                    meta_hosts += [re.match(r'^[*](\w*)', host).group(1)]
                elif host != '' and host not in hosts:
                    # Real hostname and not a duplicate
                    hosts.append(host)

        return (hosts, meta_hosts)


    def parse(self, parse_info=[]):
        host_info = topic_common.item_parse_info(attribute_name='hosts',
                                                 inline_option='machine',
                                                 filename_option='mlist')
        job_info = topic_common.item_parse_info(attribute_name='jobname',
                                                use_leftover=True)
        oth_info = topic_common.item_parse_info(attribute_name='one_time_hosts',
                                                inline_option='one_time_hosts')
        label_info = topic_common.item_parse_info(attribute_name='labels',
                                                  inline_option='labels')

        options, leftover = super(job_create_or_clone, self).parse(
                [host_info, job_info, oth_info, label_info] + parse_info,
                req_items='jobname')
        self.data = {
            'priority': options.priority,
        }
        jobname = self.jobname
        if len(jobname) > 1:
            self.invalid_syntax('Too many arguments specified, expected only '
                                'the job name: %s' % jobname)
        self.jobname = jobname[0]

        if self.one_time_hosts:
            self.data['one_time_hosts'] = self.one_time_hosts

        if self.labels:
            label_hosts = self.execute_rpc(op='get_hosts',
                                           multiple_labels=self.labels)
            for host in label_hosts:
                self.hosts.append(host['hostname'])

        self.data['name'] = self.jobname

        (self.data['hosts'],
         self.data['meta_hosts']) = self._parse_hosts(self.hosts)

        self.data['email_list'] = options.email

        return options, leftover


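    # create_job() returns the one-line summary printed by atest,
    # e.g. (illustrative) ['my_job (id 123)'].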
    def create_job(self):
        job_id = self.execute_rpc(op='create_job', **self.data)
        return ['%s (id %s)' % (self.jobname, job_id)]


    def get_items(self):
        return [self.jobname]


class job_create(job_create_or_clone):
    """atest job create [--priority <int>]
    [--synch_count] [--control-file </path/to/cfile>]
    [--server] [--test <test1,test2>]
    [--mlist </path/to/machinelist>] [--machine <host1 host2 host3>]
    [--labels <list of labels of machines to run on>]
    [--reboot_before <option>] [--reboot_after <option>]
    [--noverify] [--timeout_mins <minutes>] [--max_runtime <max runtime>]
    [--one-time-hosts <hosts>] [--email <email>]
    [--dependencies <labels this job is dependent on>]
    [--parse-failed-repair <option>]
    [--image <http://path/to/image>] [--require-ssp]
    job_name

    Creating a job is rather different from the other create operations,
    so it only uses the __init__() and output() from its superclass.
    """
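    # Illustrative invocation (hypothetical host and test names):
    #   atest job create -m host1 -t sleeptest my_sleep_job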
    op_action = 'create'

    def __init__(self):
        super(job_create, self).__init__()
        self.ctrl_file_data = {}
        self.parser.add_option('-y', '--synch_count', type=int,
                               help='Number of machines to use per autoserv '
                                    'execution')
        self.parser.add_option('-f', '--control-file',
                               help='Use this control file', metavar='FILE')
        self.parser.add_option('-s', '--server',
                               help='This is a server-side job',
                               action='store_true', default=False)
        self.parser.add_option('-t', '--test',
                               help='List of tests to run')

        self.parser.add_option('-d', '--dependencies', help='Comma separated '
                               'list of labels this job is dependent on.',
                               default='')

        self.parser.add_option('-B', '--reboot_before',
                               help='Whether or not to reboot the machine '
                                    'before the job (never/if dirty/always)',
                               type='choice',
                               choices=('never', 'if dirty', 'always'))
        self.parser.add_option('-a', '--reboot_after',
                               help='Whether or not to reboot the machine '
                                    'after the job (never/if all tests passed/'
                                    'always)',
                               type='choice',
                               choices=('never', 'if all tests passed',
                                        'always'))

        self.parser.add_option('--parse-failed-repair',
                               help='Whether or not to parse failed repair '
                                    'results as part of the job',
                               type='choice',
                               choices=('true', 'false'))
        self.parser.add_option('-n', '--noverify',
                               help='Do not run verify for job',
                               default=False, action='store_true')
        self.parser.add_option('-o', '--timeout_mins',
                               help='Job timeout in minutes.',
                               metavar='TIMEOUT')
        self.parser.add_option('--max_runtime',
                               help='Job maximum runtime in minutes')

        self.parser.add_option('-i', '--image',
                               help='OS image to install before running the '
                                    'test.')
        self.parser.add_option('--require-ssp',
                               help='Require server-side packaging',
                               default=False, action='store_true')


    def parse(self):
        deps_info = topic_common.item_parse_info(attribute_name='dependencies',
                                                 inline_option='dependencies')
        options, leftover = super(job_create, self).parse(
                parse_info=[deps_info])

        if (len(self.hosts) == 0 and not self.one_time_hosts
            and not options.labels):
            self.invalid_syntax('Must specify at least one machine '
                                '(-m, -M, -b or --one-time-hosts).')
        if not options.control_file and not options.test:
            self.invalid_syntax('Must specify either --test or --control-file'
                                ' to create a job.')
        if options.control_file and options.test:
            self.invalid_syntax('Can only specify one of --control-file or '
                                '--test, not both.')
        if options.control_file:
            try:
                control_file_f = open(options.control_file)
                try:
                    control_file_data = control_file_f.read()
                finally:
                    control_file_f.close()
            except IOError:
                self.generic_error('Unable to read from specified '
                                   'control-file: %s' % options.control_file)
            self.data['control_file'] = control_file_data
        if options.test:
            if options.server:
                self.invalid_syntax('If you specify tests, then the '
                                    'client/server setting is implicit and '
                                    'cannot be overridden.')
            tests = [t.strip() for t in options.test.split(',') if t.strip()]
            self.ctrl_file_data['tests'] = tests

        if options.image:
            self.data['image'] = options.image

        if options.reboot_before:
            self.data['reboot_before'] = options.reboot_before.capitalize()
        if options.reboot_after:
            self.data['reboot_after'] = options.reboot_after.capitalize()
        if options.parse_failed_repair:
            self.data['parse_failed_repair'] = (
                options.parse_failed_repair == 'true')
        if options.noverify:
            self.data['run_verify'] = False
        if options.timeout_mins:
            self.data['timeout_mins'] = options.timeout_mins
        if options.max_runtime:
            self.data['max_runtime_mins'] = options.max_runtime

        self.data['dependencies'] = self.dependencies

        if options.synch_count:
            self.data['synch_count'] = options.synch_count
        if options.server:
            self.data['control_type'] = control_data.CONTROL_TYPE_NAMES.SERVER
        else:
            self.data['control_type'] = control_data.CONTROL_TYPE_NAMES.CLIENT

        self.data['require_ssp'] = options.require_ssp

        return options, leftover


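    # When --test was given, the control file is generated server-side first.
    # Illustrative: '-t sleeptest' sends {'tests': ['sleeptest']} to
    # generate_control_file, and the returned synch_count/is_server values
    # fill in whatever the command line left unset.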
    def execute(self):
        if self.ctrl_file_data:
            cf_info = self.execute_rpc(op='generate_control_file',
                                       item=self.jobname,
                                       **self.ctrl_file_data)

            self.data['control_file'] = cf_info['control_file']
            if 'synch_count' not in self.data:
                self.data['synch_count'] = cf_info['synch_count']
            if cf_info['is_server']:
                self.data['control_type'] = (
                        control_data.CONTROL_TYPE_NAMES.SERVER)
            else:
                self.data['control_type'] = (
                        control_data.CONTROL_TYPE_NAMES.CLIENT)

            # Get the union of the 2 sets of dependencies
            deps = set(self.data['dependencies'])
            deps = sorted(deps.union(cf_info['dependencies']))
            self.data['dependencies'] = list(deps)

        if 'synch_count' not in self.data:
            self.data['synch_count'] = 1

        return self.create_job()


class job_clone(job_create_or_clone):
    """atest job clone [--priority <int>]
    [--mlist </path/to/machinelist>] [--machine <host1 host2 host3>]
    [--labels <list of labels of machines to run on>]
    [--one-time-hosts <hosts>] [--email <email>]
    job_name

    Cloning a job is rather different from the other create operations,
    so it only uses the __init__() and output() from its superclass.
    """
    op_action = 'clone'
    usage_action = 'clone'

    def __init__(self):
        super(job_clone, self).__init__()
        self.parser.add_option('-i', '--id', help='Job id to clone',
                               default=False,
                               metavar='JOB_ID')
        self.parser.add_option('-r', '--reuse-hosts',
                               help='Use the exact same hosts as the '
                               'cloned job.',
                               action='store_true', default=False)


    def parse(self):
        options, leftover = super(job_clone, self).parse()

        self.clone_id = options.id
        self.reuse_hosts = options.reuse_hosts

        host_specified = self.hosts or self.one_time_hosts or options.labels
        if self.reuse_hosts and host_specified:
            self.invalid_syntax('Cannot specify hosts and reuse the same '
                                'ones as the cloned job.')

        if not (self.reuse_hosts or host_specified):
            self.invalid_syntax('Must reuse or specify at least one '
                                'machine (-r, -m, -M, -b or '
                                '--one-time-hosts).')

        return options, leftover


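    # Illustrative clone flow: get_info_for_clone returns the original job's
    # fields plus, with --reuse-hosts, e.g. meta_host_counts {'label1': 2},
    # which is expanded back into meta_hosts ['label1', 'label1'].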
    def execute(self):
        clone_info = self.execute_rpc(op='get_info_for_clone',
                                      id=self.clone_id,
                                      preserve_metahosts=self.reuse_hosts)

        # Remove fields from clone data that cannot be reused
        for field in ('name', 'created_on', 'id', 'owner'):
            del clone_info['job'][field]

        # Also remove the parameterized_job field: the feature is still
        # incomplete, this tool does not attempt to support it for now (it
        # uses a different API function), and it breaks create_job().
        if 'parameterized_job' in clone_info['job']:
            del clone_info['job']['parameterized_job']

        # Keyword args cannot be unicode strings
        self.data.update((str(key), val)
                         for key, val in clone_info['job'].iteritems())

        if self.reuse_hosts:
            # Convert the clone info's host lists into the form that
            # create_job() expects
            for label, qty in clone_info['meta_host_counts'].iteritems():
                self.data['meta_hosts'].extend([label]*qty)

            self.data['hosts'].extend(host['hostname']
                                      for host in clone_info['hosts'])

        return self.create_job()


class job_abort(job, action_common.atest_delete):
    """atest job abort <job(s)>"""
    usage_action = op_action = 'abort'
    msg_done = 'Aborted'

    def parse(self):
        job_info = topic_common.item_parse_info(attribute_name='jobids',
                                                use_leftover=True)
        options, leftover = super(job_abort, self).parse([job_info],
                                                         req_items='jobids')
        return options, leftover


    def execute(self):
        data = {'job__id__in': self.jobids}
        self.execute_rpc(op='abort_host_queue_entries', **data)
        print 'Aborting jobs: %s' % ', '.join(self.jobids)


    def get_items(self):
        return self.jobids