/external/libcups/cups/
  dest-job.c
     37: int job_id) /* I - Job ID */ in cupsCancelDestJob() argument
     51: ippAddInteger(request, IPP_TAG_OPERATION, IPP_TAG_INTEGER, "job-id", job_id); in cupsCancelDestJob()
     77: int job_id) /* I - Job ID */ in cupsCloseDestJob() argument
     84: …http, (void *)dest, dest ? dest->name : NULL, dest ? dest->instance : NULL, (void *)info, job_id)); in cupsCloseDestJob()
     97: if (!http || !dest || !info || job_id <= 0) in cupsCloseDestJob()
    135: job_id); in cupsCloseDestJob()
    168: int *job_id, /* O - Job ID or 0 on error */ in cupsCreateDestJob() argument
    179: …? dest->name : NULL, dest ? dest->instance : NULL, (void *)info, (void *)job_id, title, num_option… in cupsCreateDestJob()
    192: if (job_id) in cupsCreateDestJob()
    193: *job_id = 0; in cupsCreateDestJob()
    [all …]
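The matches above are from CUPS's public destination-job API: cupsCreateDestJob() returns the new job's ID through its job_id output parameter (0 on error, per the line 168 comment), and cupsCancelDestJob()/cupsCloseDestJob() then take that ID. A minimal sketch of how the calls fit together; the printer name "myprinter" is a placeholder and error handling is trimmed:

```cpp
#include <stdio.h>
#include <cups/cups.h>

int main(void) {
  /* Look up a destination and its capabilities. */
  cups_dest_t *dest = cupsGetNamedDest(CUPS_HTTP_DEFAULT, "myprinter", NULL);
  if (!dest)
    return (1);
  cups_dinfo_t *info = cupsCopyDestInfo(CUPS_HTTP_DEFAULT, dest);

  int job_id = 0;  /* "O - Job ID or 0 on error", as the line 168 comment says */
  if (cupsCreateDestJob(CUPS_HTTP_DEFAULT, dest, info, &job_id,
                        "example", 0, NULL) == IPP_STATUS_OK) {
    printf("created job %d\n", job_id);
    /* cupsCloseDestJob() would reject job_id <= 0 (line 97); here the
     * freshly created job is cancelled instead of closed. */
    cupsCancelDestJob(CUPS_HTTP_DEFAULT, dest, job_id);
  }

  cupsFreeDestInfo(info);
  cupsFreeDests(1, dest);
  return (0);
}
```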
  util.c
     44: …int job_id) /* I - Job ID, @code CUPS_JOBID_CURRENT@ for the current job, or @code CUPS_JOB… in cupsCancelJob() argument
     46: return (cupsCancelJob2(CUPS_HTTP_DEFAULT, name, job_id, 0) in cupsCancelJob()
     69: …int job_id, /* I - Job ID, @code CUPS_JOBID_CURRENT@ for the current job, or @code CUPS_JOB… in cupsCancelJob2() argument
     80: if (job_id < -1 || (!name && job_id == 0)) in cupsCancelJob2()
    105: request = ippNewRequest(job_id < 0 ? IPP_OP_PURGE_JOBS : IPP_OP_CANCEL_JOB); in cupsCancelJob2()
    115: job_id); in cupsCancelJob2()
    117: else if (job_id > 0) in cupsCancelJob2()
    119: snprintf(uri, sizeof(uri), "ipp://localhost/jobs/%d", job_id); in cupsCancelJob2()
    127: if (purge && job_id >= 0) in cupsCancelJob2()
    129: else if (!purge && job_id < 0) in cupsCancelJob2()
    [all …]
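The util.c excerpt shows the sign of job_id selecting the IPP operation (line 105): a positive ID produces Cancel-Job against ipp://localhost/jobs/<id>, while a negative ID produces Purge-Jobs for the named queue. A hedged sketch of both calls; "myprinter" and the job number are illustrative:

```cpp
#include <cups/cups.h>

/* Cancel a single job; purge = 0 leaves it in the job history. */
void cancel_one_job(void) {
  cupsCancelJob2(CUPS_HTTP_DEFAULT, "myprinter", 42, 0);
}

/* CUPS_JOBID_ALL (-1) takes the IPP_OP_PURGE_JOBS branch at line 105;
 * with purge = 1 every job on the queue is removed outright. */
void purge_all_jobs(void) {
  cupsCancelJob2(CUPS_HTTP_DEFAULT, "myprinter", CUPS_JOBID_ALL, 1);
}
```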
  notify.c
     38: ipp_attribute_t *job_id, /* notify-job-id */ in cupsNotifySubject() local
     58: job_id = ippFindAttribute(event, "notify-job-id", IPP_TAG_INTEGER); in cupsNotifySubject()
     68: if (job_id && printer_name && printer_uri && job_state) in cupsNotifySubject()
    107: job_id->values[0].integer, in cupsNotifySubject()
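cupsNotifySubject() reads notify-job-id out of an IPP event before formatting a subject line (lines 58 and 107). A small sketch of the same lookup, using the public accessor ippGetInteger() in place of the direct job_id->values[0].integer field access; event is assumed to be an ipp_t * already read from a subscription:

```cpp
#include <cups/cups.h>

/* Return the job ID carried by an IPP event notification, or 0 when
 * the attribute is absent (not every event relates to a job). */
int notify_job_id(ipp_t *event) {
  ipp_attribute_t *attr =
      ippFindAttribute(event, "notify-job-id", IPP_TAG_INTEGER);
  return (attr ? ippGetInteger(attr, 0) : 0);
}
```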
/external/webrtc/third_party/gtest-parallel/
  gtest-parallel
    135: def handle_meta(self, job_id, args): argument
    139: self.tests[job_id] = (binary, test.strip())
    143: (binary, test) = self.tests[job_id]
    146: self.failures.append(self.tests[job_id])
    147: with open(self.outputs[job_id]) as f:
    157: def logfile(self, job_id, name): argument
    158: self.outputs[job_id] = name
    182: def logfile(self, job_id, name): argument
    183: with open(self.outputs[job_id]) as f:
    185: self.log(str(job_id) + '> ' + line.rstrip())
    [all …]
/external/autotest/venv/lucifer/cmd/
  job_reporter.py
     43: with leasing.obtain_lease(_lease_path(args.jobdir, args.job_id)):
    102: job = models.Job.objects.get(id=args.job_id)
    107: _mark_handoff_completed(args.job_id)
    140: '-abortsock', _abort_sock_path(args.jobdir, args.job_id),
    193: def _mark_handoff_completed(job_id): argument
    195: handoff = models.JobHandoff.objects.get(job_id=job_id)
    200: def _abort_sock_path(jobdir, job_id): argument
    201: return _lease_path(jobdir, job_id) + '.sock'
    204: def _lease_path(jobdir, job_id): argument
    205: return os.path.join(jobdir, str(job_id))
/external/autotest/server/cros/dynamic_suite/
  reporting_utils.py
    192: def link_job(job_id, instance_server=None): argument
    202: if not job_id:
    209: return _job_view % (instance_server, job_id)
    212: def _base_results_log(job_id, result_owner, hostname): argument
    223: if job_id and result_owner and hostname:
    224: path_to_object = '%s-%s/%s' % (job_id, result_owner,
    230: def link_result_logs(job_id, result_owner, hostname): argument
    241: base_results = _base_results_log(job_id, result_owner, hostname)
    246: (job_id, result_owner, hostname))
    249: def link_status_log(job_id, result_owner, hostname): argument
    [all …]
  tools.py
    285: def create_bug_keyvals(job_id, testname, bug_info): argument
    296: keyval_base = '%s_%s' % (job_id, testname) if job_id else testname
    303: def get_test_failure_bug_info(keyvals, job_id, testname): argument
    327: keyval_base = '%s_%s' % (job_id, testname) if job_id else testname
/external/autotest/site_utils/
  lxc_cleanup.py
     56: job_id = container.id.job_id
     65: hqes = AFE.get_host_queue_entries(job_id=job_id)
     67: logging.error('Failed to get hqe for job %s. Error: %s.', job_id, e)
     76: 'not orphaned.', job_id, container.name)
     82: job_id)
     86: job_id, container.name)
  job_history.py
    125: def try_get(self, host_id, job_id, start_time, end_time): argument
    138: return self[host_id].try_get(job_id, start_time, end_time)
    168: def try_get(self, job_id, start_time, end_time): argument
    181: task.queue_entry.job.id == job_id]
    339: hqe = models.HostQueueEntry.objects.filter(job_id=hqe.job.id)[0]
    352: job_hqe = models.HostQueueEntry.objects.filter(job_id=job.id)[0]
    382: def get_job_info(job_id): argument
    392: hqe = models.HostQueueEntry.objects.filter(job_id=job_id)[0]
    394: raise Exception('No HQE found for job ID %d' % job_id)
    412: job_info = get_job_info(options.job_id)
  test_runner_utils.py
    116: def handle_local_result(self, job_id, results_dir, record): argument
    126: logging.debug('Parsing test results for job %s',job_id)
    128: logging.debug('Handling result of job %s',job_id)
    131: logging.debug('All tests for job %s succeeded, no retry', job_id)
    132: if self._retry_handler.job_present(job_id):
    133: self._retry_handler.set_attempted(job_id)
    138: self._retry_handler._should_retry_local_job(job_id))
    140: new_job_id = self._retry_local_result(job_id, record)
    143: def _retry_local_result(self, job_id, record): argument
    154: test = self._jobs_to_tests[job_id]
    [all …]
  test_push.py
    137: job_id = rpc_utils.create_job_common(
    142: while not TKO.get_job_test_statuses_from_db(job_id):
    144: AFE.run('abort_host_queue_entries', job=job_id)
    149: verify_test_results(job_id,
    312: hqes = [models.HostQueueEntry.objects.filter(job_id=job_id)[0]
    313: for job_id in job_ids]
    348: def verify_test_results(job_id, expected_results): argument
    356: test_views = site_utils.get_test_views_from_tko(job_id, TKO)
    361: job_name = '%s-%s' % (job_id, getpass.getuser())
  test_runner_utils_unittest.py
    288: job_id = afe.create_job(control_file, hosts=self._hosts)
    289: self._jobs.append(job_id)
    290: self._jobs_to_tests[job_id] = control_file
    292: def handle_local_result(self, job_id, results_dir, logger, argument
    303: control_file = self._jobs_to_tests.get(job_id)
    304: job_id = afe.create_job(control_file, hosts=self._hosts)
    305: self._jobs.append(job_id)
    306: self._jobs_to_tests[job_id] = control_file
    307: return job_id
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
  grpc_channel.h
     40: HostPortsJob(const string& job_id, const std::map<int, string>& host_ports) in HostPortsJob()
     41: : job_id(job_id), host_ports(host_ports) {} in HostPortsJob()
     42: const string job_id; member
     46: Status AddHostPortsJob(const string& job_id,
     49: Status AddHostPortsJob(const string& job_id,
  grpc_channel.cc
    115: Status GrpcChannelSpec::AddHostPortsJob(const string& job_id, in AddHostPortsJob() argument
    121: return AddHostPortsJob(job_id, host_ports_map); in AddHostPortsJob()
    125: const string& job_id, const std::map<int, string>& host_ports) { in AddHostPortsJob() argument
    126: if (!job_ids_.insert(job_id).second) { in AddHostPortsJob()
    128: "Duplicate job ID in cluster specification: ", job_id); in AddHostPortsJob()
    133: host_ports_jobs_.emplace_back(job_id, host_ports); in AddHostPortsJob()
    244: SparseGrpcChannelCache(const string& job_id, in SparseGrpcChannelCache() argument
    247: : job_id_(job_id), in SparseGrpcChannelCache()
    332: new SparseGrpcChannelCache(job.job_id, job.host_ports, channel_func)); in NewGrpcChannelCache()
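From the signatures above: GrpcChannelSpec::AddHostPortsJob() has a vector overload that builds a task map and forwards to the std::map overload (line 121), which rejects duplicate job IDs (lines 126-128). A rough usage sketch; this is an internal TensorFlow API, and the job name and addresses here are invented:

```cpp
#include <map>
#include <string>

#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"

// Register a sparse two-task "worker" job; returns a non-OK Status if a
// job named "worker" was already added to this spec.
tensorflow::Status AddWorkerJob(tensorflow::GrpcChannelSpec* spec) {
  // Sparse task-index -> host:port map, matching the overload at line 125;
  // task 1 is intentionally absent.
  std::map<int, std::string> workers = {{0, "localhost:2222"},
                                        {2, "localhost:2224"}};
  return spec->AddHostPortsJob("worker", workers);
}
```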
/external/toolchain-utils/automation/server/
  job_manager.py
    104: def KillJob(self, job_id): argument
    110: self._KillJob(job_id)
    112: def GetJob(self, job_id): argument
    114: if job_.id == job_id:
    118: def _KillJob(self, job_id): argument
    119: self._logger.info('Killing [Job: %d].', job_id)
    121: if job_id in self.job_executer_mapping:
    122: self.job_executer_mapping[job_id].Kill()
    124: if job_.id == job_id:
/external/autotest/scheduler/
  rdb_testing_utils.py
    120: def create_special_task(cls, job_id=None, host_id=None, argument
    123: if job_id:
    124: queue_entry = cls.get_hqes(job_id=job_id)[0]
    172: def assign_job_to_shard(cls, job_id, shard_hostname): argument
    180: job_filter = models.Job.objects.filter(id=job_id, shard__isnull=True)
    257: def add_host_to_job(cls, host, job_id, activate=0): argument
    267: hqe = models.HostQueueEntry.objects.get(job_id=job_id)
    269: raise ValueError('HQE for job %s already has a host' % job_id)
    277: def increment_priority(cls, job_id): argument
    278: job = models.Job.objects.get(id=job_id)
    [all …]
  rdb_lib.py
     31: jobs = [queue_entry.job_id for queue_entry in queue_entries]
     45: job_id = queue_entry.job_id
     47: for dep in self._job_deps.get(job_id, []):
     53: job_acls = self._job_acls.get(job_id, [])
/external/autotest/venv/lucifer/
  leasing_unittest.py
    195: def _abort_socket(tmpdir, job_id): argument
    204: path = os.path.join(str(tmpdir), '%d.sock' % job_id)
    219: def _abort_socket_norecv(tmpdir, job_id): argument
    230: path = os.path.join(str(tmpdir), '%d.sock' % job_id)
    250: def _make_lease(tmpdir, job_id): argument
    251: return _make_lease_file(str(tmpdir), job_id)
    254: def _make_lease_file(jobdir, job_id): argument
    260: path = os.path.join(jobdir, str(job_id))
/external/toolchain-utils/automation/server/monitor/
  dashboard.py
     40: def __init__(self, job_id): argument
     41: self._job = pickle.loads(GetServerConnection().GetJob(job_id))
    181: def JobPageHandler(request, job_id): argument
    182: job = JobInfo(int(job_id))
    185: 'job_id': job_id,
    193: def LogPageHandler(request, job_id): argument
    194: job = JobInfo(int(job_id))
    196: ctx = MakeDefaultContext({'job_id': job_id, 'log_lines': job.GetLog()})
/external/autotest/database/
  schema_129.sql
    230: `job_id` int(11) DEFAULT NULL,
    243: UNIQUE KEY `host_queue_entries_job_id_and_host_id` (`job_id`,`host_id`),
    256: KEY `afe_host_queue_entries_job_id` (`job_id`),
    259: …CONSTRAINT `host_queue_entries_job_id_fk` FOREIGN KEY (`job_id`) REFERENCES `afe_jobs` (`id`) ON D…
    338: `job_id` int(11) DEFAULT NULL,
    341: UNIQUE KEY `ineligible_host_queues_both_ids` (`host_id`,`job_id`),
    342: KEY `ineligible_host_queues_job_id` (`job_id`),
    344: …CONSTRAINT `ineligible_host_queues_job_id_fk` FOREIGN KEY (`job_id`) REFERENCES `afe_jobs` (`id`) …
    356: `job_id` int(11) NOT NULL,
    360: PRIMARY KEY (`job_id`),
    [all …]
/external/autotest/contrib/
  always_failing_tests.py
     98: job_id = job.parent_job
     99: if not job_id:
    100: job_id = job
    101: x = rgx.search(job_id.name)
    103: print job_id.name
  log_distiller.py
    316: self.job_id = kwargs['job_id']
    320: (self.filter_command, self.job_id))
    358: job_id = int(sys.argv[1])
    360: suite_jobs = rpc.run('get_jobs', id=job_id)
    362: suite_jobs = rpc.run('get_jobs', parent_job=job_id)
    369: log_crawler = SchedulerLogCrawler(logfile, job_id=job['id'])
/external/autotest/frontend/migrations/
  022_implement_sync_count.py
     29: for id, job_id, status, complete, hostname in hqes:
     30: if job_id in synch_jobs or job_hqe_count[job_id] == 1:
/external/autotest/cli/
  job.py
     74: for job_id in self.jobs:
     75: if job_id.isdigit():
     76: job_ids.append(job_id)
     78: job_names.append(job_id)
    192: job_id = qe['job']['id']
    194: hosts_status.setdefault(job_id,
    199: job_id = job['id']
    200: if hosts_status.has_key(job_id):
    201: this_job = hosts_status[job_id]
    348: job_id = self.execute_rpc(op='create_job', **self.data)
    [all …]
/external/autotest/client/site_tests/graphics_dEQP/scripts/
  process_logs.py
     62: job_id = gs_path.split('/')[3].split('-')[0]
     64: name = os.path.join('logs', job_id + '_graphics_dEQP.DEBUG')
     65: logs.append(Logfile(job_id, name, gs_path))
     74: job_id = name.split('_')[0]
     75: logs.append(Logfile(job_id, name, name))
     77: job_id = name.split('_')[0]
     78: logs.append(Logfile(job_id, name, name))