/external/skia/bin/ |
D | try.py |
      48  jobs = json.loads(subprocess.check_output([
      50  return (BUCKET_SKIA_INTERNAL, jobs)
      67  jobs = []
      69  jobs.append((BUCKET_SKIA_PRIMARY, json.load(f)))
      71  jobs.append(get_jobs(REPO_INTERNAL))
      72  jobs.extend(update_meta_config.CQ_INCLUDE_CHROMIUM_TRYBOTS)
      75  for bucket, job_list in jobs:
      79  jobs = filtered_jobs
      82  if len(jobs) == 0:
      86  for bucket, job_list in jobs:
      [all …]
|
/external/skqp/bin/ |
D | try.py |
      48  jobs = json.loads(subprocess.check_output([
      50  return (BUCKET_SKIA_INTERNAL, jobs)
      67  jobs = []
      69  jobs.append((BUCKET_SKIA_PRIMARY, json.load(f)))
      71  jobs.append(get_jobs(REPO_INTERNAL))
      72  jobs.extend(update_meta_config.CQ_INCLUDE_CHROMIUM_TRYBOTS)
      75  for bucket, job_list in jobs:
      79  jobs = filtered_jobs
      82  if len(jobs) == 0:
      86  for bucket, job_list in jobs:
      [all …]
|
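Both copies of `try.py` above (skqp is a fork of skia, so the script appears twice) follow one pattern: build a list of `(bucket, job_list)` pairs, filter it against the requested jobs, and bail out if nothing matched. A minimal Python sketch of that shape — the bucket constants, the `tasks.json` layout, and the regex filter here are illustrative stand-ins, not the script's exact code:

```python
import json
import re

# Assumed bucket names, for illustration only.
BUCKET_SKIA_PRIMARY = 'skia.primary'
BUCKET_SKIA_INTERNAL = 'skia.internal'

def collect_jobs(tasks_json_path):
    """Pair each bucket with the job names defined for it (sketch)."""
    jobs = []
    with open(tasks_json_path) as f:
        jobs.append((BUCKET_SKIA_PRIMARY, list(json.load(f)['jobs'])))
    return jobs

def filter_jobs(jobs, pattern):
    """Keep only (bucket, job_list) pairs with jobs matching the request."""
    filtered_jobs = []
    for bucket, job_list in jobs:
        matched = [j for j in job_list if re.search(pattern, j)]
        if matched:
            filtered_jobs.append((bucket, matched))
    return filtered_jobs

jobs = filter_jobs(collect_jobs('infra/bots/tasks.json'), r'^Build-')
if len(jobs) == 0:
    raise SystemExit('No matching jobs.')
```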
/external/mesa3d/src/intel/vulkan/tests/ |
D | state_pool_no_free.c |
      37  } jobs[NUM_THREADS];  variable
      68  jobs[i].pool = &state_pool;  in run_test()
      69  jobs[i].id = i;  in run_test()
      70  pthread_create(&jobs[i].thread, NULL, alloc_states, &jobs[i]);  in run_test()
      74  pthread_join(jobs[i].thread, NULL);  in run_test()
      89  if (thread_max < jobs[i].offsets[next[i]]) {  in run_test()
      90  thread_max = jobs[i].offsets[next[i]];  in run_test()
     102  assert(jobs[max_thread_idx].offsets[next[max_thread_idx]] > highest);  in run_test()
     104  highest = jobs[max_thread_idx].offsets[next[max_thread_idx]];  in run_test()
|
D | block_pool_no_free.c |
      38  } jobs[NUM_THREADS];  variable
     117  jobs[i].pool = &pool;  in run_test()
     118  jobs[i].id = i;  in run_test()
     119  pthread_create(&jobs[i].thread, NULL, alloc_blocks, &jobs[i]);  in run_test()
     123  pthread_join(jobs[i].thread, NULL);  in run_test()
     128  block_ptrs[i] = jobs[i].blocks;  in run_test()
     133  block_ptrs[i] = jobs[i].back_blocks;  in run_test()
|
D | state_pool_test_helper.h |
      30  } jobs[NUM_THREADS];  variable
      64  jobs[i].pool = state_pool;  in run_state_pool_test()
      65  jobs[i].id = i;  in run_state_pool_test()
      66  pthread_create(&jobs[i].thread, NULL, alloc_states, &jobs[i]);  in run_state_pool_test()
      70  pthread_join(jobs[i].thread, NULL);  in run_state_pool_test()
|
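All three Anv test files above use the same fan-out/join idiom: a fixed `jobs[NUM_THREADS]` array whose slots carry a thread handle plus per-thread inputs and outputs. A rough Python rendering of the C pattern (names mirror the snippets; the worker body is a stand-in, not the Mesa code):

```python
import threading

NUM_THREADS = 16
STATES_PER_THREAD = 1024

class Job:
    """One slot of the C tests' jobs[NUM_THREADS] array."""
    def __init__(self):
        self.pool = None
        self.id = None
        self.thread = None
        self.offsets = []

def alloc_states(job):
    # Stand-in worker: the real tests allocate pool states and record offsets.
    for n in range(STATES_PER_THREAD):
        job.offsets.append(job.id * STATES_PER_THREAD + n)

def run_test(state_pool):
    jobs = [Job() for _ in range(NUM_THREADS)]
    for i, job in enumerate(jobs):
        job.pool = state_pool
        job.id = i
        job.thread = threading.Thread(target=alloc_states, args=(job,))
        job.thread.start()
    for job in jobs:
        job.thread.join()
    return jobs
```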
/external/mesa3d/src/gallium/auxiliary/util/ |
D | u_queue.c |
     145  job = queue->jobs[queue->read_idx];  in PIPE_THREAD_ROUTINE()
     146  memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));  in PIPE_THREAD_ROUTINE()
     163  while (queue->jobs[queue->read_idx].job) {  in PIPE_THREAD_ROUTINE()
     164  util_queue_fence_signal(queue->jobs[queue->read_idx].fence);  in PIPE_THREAD_ROUTINE()
     166  queue->jobs[queue->read_idx].job = NULL;  in PIPE_THREAD_ROUTINE()
     187  queue->jobs = (struct util_queue_job*)  in util_queue_init()
     189  if (!queue->jobs)  in util_queue_init()
     230  if (queue->jobs) {  in util_queue_init()
     234  FREE(queue->jobs);  in util_queue_init()
     266  FREE(queue->jobs);  in util_queue_destroy()
     [all …]
|
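`u_queue.c` stores jobs in a fixed-size ring buffer indexed by `read_idx`/`write_idx`, drained by worker threads; the `memset` at line 146 clears a slot as it is consumed. A condition-variable ring buffer with that shape might look like the following Python sketch (the real queue is C and signals per-job fences, which this omits):

```python
import threading

class JobQueue:
    def __init__(self, max_jobs):
        self.jobs = [None] * max_jobs
        self.read_idx = 0
        self.write_idx = 0
        self.count = 0
        lock = threading.Lock()
        self.not_empty = threading.Condition(lock)
        self.not_full = threading.Condition(lock)

    def add_job(self, job):
        with self.not_full:
            while self.count == len(self.jobs):
                self.not_full.wait()
            self.jobs[self.write_idx] = job
            self.write_idx = (self.write_idx + 1) % len(self.jobs)
            self.count += 1
            self.not_empty.notify()

    def pop_job(self):
        with self.not_empty:
            while self.count == 0:
                self.not_empty.wait()
            job = self.jobs[self.read_idx]
            self.jobs[self.read_idx] = None  # cf. the memset() in the snippet
            self.read_idx = (self.read_idx + 1) % len(self.jobs)
            self.count -= 1
            self.not_full.notify()
            return job
```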
/external/libdrm/tests/exynos/ |
D | exynos_fimg2d_event.c |
     103  static void wait_all_jobs(struct g2d_job* jobs, unsigned num_jobs)  in wait_all_jobs() argument
     108  while (jobs[i].busy)  in wait_all_jobs()
     114  static struct g2d_job* free_job(struct g2d_job* jobs, unsigned num_jobs)  in free_job() argument
     119  if (jobs[i].busy == 0)  in free_job()
     120  return &jobs[i];  in free_job()
     129  struct g2d_job *jobs = calloc(num_jobs, sizeof(struct g2d_job));  in g2d_work() local
     135  jobs[i].id = i;  in g2d_work()
     143  j = free_job(jobs, num_jobs);  in g2d_work()
     182  wait_all_jobs(jobs, num_jobs);  in g2d_work()
     183  free(jobs);  in g2d_work()
|
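The G2D test tracks jobs with a plain busy flag: `free_job()` scans for an idle slot and `wait_all_jobs()` waits until every flag clears. The idiom in Python terms (a sketch, not the libdrm code — the C test blocks on DRM completion events rather than sleeping):

```python
import time

class G2DJob:
    """Stand-in for struct g2d_job: an id plus a busy flag."""
    def __init__(self, job_id):
        self.id = job_id
        self.busy = False

def free_job(jobs):
    """Return the first idle job slot, or None if all are busy."""
    for job in jobs:
        if not job.busy:
            return job
    return None

def wait_all_jobs(jobs):
    """Poll until every job's completion handler has cleared its busy flag."""
    for job in jobs:
        while job.busy:
            time.sleep(0.001)
```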
/external/autotest/server/cros/dynamic_suite/ |
D | job_status_unittest.py |
      58  jobs = [FakeJob(0, [FakeStatus('GOOD', 'T0', ''),
      75  for status in jobs[4].statuses:
      78  job_id_set = set([job.id for job in jobs])
      80  [jobs[1]],
      81  [jobs[0], jobs[2]],
      82  jobs[3:6]
      95  waiter.add_jobs(jobs)
      97  for job in jobs[:6]:  # the 'GOOD' SERVER_JOB shouldn't be there.
|
/external/toolchain-utils/automation/common/ |
D | job_group.py |
      29  jobs=None,  argument
      36  self.jobs = []
      41  if jobs:
      42  for job in jobs:
      69  job) for job in self.jobs])
      72  self.jobs.append(job)
|
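From the snippets, `JobGroup` takes an optional `jobs` sequence and funnels every job through a single append path. A minimal reconstruction — the method name, `label` attribute, and back-reference are guesses, since only lines 29, 36, 41–42, and 72 are visible:

```python
class JobGroup:
    """Sketch of the job-group container suggested by the snippets."""
    def __init__(self, label, jobs=None):
        self.label = label        # assumed attribute
        self.jobs = []
        if jobs:
            for job in jobs:
                self.AddJob(job)

    def AddJob(self, job):        # method name is a guess
        job.group = self          # assumed back-reference
        self.jobs.append(job)
```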
/external/swiftshader/third_party/LLVM/utils/ |
D | llvmbuild |
     300  def __init__(self, work_queue, jobs,
     305  self.jobs = jobs
     517  llvm=dict(debug=["-j" + str(self.jobs)],
     518  release=["-j" + str(self.jobs)],
     519  paranoid=["-j" + str(self.jobs)]),
     520  llvm_gcc=dict(debug=["-j" + str(self.jobs),
     522  release=["-j" + str(self.jobs),
     524  paranoid=["-j" + str(self.jobs),
     526  llvm2=dict(debug=["-j" + str(self.jobs)],
     527  release=["-j" + str(self.jobs)],
     [all …]
|
/external/autotest/scheduler/ |
D | host_scheduler_unittests.py |
     343  jobs = self.create_suite(num=2)
     345  params=(jobs[0].id,))[0]
     347  hqe = self.assign_host_to_job(host1, jobs[0], r)
     348  self.verify_state(r, {jobs['parent_job'].id:1},
     349  {host1.id: jobs['parent_job'].id})
     352  self.verify_state(r, {jobs['parent_job'].id:1},
     353  {host1.id: jobs['parent_job'].id})
     355  self.assign_host_to_job(host2, jobs[1], r)
     356  self.verify_state(r, {jobs['parent_job'].id:2},
     357  {host1.id: jobs['parent_job'].id,
     [all …]
|
/external/autotest/scheduler/shard/ |
D | simple_heartbeat_server.py |
      78  jobs = models.Job.objects.filter(
      82  return jobs[:job_limit] if job_limit is not None else jobs
      95  def _create_packet(hosts, jobs):  argument
      98  'jobs': [j.serialize() for j in jobs]
     124  job_time, jobs = self._get_jobs(board, job_limit)
     126  serialize_time, heartbeat_packet = self._create_packet(hosts, jobs)
|
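The heartbeat server caps its job query with an optional slice and serializes hosts and jobs into one payload. Stripped of the Django ORM, the two visible helpers reduce to the following sketch (the `'hosts'` key is an assumption; only the `'jobs'` key appears in the snippet):

```python
def _get_jobs(jobs, job_limit=None):
    """Apply the optional cap exactly as line 82 does."""
    return jobs[:job_limit] if job_limit is not None else jobs

def _create_packet(hosts, jobs):
    """Bundle serialized hosts and jobs into one heartbeat payload."""
    return {
        'hosts': [h.serialize() for h in hosts],  # assumed key
        'jobs': [j.serialize() for j in jobs],
    }
```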
/external/skqp/site/dev/testing/ |
D | automated_testing.md |
      20  Multiple jobs may share the same task, for example, tests on two different
      23  Each Skia repository has an `infra/bots/tasks.json` file which defines the jobs
      24  and tasks for the repo. Most jobs will run at every commit, but it is possible
      25  to specify nightly and weekly jobs as well. For convenience, most repos also
      45  repo. You need to have permission to trigger try jobs; if you need permission,
      52  or using `bin/try`, a small wrapper for `git cl try` which helps to choose try jobs.
      96  <a name="adding-new-jobs"></a>
      97  Adding new jobs
     100  If you would like to add jobs to build or test new configurations, please file a
     103  If you know that the new jobs will need new hardware or you aren't sure which
     [all …]
|
/external/skia/site/dev/testing/ |
D | automated_testing.md |
      20  Multiple jobs may share the same task, for example, tests on two different
      23  Each Skia repository has an `infra/bots/tasks.json` file which defines the jobs
      24  and tasks for the repo. Most jobs will run at every commit, but it is possible
      25  to specify nightly and weekly jobs as well. For convenience, most repos also
      45  repo. You need to have permission to trigger try jobs; if you need permission,
      52  or using `bin/try`, a small wrapper for `git cl try` which helps to choose try jobs.
      96  <a name="adding-new-jobs"></a>
      97  Adding new jobs
     100  If you would like to add jobs to build or test new configurations, please file a
     103  If you know that the new jobs will need new hardware or you aren't sure which
     [all …]
|
/external/glide/library/src/main/java/com/bumptech/glide/load/engine/ |
D | Engine.java |
      30  private final Map<Key, EngineJob> jobs;  field in Engine
      63  Map<Key, EngineJob> jobs, EngineKeyFactory keyFactory,  in Engine() argument
      79  if (jobs == null) {  in Engine()
      80  jobs = new HashMap<Key, EngineJob>();  in Engine()
      82  this.jobs = jobs;  in Engine()
     177  EngineJob current = jobs.get(key);  in load()
     190  jobs.put(key, engineJob);  in load()
     237  jobs.remove(key);  in onEngineJobComplete()
     242  EngineJob current = jobs.get(key);  in onEngineJobCancelled()
     244  jobs.remove(key);  in onEngineJobCancelled()
|
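Glide's `Engine` keeps a `Map<Key, EngineJob>` so concurrent requests for the same key join one in-flight job, and completed or cancelled jobs are evicted. The same bookkeeping in Python (a sketch of the idea; the Java class does much more per step):

```python
class Engine:
    def __init__(self, jobs=None):
        # Allow injection for tests, as the Java constructor does.
        self.jobs = {} if jobs is None else jobs

    def load(self, key, start_job):
        current = self.jobs.get(key)
        if current is not None:
            return current            # join the in-flight job
        engine_job = start_job()
        self.jobs[key] = engine_job
        return engine_job

    def on_engine_job_complete(self, key):
        self.jobs.pop(key, None)

    def on_engine_job_cancelled(self, key, engine_job):
        # Evict only if the cancelled job is still the registered one.
        if self.jobs.get(key) is engine_job:
            del self.jobs[key]
```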
/external/v8/tools/testrunner/objects/ |
D | peer.py |
      30  def __init__(self, address, jobs, rel_perf, pubkey):  argument
      32  self.jobs = jobs  # integer: number of CPUs
      44  (self.address, self.jobs, self.relative_performance,
      74  return [self.address, self.jobs, self.relative_performance]
|
/external/autotest/frontend/afe/ |
D | direct_afe_unittest.py |
      14  jobs = afe.get_jobs()
      15  self.assertEquals(len(jobs), 0)
      25  jobs = afe.get_jobs()
      26  self.assertEquals(len(jobs), 1)
|
/external/libcups/cups/ |
D | util.c |
     249  cups_job_t *jobs)  /* I - Jobs */  in cupsFreeJobs() argument
     255  if (num_jobs <= 0 || !jobs)  in cupsFreeJobs()
     258  for (i = num_jobs, job = jobs; i > 0; i --, job ++)  in cupsFreeJobs()
     266  free(jobs);  in cupsFreeJobs()
     394  cupsGetJobs(cups_job_t **jobs,  /* O - Job data */  in cupsGetJobs() argument
     403  return (cupsGetJobs2(CUPS_HTTP_DEFAULT, jobs, name, myjobs, whichjobs));  in cupsGetJobs()
     421  cups_job_t **jobs,  /* O - Job data */  in cupsGetJobs2() argument
     464  if (!jobs)  in cupsGetJobs2()
     534  *jobs = NULL;  in cupsGetJobs2()
     628  temp = realloc(*jobs, sizeof(cups_job_t) * (size_t)(n + 1));  in cupsGetJobs2()
     [all …]
|
/external/v8/tools/testrunner/server/ |
D | presence_handler.py |
      60  jobs = data[1]
      64  response = [STARTUP_RESPONSE, self.server.daemon.jobs,
      69  p = peer.Peer(self.client_address[0], jobs, relative_perf,
      75  jobs = data[1]
      78  p = peer.Peer(self.client_address[0], jobs, perf, pubkey_fingerprint)
     117  request = [STARTUP_REQUEST, self.daemon.jobs, self.daemon.relative_perf,
|
/external/llvm/utils/lit/lit/ |
D | run.py |
     190  def execute_tests(self, display, jobs, max_time=None,  argument
     215  if jobs != 1 and use_processes and multiprocessing:
     220  consumer = MultiprocessResultsConsumer(self, display, jobs)
     241  queuer = task_impl(target=provider.queue_tests, args=(self.tests, jobs))
     259  if jobs == 1:
     263  self._execute_tests_in_parallel(task_impl, provider, consumer, jobs)
     276  def _execute_tests_in_parallel(self, task_impl, provider, consumer, jobs):  argument
     280  for i in range(jobs)]
|
/external/llvm/utils/ |
D | llvm-compilers-check |
     255  def __init__(self, work_queue, jobs,  argument
     260  self.jobs = jobs
     405  llvm=dict(debug=["-j" + str(self.jobs)],
     406  release=["-j" + str(self.jobs)],
     407  paranoid=["-j" + str(self.jobs)]),
     408  dragonegg=dict(debug=["-j" + str(self.jobs)],
     409  release=["-j" + str(self.jobs)],
     410  paranoid=["-j" + str(self.jobs)]))
     597  jobs = options.jobs // options.threads  variable
     598  if jobs == 0:
     [all …]
|
/external/vixl/tools/ |
D | lint.py |
     128  jobs = 1,  argument
     155  pool = multiprocessing.Pool(jobs)
     238  def RunLinter(files, jobs=1, progress_prefix='', cached=True):  argument
     242  jobs=jobs,
     261  retcode = RunLinter(files, jobs=args.jobs, cached=cached)
|
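vixl's linter fans files out over `multiprocessing.Pool(jobs)`. The core pattern as a standalone sketch (the real script also threads through a progress prefix and result caching, omitted here):

```python
import multiprocessing

def lint_one(path):
    """Stand-in worker: return the number of lint errors in `path`."""
    return 0

def run_linter(files, jobs=1):
    pool = multiprocessing.Pool(jobs)
    try:
        # One process per `jobs`; map() blocks until every file is done.
        return sum(pool.map(lint_one, files))
    finally:
        pool.close()
        pool.join()

if __name__ == '__main__':
    print(run_linter(['a.cc', 'b.cc'], jobs=4))
```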
/external/tensorflow/tensorflow/python/estimator/ |
D | run_config.py |
      95  jobs = cluster_spec.jobs
     100  if len(jobs) == 1 and len(cluster_spec.job_tasks(jobs[0])) == 1:
     151  if chief_task_type not in cluster_spec.jobs:
     183  if task_type not in cluster_spec.jobs:
     211  t for t in sorted(cluster_spec.jobs)
     214  if TaskType.PS in cluster_spec.jobs:
     474  if self._cluster_spec and TaskType.MASTER in self._cluster_spec.jobs:
     532  if TaskType.CHIEF in self._cluster_spec.jobs:
|
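`run_config.py` derives the run mode from `cluster_spec.jobs` (the list of job names) and `cluster_spec.job_tasks(...)`. Assuming the TF 1.x `tf.train.ClusterSpec` API, a quick illustration of those two accessors:

```python
import tensorflow as tf  # assumes a TF 1.x installation

cluster_spec = tf.train.ClusterSpec({
    'chief': ['host0:2222'],
    'worker': ['host1:2222', 'host2:2222'],
    'ps': ['host3:2222'],
})

print(sorted(cluster_spec.jobs))         # ['chief', 'ps', 'worker']
print(cluster_spec.job_tasks('worker'))  # ['host1:2222', 'host2:2222']

# The single-job, single-task shape that line 100 checks for:
local_spec = tf.train.ClusterSpec({'worker': ['localhost:2222']})
print(len(local_spec.jobs) == 1 and
      len(local_spec.job_tasks(local_spec.jobs[0])) == 1)  # True
```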
/external/toolchain-utils/automation/server/ |
D | job_group_manager.py |
      57  for job_ in group.jobs:
      70  for job_ in group.jobs:
      93  for job_ in group.jobs:
     102  for other_job in group.jobs:
     115  for job_ in group.jobs:
|
/external/fonttools/Lib/fontTools/ |
D | ttx.py |
     264  jobs = []
     288  jobs.append((action, input, output))
     289  return jobs, options
     292  def process(jobs, options):  argument
     293  for action, input, output in jobs:
     307  jobs, options = parseOptions(args)
     309  process(jobs, options)
|
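fontTools' `ttx` entry point reduces the command line to a list of `(action, input, output)` tuples and then replays them; `process()` is just a dispatch loop. The shape as a self-contained sketch — the action selection rule and the stand-in actions are illustrative, not ttx's actual logic:

```python
def dump(input_path, output_path, options):
    print('dump', input_path, '->', output_path)

def compile_(input_path, output_path, options):
    print('compile', input_path, '->', output_path)

def parse_options(args):
    """Sketch: choose an action per input file, as ttx's parseOptions does."""
    jobs = []
    for path in args:
        action = compile_ if path.endswith('.ttx') else dump  # assumed rule
        jobs.append((action, path, path + '.out'))
    return jobs, None

def process(jobs, options):
    # Replay the queued (action, input, output) tuples in order.
    for action, input_path, output_path in jobs:
        action(input_path, output_path, options)

jobs, options = parse_options(['font.ttf', 'font.ttx'])
process(jobs, options)
```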