/external/skia/bin/try.py:
    48  jobs = json.loads(subprocess.check_output([
    50  return (BUCKET_SKIA_INTERNAL, jobs)
    67  jobs = []
    69  jobs.append((BUCKET_SKIA_PRIMARY, json.load(f)))
    71  jobs.append(get_jobs(REPO_INTERNAL))
    72  jobs.extend(update_meta_config.CQ_INCLUDE_CHROMIUM_TRYBOTS)
    75  for bucket, job_list in jobs:
    79  jobs = filtered_jobs
    82  if len(jobs) == 0:
    86  for bucket, job_list in jobs:
    [all …]

/external/skqp/bin/try.py:
    48  jobs = json.loads(subprocess.check_output([
    50  return (BUCKET_SKIA_INTERNAL, jobs)
    67  jobs = []
    69  jobs.append((BUCKET_SKIA_PRIMARY, json.load(f)))
    71  jobs.append(get_jobs(REPO_INTERNAL))
    72  jobs.extend(update_meta_config.CQ_INCLUDE_CHROMIUM_TRYBOTS)
    75  for bucket, job_list in jobs:
    79  jobs = filtered_jobs
    82  if len(jobs) == 0:
    86  for bucket, job_list in jobs:
    [all …]

/external/mesa3d/src/util/u_queue.c:
    262  job = queue->jobs[queue->read_idx];   [in util_queue_thread_func()]
    263  memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));   [in util_queue_thread_func()]
    282  if (queue->jobs[i].job) {   [in util_queue_thread_func()]
    283  util_queue_fence_signal(queue->jobs[i].fence);   [in util_queue_thread_func()]
    284  queue->jobs[i].job = NULL;   [in util_queue_thread_func()]
    308  queue->jobs = (struct util_queue_job*)   [in util_queue_init()]
    310  if (!queue->jobs)   [in util_queue_init()]
    367  if (queue->jobs) {   [in util_queue_init()]
    371  free(queue->jobs);   [in util_queue_init()]
    404  free(queue->jobs);   [in util_queue_destroy()]
    [all …]

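The u_queue.c hits above trace mesa's ring-buffer job queue: util_queue_thread_func() pops jobs[read_idx] and clears the slot, and teardown signals the fence of every job still pending. A minimal sketch of the same consumer/fence pattern, in Python rather than mesa's C API (all names here are illustrative):

    import threading
    from queue import Queue

    class Job:
        # Illustrative stand-in for struct util_queue_job: a callable plus a fence.
        def __init__(self, fn):
            self.fn = fn
            self.fence = threading.Event()   # signaled when the job completes

    def worker(q):
        # Mirrors util_queue_thread_func(): pop a job, run it, signal its fence.
        while True:
            job = q.get()
            if job is None:                  # shutdown sentinel
                break
            job.fn()
            job.fence.set()

    q = Queue()
    t = threading.Thread(target=worker, args=(q,))
    t.start()
    job = Job(lambda: print("job ran"))
    q.put(job)
    job.fence.wait()                         # caller-side fence wait
    q.put(None)
    t.join()
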
/external/mesa3d/src/intel/vulkan/tests/state_pool_no_free.c:
    37  } jobs[NUM_THREADS];   [variable]
    69  jobs[i].pool = &state_pool;   [in run_test()]
    70  jobs[i].id = i;   [in run_test()]
    71  pthread_create(&jobs[i].thread, NULL, alloc_states, &jobs[i]);   [in run_test()]
    75  pthread_join(jobs[i].thread, NULL);   [in run_test()]
    90  if (thread_max < jobs[i].offsets[next[i]]) {   [in run_test()]
    91  thread_max = jobs[i].offsets[next[i]];   [in run_test()]
    103  assert(jobs[max_thread_idx].offsets[next[max_thread_idx]] > highest);   [in run_test()]
    105  highest = jobs[max_thread_idx].offsets[next[max_thread_idx]];   [in run_test()]

/external/mesa3d/src/intel/vulkan/tests/block_pool_no_free.c:
    38  } jobs[NUM_THREADS];   [variable]
    44  uint32_t job_id = job - jobs;   [in alloc_blocks()]
    122  jobs[i].pool = &pool;   [in run_test()]
    123  jobs[i].id = i;   [in run_test()]
    124  pthread_create(&jobs[i].thread, NULL, alloc_blocks, &jobs[i]);   [in run_test()]
    128  pthread_join(jobs[i].thread, NULL);   [in run_test()]
    133  block_ptrs[i] = jobs[i].blocks;   [in run_test()]
    138  block_ptrs[i] = jobs[i].back_blocks;   [in run_test()]

/external/mesa3d/src/intel/vulkan/tests/state_pool_test_helper.h:
    30  } jobs[NUM_THREADS];   [variable]
    64  jobs[i].pool = state_pool;   [in run_state_pool_test()]
    65  jobs[i].id = i;   [in run_state_pool_test()]
    66  pthread_create(&jobs[i].thread, NULL, alloc_states, &jobs[i]);   [in run_state_pool_test()]
    70  pthread_join(jobs[i].thread, NULL);   [in run_state_pool_test()]

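All three Vulkan test files share one fan-out/join shape: a file-scope jobs[NUM_THREADS] array, one pthread_create() per slot passing &jobs[i] as the thread argument, then pthread_join() over the same array. A hedged Python rendering of that shape (the pool and the allocation body are placeholders, not the real anv test logic):

    import threading

    NUM_THREADS = 16

    class ThreadJob:
        # Per-thread slot, like the struct behind jobs[NUM_THREADS] in the tests.
        def __init__(self, pool, job_id):
            self.pool = pool
            self.id = job_id
            self.offsets = []                # results collected by this thread

    def alloc_states(job):
        # Placeholder body; the real tests hammer a state/block pool here.
        job.offsets.append(job.id)

    pool = object()                          # stand-in for the pool under test
    jobs = [ThreadJob(pool, i) for i in range(NUM_THREADS)]
    threads = [threading.Thread(target=alloc_states, args=(j,)) for j in jobs]
    for t in threads:
        t.start()
    for t in threads:
        t.join()                             # like pthread_join(jobs[i].thread, NULL)
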
/external/grpc-grpc/tools/profiling/microbenchmarks/bm_diff/bm_build.py:
    57  def _make_cmd(cfg, benchmarks, jobs):   [argument]
    58  return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
    61  def build(name, benchmarks, jobs, counters):   [argument]
    65  subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
    67  subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
    70  subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
    72  subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
    81  build(args.name, args.benchmarks, args.jobs, args.counters)

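The bm_build.py hits show essentially the whole build step: compose a make invocation with a CONFIG flavor and -j parallelism, then run it once per configuration. A sketch reconstructed from the fragments above; gating the 'counters' build on the counters flag is an assumption, since the surrounding lines are elided:

    import subprocess

    def _make_cmd(cfg, benchmarks, jobs):
        # e.g. ['make', '<benchmark>', 'CONFIG=opt', '-j', '8']
        return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]

    def build(benchmarks, jobs, counters):
        subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
        if counters:                         # gating on the flag is assumed
            subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
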
/external/autotest/scheduler/shard/shard_client.py:
    250  jobs = []
    261  jobs.append(job_to_upload)
    262  return jobs
    272  def _get_hqes_for_jobs(self, jobs):   [argument]
    274  for job in jobs:
    295  jobs = models.Job.objects.filter(hostqueueentry__complete=False)
    296  job_ids = list(jobs.values_list('id', flat=True))
    297  self._report_job_time_distribution(jobs)
    326  jobs = [job.serialize(include_dependencies=False) for job in job_objs]
    327  if len(jobs) > MAX_UPLOAD_JOBS:
    [all …]

/external/autotest/scheduler/shard/simple_heartbeat_server.py:
    78  jobs = models.Job.objects.filter(
    82  return jobs[:job_limit] if job_limit is not None else jobs
    95  def _create_packet(hosts, jobs):   [argument]
    98  'jobs': [j.serialize() for j in jobs]
    124  job_time, jobs = self._get_jobs(board, job_limit)
    126  serialize_time, heartbeat_packet = self._create_packet(hosts, jobs)

/external/libdrm/tests/exynos/exynos_fimg2d_event.c:
    110  static void wait_all_jobs(struct g2d_job* jobs, unsigned num_jobs)   [in wait_all_jobs(), argument]
    115  while (jobs[i].busy)   [in wait_all_jobs()]
    121  static struct g2d_job* free_job(struct g2d_job* jobs, unsigned num_jobs)   [in free_job()]
    126  if (jobs[i].busy == 0)   [in free_job()]
    127  return &jobs[i];   [in free_job()]
    136  struct g2d_job *jobs = calloc(num_jobs, sizeof(struct g2d_job));   [in g2d_work(), local]
    142  jobs[i].id = i;   [in g2d_work()]
    150  j = free_job(jobs, num_jobs);   [in g2d_work()]
    189  wait_all_jobs(jobs, num_jobs);   [in g2d_work()]
    190  free(jobs);   [in g2d_work()]

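exynos_fimg2d_event.c manages a fixed, calloc'ed pool of g2d_job slots: free_job() scans for the first slot with busy == 0 and reuses it, while wait_all_jobs() spins until every slot clears. The slot-reuse idea in a small Python sketch (illustrative names, not the libdrm API):

    import time

    class G2dJob:
        def __init__(self, job_id):
            self.id = job_id
            self.busy = False

    def free_job(jobs):
        # Like free_job(): hand out the first idle slot, or None if all are busy.
        for job in jobs:
            if not job.busy:
                return job
        return None

    def wait_all_jobs(jobs, poll=0.001):
        # Like wait_all_jobs(): poll each slot until it is marked done.
        for job in jobs:
            while job.busy:
                time.sleep(poll)

    jobs = [G2dJob(i) for i in range(8)]     # the calloc'ed pool of job slots
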
/external/autotest/server/cros/dynamic_suite/job_status_unittest.py:
    58  jobs = [FakeJob(0, [FakeStatus('GOOD', 'T0', ''),
    75  for status in jobs[4].statuses:
    78  job_id_set = set([job.id for job in jobs])
    80  [jobs[1]],
    81  [jobs[0], jobs[2]],
    82  jobs[3:6]
    95  waiter.add_jobs(jobs)
    97  for job in jobs[:6]:  # the 'GOOD' SERVER_JOB shouldn't be there.

/external/toolchain-utils/automation/common/job_group.py:
    29  jobs=None,   [argument]
    36  self.jobs = []
    41  if jobs:
    42  for job in jobs:
    69  job) for job in self.jobs])
    72  self.jobs.append(job)

/external/swiftshader/third_party/LLVM/utils/llvmbuild:
    300  def __init__(self, work_queue, jobs,
    305  self.jobs = jobs
    517  llvm=dict(debug=["-j" + str(self.jobs)],
    518  release=["-j" + str(self.jobs)],
    519  paranoid=["-j" + str(self.jobs)]),
    520  llvm_gcc=dict(debug=["-j" + str(self.jobs),
    522  release=["-j" + str(self.jobs),
    524  paranoid=["-j" + str(self.jobs),
    526  llvm2=dict(debug=["-j" + str(self.jobs)],
    527  release=["-j" + str(self.jobs)],
    [all …]

/external/autotest/scheduler/host_scheduler_unittests.py:
    343  jobs = self.create_suite(num=2)
    345  params=(jobs[0].id,))[0]
    347  hqe = self.assign_host_to_job(host1, jobs[0], r)
    348  self.verify_state(r, {jobs['parent_job'].id:1},
    349  {host1.id: jobs['parent_job'].id})
    352  self.verify_state(r, {jobs['parent_job'].id:1},
    353  {host1.id: jobs['parent_job'].id})
    355  self.assign_host_to_job(host2, jobs[1], r)
    356  self.verify_state(r, {jobs['parent_job'].id:2},
    357  {host1.id: jobs['parent_job'].id,
    [all …]

/external/fonttools/Tests/ttx/ttx_test.py:
    85  jobs, _ = ttx.parseOptions([temp_path])
    86  self.assertEqual(jobs[0][0].__name__, "ttDump")
    88  jobs[0][1:],
    100  jobs, _ = ttx.parseOptions([temp_path])
    101  self.assertEqual(jobs[0][0].__name__, "ttDump")
    103  jobs[0][1:],
    114  jobs, _ = ttx.parseOptions([temp_path])
    115  self.assertEqual(jobs[0][0].__name__, "ttCompile")
    117  jobs[0][1:],
    129  jobs, _ = ttx.parseOptions([temp_path])
    [all …]

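The ttx_test.py assertions reveal the shape of parseOptions()' return value: jobs is a list of tuples whose first element is the action callable (ttDump or ttCompile) and whose tail is its arguments, hence the checks on jobs[0][0].__name__ and jobs[0][1:]. A hypothetical sketch of that dispatch-list pattern:

    def tt_dump(input_path, output_path):
        # Hypothetical stand-in for ttx's ttDump action.
        print("dumping", input_path, "to", output_path)

    # Each job is (action, *args); running the jobs is just tuple unpacking.
    jobs = [(tt_dump, "font.ttf", "font.ttx")]
    for action, *args in jobs:
        action(*args)
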
/external/grpc-grpc/tools/profiling/qps/qps_diff.py:
    64  def _make_cmd(jobs):   [argument]
    65  return ['make', '-j', '%d' % jobs, 'qps_json_driver', 'qps_worker']
    68  def build(name, jobs):   [argument]
    72  subprocess.check_call(_make_cmd(jobs))
    75  subprocess.check_call(_make_cmd(jobs))
    145  build('new', args.jobs)
    152  build('old', args.jobs)

/external/skia/site/dev/testing/automated_testing.md:
    20  Multiple jobs may share the same task, for example, tests on two different
    23  Each Skia repository has an `infra/bots/tasks.json` file which defines the jobs
    24  and tasks for the repo. Most jobs will run at every commit, but it is possible
    25  to specify nightly and weekly jobs as well. For convenience, most repos also
    45  repo. You need to have permission to trigger try jobs; if you need permission,
    52  or using `bin/try`, a small wrapper for `git cl try` which helps to choose try jobs.
    96  <a name="adding-new-jobs"></a>
    97  Adding new jobs
    100  If you would like to add jobs to build or test new configurations, please file a
    103  If you know that the new jobs will need new hardware or you aren't sure which
    [all …]

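Per the doc excerpt, infra/bots/tasks.json is where each repo defines its jobs and tasks (the skqp copy below repeats the same doc). A speculative sketch of reading the job list out of that file; the top-level 'jobs' key is an assumption inferred from the doc, not a confirmed schema:

    import json

    # Assumed layout: a top-level "jobs" mapping in infra/bots/tasks.json.
    with open('infra/bots/tasks.json') as f:
        tasks_cfg = json.load(f)
    for name in sorted(tasks_cfg.get('jobs', [])):
        print(name)
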
/external/skqp/site/dev/testing/automated_testing.md:
    20  Multiple jobs may share the same task, for example, tests on two different
    23  Each Skia repository has an `infra/bots/tasks.json` file which defines the jobs
    24  and tasks for the repo. Most jobs will run at every commit, but it is possible
    25  to specify nightly and weekly jobs as well. For convenience, most repos also
    45  repo. You need to have permission to trigger try jobs; if you need permission,
    52  or using `bin/try`, a small wrapper for `git cl try` which helps to choose try jobs.
    96  <a name="adding-new-jobs"></a>
    97  Adding new jobs
    100  If you would like to add jobs to build or test new configurations, please file a
    103  If you know that the new jobs will need new hardware or you aren't sure which
    [all …]

/external/glide/library/src/main/java/com/bumptech/glide/load/engine/Engine.java:
    30  private final Map<Key, EngineJob> jobs;   [field in Engine]
    63  Map<Key, EngineJob> jobs, EngineKeyFactory keyFactory,   [in Engine(), argument]
    79  if (jobs == null) {   [in Engine()]
    80  jobs = new HashMap<Key, EngineJob>();   [in Engine()]
    82  this.jobs = jobs;   [in Engine()]
    177  EngineJob current = jobs.get(key);   [in load()]
    190  jobs.put(key, engineJob);   [in load()]
    237  jobs.remove(key);   [in onEngineJobComplete()]
    242  EngineJob current = jobs.get(key);   [in onEngineJobCancelled()]
    244  jobs.remove(key);   [in onEngineJobCancelled()]

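Engine.java's matches outline Glide's in-flight job deduplication: load() first consults jobs.get(key), registers new work with jobs.put(), and onEngineJobComplete()/onEngineJobCancelled() remove the entry so equivalent requests coalesce into a single EngineJob. The same idea as a Python sketch (names are illustrative):

    class Engine:
        def __init__(self):
            self.jobs = {}              # key -> in-flight job, like Map<Key, EngineJob>

        def load(self, key, start_job):
            current = self.jobs.get(key)
            if current is not None:
                return current          # coalesce duplicate requests
            job = start_job()
            self.jobs[key] = job
            return job

        def on_job_complete(self, key):
            self.jobs.pop(key, None)    # like onEngineJobComplete()

        def on_job_cancelled(self, key, job):
            # Drop the mapping only if it still points at the cancelled job.
            if self.jobs.get(key) is job:
                del self.jobs[key]
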
/external/skia/tools/skqp/find_commit_with_best_gold_results.py:
    36  jobs = json.load(f)
    37  for job in jobs:
    57  def get_results_for_commit(commit, jobs):   [argument]
    72  for job in jobs for config in CONFIGS]
    85  jobs = [j for j in get_jobs()]
    89  results.append((commit_hash, get_results_for_commit(commit_hash, jobs)))

/external/autotest/frontend/afe/direct_afe_unittest.py:
    14  jobs = afe.get_jobs()
    15  self.assertEquals(len(jobs), 0)
    25  jobs = afe.get_jobs()
    26  self.assertEquals(len(jobs), 1)

/external/libcups/cups/util.c:
    249  cups_job_t *jobs) /* I - Jobs */   [in cupsFreeJobs(), argument]
    255  if (num_jobs <= 0 || !jobs)   [in cupsFreeJobs()]
    258  for (i = num_jobs, job = jobs; i > 0; i --, job ++)   [in cupsFreeJobs()]
    266  free(jobs);   [in cupsFreeJobs()]
    394  cupsGetJobs(cups_job_t **jobs, /* O - Job data */   [in cupsGetJobs(), argument]
    403  return (cupsGetJobs2(CUPS_HTTP_DEFAULT, jobs, name, myjobs, whichjobs));   [in cupsGetJobs()]
    421  cups_job_t **jobs, /* O - Job data */   [in cupsGetJobs2(), argument]
    464  if (!jobs)   [in cupsGetJobs2()]
    534  *jobs = NULL;   [in cupsGetJobs2()]
    628  temp = realloc(*jobs, sizeof(cups_job_t) * (size_t)(n + 1));   [in cupsGetJobs2()]
    [all …]

/external/syzkaller/sys/syz-sysgen/sysgen.go:
    76  var jobs []*Job
    78  jobs = append(jobs, &Job{
    82  sort.Slice(jobs, func(i, j int) bool {
    83  return jobs[i].Target.Arch < jobs[j].Target.Arch
    86  wg.Add(len(jobs))
    88  for _, job := range jobs {
    122  for _, job := range jobs {
    142  if count == len(jobs) {

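sysgen.go builds the jobs slice, sorts it by target arch so generated output is deterministic, fans the jobs out under a sync.WaitGroup, and finally checks that the completion count matches len(jobs). A loose Python equivalent of that flow (the per-arch work is a stub):

    import threading

    class SysgenJob:
        def __init__(self, arch):
            self.arch = arch
            self.done = False

    def run(job):
        job.done = True                  # the real job emits per-arch sources

    jobs = [SysgenJob(a) for a in ('amd64', 'arm64', 'riscv64')]
    jobs.sort(key=lambda j: j.arch)      # deterministic order, as in sysgen.go
    threads = [threading.Thread(target=run, args=(j,)) for j in jobs]
    for t in threads:
        t.start()
    for t in threads:
        t.join()                         # stands in for wg.Wait()
    assert sum(j.done for j in jobs) == len(jobs)   # cf. "if count == len(jobs)"
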
/external/llvm/utils/lit/lit/run.py:
    190  def execute_tests(self, display, jobs, max_time=None,   [argument]
    215  if jobs != 1 and use_processes and multiprocessing:
    220  consumer = MultiprocessResultsConsumer(self, display, jobs)
    241  queuer = task_impl(target=provider.queue_tests, args=(self.tests, jobs))
    259  if jobs == 1:
    263  self._execute_tests_in_parallel(task_impl, provider, consumer, jobs)
    276  def _execute_tests_in_parallel(self, task_impl, provider, consumer, jobs):   [argument]
    280  for i in range(jobs)]

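lit's run.py picks its dispatch strategy from the jobs count: a plain in-process loop when jobs == 1, a multiprocess producer/consumer pipeline otherwise. A much-reduced sketch of that branch; the thread pool here stands in for lit's provider/consumer machinery:

    from multiprocessing.pool import ThreadPool

    def execute_tests(tests, run_one, jobs):
        if jobs == 1:
            for test in tests:           # serial path, cf. run.py line 259
                run_one(test)
        else:
            with ThreadPool(jobs) as pool:
                pool.map(run_one, tests)
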
/external/llvm/utils/llvm-compilers-check:
    255  def __init__(self, work_queue, jobs,   [argument]
    260  self.jobs = jobs
    405  llvm=dict(debug=["-j" + str(self.jobs)],
    406  release=["-j" + str(self.jobs)],
    407  paranoid=["-j" + str(self.jobs)]),
    408  dragonegg=dict(debug=["-j" + str(self.jobs)],
    409  release=["-j" + str(self.jobs)],
    410  paranoid=["-j" + str(self.jobs)]))
    597  jobs = options.jobs // options.threads   [variable]
    598  if jobs == 0:
    [all …]

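The last two hits show a small but easy-to-miss detail: llvm-compilers-check divides the global -j budget across builder threads with integer division, then clamps the result so no thread ends up with zero make jobs. A worked example:

    total_jobs, threads = 8, 12
    jobs = total_jobs // threads    # integer division gives 0 here
    if jobs == 0:
        jobs = 1                    # every builder thread gets at least one job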