
Searched full:jobs (Results 1 – 25 of 808) sorted by relevance


/external/autotest/tko/migrations/
003_add_test_timestamps.py
30 jobs.tag AS job_tag,
31 jobs.label AS job_label,
32 jobs.username AS job_username,
33 jobs.queued_time AS job_queued_time,
34 jobs.started_time AS job_started_time,
35 jobs.finished_time AS job_finished_time,
44 INNER JOIN jobs ON jobs.job_idx = tests.job_idx
45 INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
60 jobs.tag AS job_tag,
61 jobs.label AS job_label,
[all …]
004_add_test_started.py
31 jobs.tag AS job_tag,
32 jobs.label AS job_label,
33 jobs.username AS job_username,
34 jobs.queued_time AS job_queued_time,
35 jobs.started_time AS job_started_time,
36 jobs.finished_time AS job_finished_time,
45 INNER JOIN jobs ON jobs.job_idx = tests.job_idx
46 INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
62 jobs.tag AS job_tag,
63 jobs.label AS job_label,
[all …]
002_add_job_timestamps.py
12 ALTER TABLE jobs ADD COLUMN queued_time datetime NULL;
13 ALTER TABLE jobs ADD COLUMN started_time datetime NULL;
14 ALTER TABLE jobs ADD COLUMN finished_time datetime NULL;
19 ALTER TABLE jobs DROP queued_time, DROP started_time, DROP finished_time;
33 jobs.tag AS job_tag,
34 jobs.label AS job_label,
35 jobs.username AS job_username,
36 jobs.queued_time AS job_queued_time,
37 jobs.started_time AS job_started_time,
38 jobs.finished_time AS job_finished_time,
[all …]
031_rename_tko_tables.py
185 jobs.tag AS job_tag,
186 jobs.label AS job_label,
187 jobs.username AS job_username,
188 jobs.queued_time AS job_queued_time,
189 jobs.started_time AS job_started_time,
190 jobs.finished_time AS job_finished_time,
199 INNER JOIN jobs ON jobs.job_idx = tests.job_idx
200 INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
216 jobs.tag AS job_tag,
217 jobs.label AS job_label,
[all …]
030_add_afe_job_id_to_jobs.py
2 ALTER TABLE jobs
5 UPDATE jobs
10 ON jobs(afe_job_id);
23 jobs.tag AS job_tag,
24 jobs.label AS job_name,
25 jobs.username AS job_owner,
26 jobs.queued_time AS job_queued_time,
27 jobs.started_time AS job_started_time,
28 jobs.finished_time AS job_finished_time,
29 jobs.afe_job_id AS afe_job_id,
[all …]
013_fix_perf_view.py
21 jobs.tag AS job_tag,
22 jobs.label AS job_label,
23 jobs.username AS job_username,
24 jobs.queued_time AS job_queued_time,
25 jobs.started_time AS job_started_time,
26 jobs.finished_time AS job_finished_time,
38 INNER JOIN jobs ON jobs.job_idx = tests.job_idx
39 INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
57 jobs.tag AS job_tag,
58 jobs.label AS job_label,
[all …]
015_support_graphing_interface.py
39 jobs.tag AS job_tag,
40 jobs.label AS job_name,
41 jobs.username AS job_owner,
42 jobs.queued_time AS job_queued_time,
43 jobs.started_time AS job_started_time,
44 jobs.finished_time AS job_finished_time,
53 LEFT OUTER JOIN jobs ON jobs.job_idx = tests.job_idx
54 LEFT OUTER JOIN machines ON machines.machine_idx = jobs.machine_idx
75 jobs.tag AS job_tag,
76 jobs.label AS job_name,
[all …]
014_add_test_view_2.py
13 jobs.tag AS job_tag,
14 jobs.label AS job_name,
15 jobs.username AS job_owner,
16 jobs.queued_time AS job_queued_time,
17 jobs.started_time AS job_started_time,
18 jobs.finished_time AS job_finished_time,
27 INNER JOIN jobs ON jobs.job_idx = tests.job_idx
28 INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
016_modify_perf_view_2.py
23 jobs.tag AS job_tag,
24 jobs.label AS job_name,
25 jobs.username AS job_owner,
26 jobs.queued_time AS job_queued_time,
27 jobs.started_time AS job_started_time,
28 jobs.finished_time AS job_finished_time,
40 LEFT OUTER JOIN jobs ON jobs.job_idx = tests.job_idx
41 LEFT OUTER JOIN machines ON machines.machine_idx = jobs.machine_idx
/external/skia/bin/
try.py
9 """Submit one or more try jobs."""
26 JOBS_JSON = os.path.join(INFRA_BOTS, 'jobs.json')
37 """Obtain the list of jobs from the given repo."""
48 jobs = json.loads(subprocess.check_output([
49 utils.GIT, 'show', 'master:infra/bots/jobs.json']))
50 return (BUCKET_SKIA_INTERNAL, jobs)
55 d = 'Helper script for triggering try jobs defined in %s.' % JOBS_JSON
58 help='Just list the jobs; do not trigger anything.')
60 help=('If set, include internal jobs. You must have '
66 # Load and filter the list of jobs.
[all …]
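The excerpt above loads the try-job list straight out of git history. A minimal standalone sketch of that step, assuming it runs inside a Skia checkout whose master branch carries infra/bots/jobs.json:

    # Sketch of the job-loading step shown in try.py above.
    import json
    import subprocess

    jobs = json.loads(subprocess.check_output(
        ['git', 'show', 'master:infra/bots/jobs.json']))
    print('%d try jobs defined' % len(jobs))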
/external/skqp/bin/
try.py
9 """Submit one or more try jobs."""
26 JOBS_JSON = os.path.join(INFRA_BOTS, 'jobs.json')
37 """Obtain the list of jobs from the given repo."""
48 jobs = json.loads(subprocess.check_output([
49 utils.GIT, 'show', 'master:infra/bots/jobs.json']))
50 return (BUCKET_SKIA_INTERNAL, jobs)
55 d = 'Helper script for triggering try jobs defined in %s.' % JOBS_JSON
58 help='Just list the jobs; do not trigger anything.')
60 help=('If set, include internal jobs. You must have '
66 # Load and filter the list of jobs.
[all …]
/external/autotest/scheduler/shard/
shard_client.py
46 master in a heartbeat, retrieves new jobs and hosts and inserts them into the
57 - This is to set the status of jobs to completed in the master database after
59 master's afe to see the statuses of all jobs. Otherwise one would have to
67 5. Assign jobs that:
74 6. Serialize the chosen jobs and hosts.
84 2. monitor_db on the shard will pick up these jobs and schedule them on the
87 4. The shard_client will pick up all jobs where shard_id=NULL and will
92 The heartbeat request will also contain the ids of incomplete jobs and the
105 # The maximum number of jobs to attempt to upload in a single heartbeat.
115 to retrieve new jobs from it and to report completed jobs back.
[all …]
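Pieced together from the docstring above, the shard client's main loop heartbeats against the master, persists whatever jobs and hosts come back, and includes its incomplete job ids in the next request. A hedged sketch of that shape; the RPC name, payload fields, and model helpers are modeled on the excerpt, not the real autotest API:

    # Hedged sketch of the heartbeat loop described above.
    import time

    def heartbeat_forever(afe, models, incomplete_job_ids, interval=10):
        """Poll the master AFE and persist the jobs/hosts it assigns us."""
        while True:
            response = afe.run('shard_heartbeat',
                               known_job_ids=incomplete_job_ids())
            for job in response['jobs']:      # new work for this shard
                models.Job.deserialize(job)
            for host in response['hosts']:
                models.Host.deserialize(host)
            time.sleep(interval)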
simple_heartbeat_client.py
22 and jobs to 100. This is useful for debugging issues with only jobs/hosts.
66 @param job_limit: The number of jobs to include in the heartbeat.
85 jobs and hosts.
91 [models.Job.deserialize(j) for j in response['jobs']]
100 @param job_limit: Limit number of jobs retrieved.
107 print ('Jobs: %s, Hosts: %s' %
108 (len(response['jobs']), len(response['hosts'])))
126 help='Limit jobs in the heartbeat.')
/external/autotest/frontend/migrations/
022_implement_sync_count.py
2 ALTER TABLE jobs ADD COLUMN synchronizing tinyint(1) default NULL;
6 ALTER TABLE jobs ADD COLUMN synch_type int(11) default NULL;
7 UPDATE jobs SET synch_type = 1;
8 UPDATE jobs SET synch_type = 2 WHERE synch_count > 1;
19 SELECT jobs.id, jobs.synch_type, COUNT(1) FROM jobs
20 INNER JOIN host_queue_entries AS hqe ON jobs.id = hqe.job_id
21 GROUP BY jobs.id""")
39 manager.execute('UPDATE jobs SET synch_count = 1 WHERE synch_type = 1')
40 manager.execute('UPDATE jobs SET synch_count = 2 '
43 manager.execute('ALTER TABLE jobs DROP COLUMN synch_type')
[all …]
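The up and down halves of this migration are a two-way mapping: upward, synch_type is derived from synch_count (2 for jobs with synch_count > 1, else 1); downward, synch_count is rebuilt from synch_type. As plain functions:

    # The mapping the migration above encodes (the downgrade branch for
    # synch_type == 2 is truncated in the excerpt and assumed here).
    def synch_type_from_count(synch_count):
        return 2 if synch_count > 1 else 1

    def synch_count_from_type(synch_type):
        return 2 if synch_type == 2 else 1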
/external/skia/site/dev/testing/
automated_testing.md
19 may automatically retry tasks within its set limits. Jobs are not retried.
20 Multiple jobs may share the same task, for example, tests on two different
23 Each Skia repository has an `infra/bots/tasks.json` file which defines the jobs
24 and tasks for the repo. Most jobs will run at every commit, but it is possible
25 to specify nightly and weekly jobs as well. For convenience, most repos also
41 Try Jobs
45 repo. You need to have permission to trigger try jobs; if you need permission,
52 or using `bin/try`, a small wrapper for `git cl try` which helps to choose try jobs.
96 <a name="adding-new-jobs"></a>
97 Adding new jobs
[all …]
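As the doc notes, try jobs are triggered with `git cl try` or the `bin/try` wrapper. For illustration, a hedged one-liner driving `git cl try` from Python; the bucket and job name below are placeholders, not real builders:

    # Hedged illustration; bucket and job name are placeholders.
    import subprocess

    subprocess.check_call(
        ['git', 'cl', 'try', '-B', 'skia.primary', '-b', 'Some-Try-Job'])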
/external/skqp/site/dev/testing/
automated_testing.md
19 may automatically retry tasks within its set limits. Jobs are not retried.
20 Multiple jobs may share the same task, for example, tests on two different
23 Each Skia repository has an `infra/bots/tasks.json` file which defines the jobs
24 and tasks for the repo. Most jobs will run at every commit, but it is possible
25 to specify nightly and weekly jobs as well. For convenience, most repos also
41 Try Jobs
45 repo. You need to have permission to trigger try jobs; if you need permission,
52 or using `bin/try`, a small wrapper for `git cl try` which helps to choose try jobs.
96 <a name="adding-new-jobs"></a>
97 Adding new jobs
[all …]
/external/autotest/frontend/client/src/autotest/afe/
JobListView.java
42 private static final String[] statusRadioButtonLabels = {"Queued Jobs", "Running Jobs",
43 "Finished Jobs", "All Jobs"};
47 private static final String[] typeRadioButtonLabels = {"Parent Jobs", "Child Jobs",
48 "All Jobs"};
123 NotifyManager.getInstance().showError("No jobs selected"); in abortSelectedJobs()
189 // All Jobs is selected by default in initialize()
202 // All Jobs is selected by default in initialize()
210 addWidget(new ToolTip("?", "Suite jobs: jobs with child jobs. " + in initialize()
211 "Sub jobs: jobs with a parent jobs. "), in initialize()
259 menu.addItem("Abort jobs", new Command() { in getActionMenu()
/external/mesa3d/src/util/
u_queue.c
262 job = queue->jobs[queue->read_idx]; in util_queue_thread_func()
263 memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job)); in util_queue_thread_func()
278 /* signal remaining jobs before terminating */ in util_queue_thread_func()
282 if (queue->jobs[i].job) { in util_queue_thread_func()
283 util_queue_fence_signal(queue->jobs[i].fence); in util_queue_thread_func()
284 queue->jobs[i].job = NULL; in util_queue_thread_func()
308 queue->jobs = (struct util_queue_job*) in util_queue_init()
310 if (!queue->jobs) in util_queue_init()
367 if (queue->jobs) { in util_queue_init()
371 free(queue->jobs); in util_queue_init()
[all …]
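The excerpt shows both halves of util_queue's contract: a worker pops jobs[read_idx], and on shutdown it signals the fences of everything still queued so no waiter blocks forever. A rough Python analogue of that fence pattern, with threading.Event standing in for util_queue_fence:

    # Rough analogue of the pattern above: drain a job queue, and on
    # termination signal the fences of any jobs left behind.
    import queue
    import threading

    def worker(jobs, stop):
        while not stop.is_set():
            try:
                func, fence = jobs.get(timeout=0.1)
            except queue.Empty:
                continue
            func()
            fence.set()                    # util_queue_fence_signal()
        while True:                        # signal remaining jobs
            try:
                _, fence = jobs.get_nowait()
            except queue.Empty:
                break
            fence.set()

    jobs, stop = queue.Queue(), threading.Event()
    t = threading.Thread(target=worker, args=(jobs, stop))
    t.start()
    fence = threading.Event()
    jobs.put((lambda: print('job ran'), fence))
    fence.wait()
    stop.set()
    t.join()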
/external/libcups/cups/
util.c
33 * Pass @code CUPS_JOBID_ALL@ to cancel all jobs or @code CUPS_JOBID_CURRENT@
44 …I - Job ID, @code CUPS_JOBID_CURRENT@ for the current job, or @code CUPS_JOBID_ALL@ for all jobs */ in cupsCancelJob()
54 * Canceled jobs remain in the job history while purged jobs are removed
57 * Pass @code CUPS_JOBID_ALL@ to cancel all jobs or @code CUPS_JOBID_CURRENT@
69 …I - Job ID, @code CUPS_JOBID_CURRENT@ for the current job, or @code CUPS_JOBID_ALL@ for all jobs */ in cupsCancelJob2()
102 * [purge-job] or [purge-jobs] in cupsCancelJob2()
119 snprintf(uri, sizeof(uri), "ipp://localhost/jobs/%d", job_id); in cupsCancelJob2()
130 ippAddBoolean(request, IPP_TAG_OPERATION, "purge-jobs", 0); in cupsCancelJob2()
136 ippDelete(cupsDoRequest(http, request, "/jobs/")); in cupsCancelJob2()
248 cupsFreeJobs(int num_jobs, /* I - Number of jobs */ in cupsFreeJobs()
[all …]
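The comments above document the cancel/purge split: canceled jobs stay in the job history while purged jobs are removed, and CUPS_JOBID_ALL targets every job on a destination. For illustration only, the same cancel through the pycups bindings (an assumption; the excerpt itself is the C API, and the job id is a placeholder):

    # Hedged example via pycups; mirrors cupsCancelJob() semantics.
    import cups

    conn = cups.Connection()
    conn.cancelJob(42)  # canceled, so the job remains in the history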
/external/grpc-grpc/tools/profiling/microbenchmarks/bm_diff/
bm_build.py
38 '--jobs',
57 def _make_cmd(cfg, benchmarks, jobs): argument
58 return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
61 def build(name, benchmarks, jobs, counters): argument
65 subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
67 subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
70 subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
72 subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
81 build(args.name, args.benchmarks, args.jobs, args.counters)
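The build helper is plain `make` fan-out: `_make_cmd` splices the benchmark targets, the CONFIG, and `-j` parallelism into one command line. What it expands to for a sample input (the benchmark name is a placeholder):

    # _make_cmd copied from the excerpt, applied to sample arguments.
    def _make_cmd(cfg, benchmarks, jobs):
        return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]

    print(_make_cmd('opt', ['bm_error'], 8))
    # ['make', 'bm_error', 'CONFIG=opt', '-j', '8']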
/external/autotest/venv/lucifer/cmd/
job_aborter.py
5 """Monitor jobs and abort them as necessary.
87 """Mark expired jobs failed.
89 Expired jobs are jobs that have an incomplete JobHandoff and that do
90 not have an active lease. These jobs have been handed off to a
91 job_reporter, but that job_reporter has crashed. These jobs are
97 logger.debug('Looking for expired jobs')
111 """Send abort to timed out jobs.
122 """Send abort to jobs marked aborting in Autotest database.
134 # would abort jobs running on the behalf of special tasks and thus
151 """Return a QuerySet of timed out Jobs.
[all …]
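Per the docstring, an expired job is one whose JobHandoff is incomplete and whose lease is no longer active: it was handed to a job_reporter that has since crashed. A hedged sketch of the mark-failed pass; the model and field names are modeled on the excerpt and are assumptions:

    # Hedged sketch of the expired-job pass described above; JobHandoff
    # and lease fields are assumptions, not the real autotest schema.
    import logging

    logger = logging.getLogger(__name__)

    def mark_expired_jobs_failed(models):
        logger.debug('Looking for expired jobs')
        expired = models.JobHandoff.objects.filter(
            completed=False, lease__active=False)
        for handoff in expired:
            handoff.job.set_status('FAILED')  # hypothetical helper
            handoff.completed = True
            handoff.save()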
/external/mesa3d/src/intel/vulkan/tests/
state_pool_no_free.c
37 } jobs[NUM_THREADS]; variable
69 jobs[i].pool = &state_pool; in run_test()
70 jobs[i].id = i; in run_test()
71 pthread_create(&jobs[i].thread, NULL, alloc_states, &jobs[i]); in run_test()
75 pthread_join(jobs[i].thread, NULL); in run_test()
90 if (thread_max < jobs[i].offsets[next[i]]) { in run_test()
91 thread_max = jobs[i].offsets[next[i]]; in run_test()
103 assert(jobs[max_thread_idx].offsets[next[max_thread_idx]] > highest); in run_test()
105 highest = jobs[max_thread_idx].offsets[next[max_thread_idx]]; in run_test()
/external/autotest/scheduler/
host_scheduler_unittests.py
26 """Verify scheduler behavior when pending jobs are already given hosts."""
40 # Check that only_hostless=False pulls new jobs, as always.
55 # Check that we only pull jobs which are not assigned to a shard.
170 """Verify scheduler behavior when pending jobs are already given hosts."""
291 # We have 4 hosts, 5 jobs, one job in the second suite won't
343 jobs = self.create_suite(num=2)
345 params=(jobs[0].id,))[0]
347 hqe = self.assign_host_to_job(host1, jobs[0], r)
348 self.verify_state(r, {jobs['parent_job'].id:1},
349 {host1.id: jobs['parent_job'].id})
[all …]
/external/swiftshader/third_party/LLVM/utils/
llvmbuild
99 # The user may control parallelism via the --jobs and --threads
100 # switches. --jobs tells llvmbuild the maximum total number of builds
104 # than --jobs, --threads workers will be launched and each one will
106 # will invoke GNU make with -j (--jobs / --threads) to use up the
177 parser.add_option("--jobs", "-j", default=8, type="int",
178 help=("The number of simultaneous build jobs "
300 def __init__(self, work_queue, jobs,
305 self.jobs = jobs
517 llvm=dict(debug=["-j" + str(self.jobs)],
518 release=["-j" + str(self.jobs)],
[all …]
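The comment pins down the split: --jobs caps the total number of simultaneous build jobs, --threads caps concurrent builds, and each worker invokes GNU make with -j (--jobs / --threads) so the product never exceeds --jobs. The arithmetic, spelled out:

    # Per-worker make parallelism as described in the excerpt.
    def make_j_per_worker(jobs=8, threads=4):
        return max(1, jobs // threads)

    assert make_j_per_worker(8, 4) * 4 <= 8   # total stays within --jobs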
/external/toolchain-utils/automation/server/
job_group_manager.py
57 for job_ in group.jobs:
68 self._logger.debug('Killing all jobs that belong to %r.', group)
70 for job_ in group.jobs:
73 self._logger.debug('Waiting for jobs to quit.')
93 for job_ in group.jobs:
94 # TODO(bjanakiraman): We should probably only kill dependent jobs
102 for other_job in group.jobs:
115 for job_ in group.jobs:
