/external/autotest/server/samples/

  profilertest.srv
      3: ssh_hosts = [hosts.create_host(m) for m in machines]
      8: machines, name):
     18: at.host.hostname, "PROF_MASTER", str(machines)))
     27: def wait_for_profilers(machines, timeout = 180):
     31: sync_bar.rendezvous_servers("PROF_MASTER", *machines)
     34: def start_profilers(machines, timeout = 180):
     38: start_bar.rendezvous_servers("PROF_MASTER", *machines)
     41: def stop_profilers(machines, timeout = 120):
     43: stop_bar.rendezvous_servers("PROF_MASTER", *machines)
     47: wait_for_profilers(machines)
    [all …]

  run_test.srv
      8: print "usage: -t <test name> -m <machines> -l <log dir>"
     38: print "Going to launch %s on %r with log dir of %s." % (test, machines, logdir)
     39: parallel_simple(run, machines)

/external/autotest/server/

  standalone_profiler.py
     31: def generate_test(machines, hostname, profilers, timeout_start, timeout_stop,  (argument)
     59: hostname, _PROF_MASTER, machines))
     68: def wait_for_profilers(machines, timeout=300):  (argument)
     71: sb.rendezvous_servers(_PROF_MASTER, *machines)
     74: def start_profilers(machines, timeout=120):  (argument)
     77: sb.rendezvous_servers(_PROF_MASTER, *machines)
     80: def stop_profilers(machines, timeout=120):  (argument)
     83: sb.rendezvous_servers(_PROF_MASTER, *machines)
     86: def finish_profilers(machines, timeout=120):  (argument)
     89: sb.rendezvous_servers(_PROF_MASTER, *machines)

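The wait/start/stop/finish helpers above all follow autotest's barrier pattern: each participant builds a barrier for a given tag and timeout, then blocks in rendezvous_servers() until the profiler master and every test machine has arrived. A minimal sketch of that flow; the barrier constructor arguments and the tag strings are assumptions (only the rendezvous_servers() calls appear in the matches):

    # Sketch only: mirrors the helpers matched in standalone_profiler.py.
    # The barrier constructor signature and tag names are assumed.
    from autotest_lib.client.common_lib import barrier

    _PROF_MASTER = "PROF_MASTER"


    def _rendezvous(tag, machines, timeout):
        """Block until the profiler master and all test machines reach `tag`."""
        sb = barrier.barrier(_PROF_MASTER, tag, timeout)  # assumed signature
        sb.rendezvous_servers(_PROF_MASTER, *machines)    # as in the matches above


    def wait_for_profilers(machines, timeout=300):
        _rendezvous('sync_profilers', machines, timeout)


    def start_profilers(machines, timeout=120):
        _rendezvous('start_profilers', machines, timeout)


    def stop_profilers(machines, timeout=120):
        _rendezvous('stop_profilers', machines, timeout)
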
  autoserv.py
     88: if parser.options.machines:
     89: machines = parser.options.machines.replace(',', ' ').strip().split()
     91: machines = []
     94: machines = []
     99: machines.append(m)
    101: logging.debug('Machines: %s', ','.join(machines))
    103: if machines:
    104: for machine in machines:
    107: machines = list(set(machines))
    108: machines.sort()
    [all …]

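The autoserv matches show how the -m argument is normalized: commas become whitespace, the string is split, empty entries are dropped, and the result is de-duplicated and sorted. A standalone sketch of that normalization (the function name is mine, not autoserv's):

    def parse_machine_list(machines_arg):
        """Normalize a comma- or space-separated machine list, as autoserv does.

        'host2,host1 host3,host1' -> ['host1', 'host2', 'host3']
        """
        if not machines_arg:
            return []
        machines = machines_arg.replace(',', ' ').strip().split()
        machines = [m for m in machines if m]  # drop empty entries
        machines = list(set(machines))         # de-duplicate
        machines.sort()
        return machines
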
  autoserv
     88: if parser.options.machines:
     89: machines = parser.options.machines.replace(',', ' ').strip().split()
     91: machines = []
     94: machines = []
     99: machines.append(m)
    101: logging.debug('Machines: %s', ','.join(machines))
    103: if machines:
    104: for machine in machines:
    107: machines = list(set(machines))
    108: machines.sort()
    [all …]

  server_job.py
    233: def __init__(self, control, args, resultdir, label, user, machines,  (argument)
    299: self.machines = machines
    324: 'hostname' : ','.join(machines),
    471: if not self.machines:
    487: if not self.machines:
    503: if not self.machines:
    519: if not self.machines:
    569: def _make_parallel_wrapper(self, function, machines, log):  (argument)
    578: if (machines and isinstance(machines, list)
    579: and isinstance(machines[0], dict)):
    [all …]

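The isinstance() check at lines 578-579 shows that _make_parallel_wrapper() accepts machines either as a plain list of hostnames or as a list of per-host dicts. A small normalization helper in that spirit; the 'hostname' key is an assumption (only the type check itself appears in the matches):

    def normalize_machines(machines):
        """Return a plain list of hostnames from either str or dict entries.

        The dict key name ('hostname') is an assumption; only the
        isinstance() check appears in the matched lines.
        """
        if machines and isinstance(machines, list) and isinstance(machines[0], dict):
            return [m['hostname'] for m in machines]
        return list(machines or [])
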
  base_utils.py
    191: def default_mappings(machines):  (argument)
    199: mach = machines[0]
    201: if len(machines) > 1:
    202: machines = machines[1:]
    203: for machine in machines:
    209: def form_ntuples_from_machines(machines, n=2, mapping_func=default_mappings):  (argument)
    214: (mappings, failures) = mapping_func(machines)

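form_ntuples_from_machines() groups a machine list into n-tuples via a mapping function and reports the machines it could not place. The real grouping logic is not visible in the matches, so the following is only an illustrative simplification of the idea:

    def form_ntuples(machines, n=2):
        """Group machines into n-tuples; report leftovers that do not fill a tuple.

        Illustrative only; base_utils.form_ntuples_from_machines() additionally
        takes a mapping_func and may group differently.
        """
        ntuples = []
        failures = []
        for i in range(0, len(machines) - n + 1, n):
            ntuples.append(tuple(machines[i:i + n]))
        leftover = len(machines) % n
        if leftover:
            failures.extend(machines[-leftover:])
        return ntuples, failures


    # form_ntuples(['a', 'b', 'c', 'd', 'e']) -> ([('a', 'b'), ('c', 'd')], ['e'])
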
  autoserv_utils.py
     18: def autoserv_run_job_command(autoserv_directory, machines,  (argument)
     79: if machines:
     80: command += ['-m', machines]

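autoserv_run_job_command() assembles the autoserv command line and appends '-m <machines>' only when a machine list is supplied. A simplified sketch of that assembly; everything other than the '-m' handling is an assumption:

    import os


    def autoserv_command(autoserv_directory, machines=None, results_dir=None):
        """Build a minimal autoserv command line (sketch, not the real helper)."""
        command = [os.path.join(autoserv_directory, 'autoserv')]
        if results_dir:
            command += ['-r', results_dir]  # assumed flag for the results directory
        if machines:
            command += ['-m', machines]     # matched: only added when non-empty
        return command
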
/external/mesa3d/src/gallium/drivers/softpipe/

  sp_compute.c
    112: struct tgsi_exec_machine **machines)  (in run_workgroup, argument)
    120: grp_hit_barrier |= cs_run(cs, g_w, g_h, g_d, machines[i], restart_threads);  (in run_workgroup)
    174: struct tgsi_exec_machine **machines;  (in softpipe_launch_grid, local)
    193: machines = CALLOC(sizeof(struct tgsi_exec_machine *), num_threads_in_group);  (in softpipe_launch_grid)
    194: if (!machines) {  (in softpipe_launch_grid)
    204: machines[idx] = tgsi_exec_machine_create(PIPE_SHADER_COMPUTE);  (in softpipe_launch_grid)
    206: machines[idx]->LocalMem = local_mem;  (in softpipe_launch_grid)
    207: machines[idx]->LocalMemSize = cs->shader.req_local_mem;  (in softpipe_launch_grid)
    208: cs_prepare(cs, machines[idx],  (in softpipe_launch_grid)
    215: tgsi_exec_set_constant_buffers(machines[idx], PIPE_MAX_CONSTANT_BUFFERS,  (in softpipe_launch_grid)
    [all …]

/external/autotest/server/site_tests/p2p_EndToEndTest/

  control
     26: The test runs over a set of N machines generating a random file in one of
     27: them (called the "master") and sharing it with the rest of the machines. The
     28: success condition of this test occurs when all the N machines have the same
     43: peers=machines,
     46: if len(machines) < 2:
     47: raise error.TestError('At least two machines are needed for this test')
     49: # The file ID shared among all test machines.
     53: barrier = queue_barrier.QueueBarrier(len(machines)-1)
     56: master = machines[0]
     58: job.parallel_simple(run, machines)

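The control file picks the first host as the master, sizes a QueueBarrier for the remaining peers, and then runs a per-host run() function in parallel with job.parallel_simple(). A condensed sketch of that structure; as in any autotest control file, job, machines and error are supplied by the harness, and the run_test() arguments here are assumptions (only peers=machines appears in the matches):

    # Sketch of the control-file flow implied by the matches above.
    if len(machines) < 2:
        raise error.TestError('At least two machines are needed for this test')

    master = machines[0]                                     # first host seeds the file
    barrier = queue_barrier.QueueBarrier(len(machines) - 1)  # one slot per non-master peer

    def run(machine):
        # Assumed arguments: the real control file passes the shared file id
        # and the master/peer roles in some form; only peers=machines is matched.
        job.run_test('p2p_EndToEndTest',
                     peers=machines,
                     is_master=(machine == master),
                     tag=machine)

    job.parallel_simple(run, machines)
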
/external/toolchain-utils/cros_utils/

  locks.py
     13: def AcquireLock(machines, chromeos_root, timeout=1200):  (argument)
     20: afe_lock_machine.AFELockManager(machines, False, chromeos_root,
     28: repr(machines), timeout, str(e)))
     34: def ReleaseLock(machines, chromeos_root):  (argument)
     38: afe_lock_machine.AFELockManager(machines, False, chromeos_root,
     43: (repr(machines), str(e)))

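AcquireLock() and ReleaseLock() wrap afe_lock_machine.AFELockManager so callers can bracket work that needs exclusive machines. The signatures below are taken from the matches; the context-manager wrapper itself is my addition and assumes the two calls raise on failure:

    import contextlib

    from cros_utils import locks  # the module matched above


    @contextlib.contextmanager
    def locked_machines(machines, chromeos_root, timeout=1200):
        """Hold the AFE lock on `machines` for the duration of a with-block (sketch)."""
        locks.AcquireLock(machines, chromeos_root, timeout=timeout)
        try:
            yield machines
        finally:
            locks.ReleaseLock(machines, chromeos_root)


    # Usage:
    #   with locked_machines(['chromeos-test1'], '/path/to/chromeos_root'):
    #       run_benchmarks()
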
/external/squashfs-tools/RELEASE-READMEs/

  README-2.0-AMD64
      5: filesystems on amd64 machines. These filesystems work correctly on amd64
      6: machines, but cannot be mounted on non-amd64 machines. Likewise, filesystems
      7: generated on non amd64 machines could not be mounted on amd64 machines.
     12: generated by previous releases will not be mountable on amd64 machines

/external/toolchain-utils/

  afe_lock_machine.py
     15: from cros_utils import machines
    128: self.machines = list(set(remotes)) or []
    130: if self.machines and self.AllLabMachines():
    144: if not self.machines:
    145: self.machines = self.toolchain_lab_machines + self.GetAllNonlabMachines()
    151: for m in self.machines:
    167: if not machines.MachineIsPingable(machine, logging_level='none'):
    169: if not machines.MachineIsPingable(cros_machine, logging_level='none'):
    273: for m in self.machines:
    301: for m in self.machines:
    [all …]

/external/toolchain-utils/automation/server/

  machine_manager_test.py
     22: machines = self.machine_manager.GetMachines(mach_spec_list)
     23: self.assertTrue(machines)
     27: machines = self.machine_manager.GetMachines(mach_spec_list)
     28: self.assertTrue(machines)

  job_executer.py
     17: def __init__(self, job_to_execute, machines, listeners):  (argument)
     20: assert machines
     24: self.machines = machines
    108: self.job.machines = self.machines

  machine_manager.py
     28: def __init__(self, machines):  (argument)
     29: self._machine_pool = machines
     71: def ReturnMachines(self, machines):  (argument)
     73: for m in machines:

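MachineManager holds a pool of machines, hands subsets out through GetMachines() (see machine_manager_test.py above and job_manager.py below) and takes them back through ReturnMachines(). A minimal pool sketch under those assumptions; the real GetMachines() takes a list of machine specs rather than a count, and the locking here is only illustrative:

    import threading


    class SimpleMachinePool(object):
        """Illustrative stand-in for machine_manager.MachineManager."""

        def __init__(self, machines):
            self._machine_pool = list(machines)
            self._lock = threading.Lock()

        def GetMachines(self, count):
            """Take `count` machines, or return None if not enough are free."""
            with self._lock:
                if len(self._machine_pool) < count:
                    return None
                acquired = self._machine_pool[:count]
                del self._machine_pool[:count]
                return acquired

        def ReturnMachines(self, machines):
            """Put machines back into the pool."""
            with self._lock:
                for m in machines:
                    self._machine_pool.append(m)
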
  job_manager.py
    149: self.machine_manager.ReturnMachines(job_.machines)
    182: machines = self.machine_manager.GetMachines(required_machines)
    183: if not machines:
    190: executer = JobExecuter(ready_job, machines, self.listeners)

/external/elfutils/libebl/

  eblopenbackend.c
     58: } machines[] =  (variable)
    139: #define nmachines (sizeof (machines) / sizeof (machines[0]))
    287: if ((emulation != NULL && strcmp (emulation, machines[cnt].emulation) == 0)  (in openbackend)
    288: || (emulation == NULL && machines[cnt].em == machine))  (in openbackend)
    291: result->emulation = machines[cnt].emulation;  (in openbackend)
    305: result->machine = machines[cnt].em;  (in openbackend)
    306: result->class = machines[cnt].class;  (in openbackend)
    307: result->data = machines[cnt].data;  (in openbackend)
    325: machines[cnt].dsoname),  (in openbackend)
    332: machines[cnt].dsoname),  (in openbackend)
    [all …]

/external/elfutils/tests/

  saridx.c
     30: static const char *machines[] =  (variable)
    216: (ehdr.e_machine >= (sizeof (machines)  (in main)
    217: / sizeof (machines[0]))  (in main)
    218: || machines[ehdr.e_machine] == NULL)  (in main)
    220: : machines[ehdr.e_machine]);  (in main)

/external/toolchain-utils/automation/common/

  job.py
     65: self.machines = []
     96: res.extend(['%s' % machine for machine in self.machines])
    116: if len(self.machines) > 1:
    117: for num, machine in enumerate(self.machines[1:]):
    157: return self.machines[0]

/external/autotest/utils/

  site_check_dut_usage.py
     88: machines = set()
     92: machines.add(machine)
     94: num_machines = len(machines)
    107: for machine in machines:

/external/ltp/testcases/network/nfsv4/locks/

  deploy_info
      6: Setting up lock test with several test machines.
     19: Running lock test over several test machines.
     21: Run test on already configured test machines.

/external/autotest/server/site_tests/hardware_StorageStress/

  control.stress3
     49: job.parallel_simple(run_hardware_storage_stress_suspend, machines)
     50: job.parallel_simple(run_hardware_storage_stress, machines)
     51: job.parallel_simple(run_hardware_storage_stress_reboot, machines)

  control.stress
     49: job.parallel_simple(run_hardware_storage_stress_suspend, machines)
     50: job.parallel_simple(run_hardware_storage_stress, machines)
     51: job.parallel_simple(run_hardware_storage_stress_reboot, machines)

  control.stress2
     49: job.parallel_simple(run_hardware_storage_stress_suspend, machines)
     50: job.parallel_simple(run_hardware_storage_stress, machines)
     51: job.parallel_simple(run_hardware_storage_stress_reboot, machines)
