Lines matching full:g — every use of the global state pointer g in this file (perf's NUMA benchmark, tools/perf/bench/numa.c).

48 #define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
54 #define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
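The two macros above gate all of the benchmark's console output on the show_details verbosity level carried in the shared global state, and stay silent while g is still NULL during early startup. A minimal self-contained sketch of the same pattern (the struct layout here is a trimmed stand-in, not the benchmark's real global_info):

#include <stdio.h>

/* Trimmed stand-in for the benchmark's shared state (assumed layout). */
struct params { int show_details; };
struct global_info { struct params p; };

static struct global_info *g;

/* Print at the default verbosity (show_details >= 0); silent if g is unset. */
#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
/* Print only when extra detail was requested (show_details >= 1). */
#define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)

int main(void)
{
    struct global_info gi = { .p = { .show_details = 0 } };

    g = &gi;
    tprintf("shown at the default verbosity level\n");
    dprintf("shown only when show_details >= 1\n"); /* suppressed here */
    return 0;
}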
162 static struct global_info *g = NULL; variable
173 OPT_STRING('G', "mb_global" , &p0.mb_global_str, "MB", "global memory (MBs)"),
230 for (i = 0; i < g->p.nr_nodes; i++) { in nr_numa_nodes()
277 for (cpu = 0; cpu < g->p.nr_cpus; cpu++) in bind_to_cpu()
280 BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus); in bind_to_cpu()
292 int cpus_per_node = g->p.nr_cpus / nr_numa_nodes(); in bind_to_node()
297 BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus); in bind_to_node()
306 for (cpu = 0; cpu < g->p.nr_cpus; cpu++) in bind_to_node()
312 BUG_ON(cpu_stop > g->p.nr_cpus); in bind_to_node()
336 ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1); in mempol_restore()
349 BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8); in bind_to_memnode()
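The two matches above are the benchmark's memory-policy helpers: bind_to_memnode() packs the target node into a single unsigned long nodemask, which is why line 349 checks that nr_nodes fits into sizeof(nodemask)*8 bits, and mempol_restore() reverts to MPOL_DEFAULT. A hedged sketch of that pattern follows; error handling is simplified, the real helpers take their limits from g->p and differ in detail (link with -lnuma):

#include <numaif.h> /* set_mempolicy(), MPOL_* */
#include <assert.h>

/* Bind all future allocations of this task to one NUMA node. */
static int bind_to_memnode(int node)
{
    unsigned long nodemask;

    if (node < 0) /* -1 means "no binding requested" */
        return 0;

    /* Mirrors the BUG_ON above: the node must fit in one machine word. */
    assert(node < (int)(sizeof(nodemask) * 8));
    nodemask = 1UL << node;

    return set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask) * 8);
}

/* Undo the binding: back to the default local-allocation policy. */
static int mempol_restore(void)
{
    return set_mempolicy(MPOL_DEFAULT, NULL, 0);
}

int main(void)
{
    bind_to_memnode(0); /* assumes node 0 exists */
    mempol_restore();
    return 0;
}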
395 if (ret && !g->print_once) { in alloc_data()
396 g->print_once = 1; in alloc_data()
402 if (ret && !g->print_once) { in alloc_data()
403 g->print_once = 1; in alloc_data()
451 return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0, g->p.thp, g->p.init_random); in zalloc_shared_data()
459 return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0, g->p.thp, g->p.init_random); in setup_shared_data()
468 return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0, g->p.thp, g->p.init_random); in setup_private_data()
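The three wrappers above differ only in the mmap sharing flag and whether the buffer is zeroed; the underlying alloc_data() (see lines 395-403) additionally handles THP hints, CPU-0 placement and random initialization via its extra parameters. A reduced sketch of the core anonymous-mmap allocation, under those simplifying assumptions:

#include <sys/mman.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

/* Anonymous mapping; MAP_SHARED makes it visible to fork()ed workers. */
static void *alloc_data(size_t bytes, int map_flags, int init_zero)
{
    void *buf = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | map_flags, -1, 0);

    if (buf == MAP_FAILED) {
        perror("mmap");
        exit(1);
    }
    if (init_zero)
        memset(buf, 0, bytes); /* fresh anon pages are zero; this faults them in */
    return buf;
}

int main(void)
{
    /* Shared across fork()ed processes, like the benchmark's global buffer. */
    void *shared = alloc_data(1024 * 1024, MAP_SHARED, 1);
    /* Private to this process, like the per-process/per-thread buffers. */
    void *priv = alloc_data(1024 * 1024, MAP_PRIVATE, 0);

    munmap(shared, 1024 * 1024);
    munmap(priv, 1024 * 1024);
    return 0;
}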
498 if (!g->p.cpu_list_str) in parse_setup_cpu_list()
501 dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks); in parse_setup_cpu_list()
503 str0 = str = strdup(g->p.cpu_list_str); in parse_setup_cpu_list()
538 BUG_ON(step <= 0 || step >= g->p.nr_cpus); in parse_setup_cpu_list()
550 BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus); in parse_setup_cpu_list()
563 if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) { in parse_setup_cpu_list()
564 printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus); in parse_setup_cpu_list()
577 if (t >= g->p.nr_tasks) { in parse_setup_cpu_list()
581 td = g->threads + t; in parse_setup_cpu_list()
593 BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus); in parse_setup_cpu_list()
604 if (t < g->p.nr_tasks) in parse_setup_cpu_list()
605 printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t); in parse_setup_cpu_list()
635 if (!g->p.node_list_str) in parse_setup_node_list()
638 dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks); in parse_setup_node_list()
640 str0 = str = strdup(g->p.node_list_str); in parse_setup_node_list()
674 BUG_ON(step <= 0 || step >= g->p.nr_nodes); in parse_setup_node_list()
687 if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) { in parse_setup_node_list()
688 printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes); in parse_setup_node_list()
699 if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) { in parse_setup_node_list()
703 td = g->threads + t; in parse_setup_node_list()
719 if (t < g->p.nr_tasks) in parse_setup_node_list()
720 printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t); in parse_setup_node_list()
753 if (g->p.data_reads) in access_data()
755 if (g->p.data_writes) in access_data()
784 if (g->p.data_zero_memset && !g->p.data_rand_walk) { in do_work()
791 chunk_1 = words/g->p.nr_loops; in do_work()
797 if (g->p.data_rand_walk) { in do_work()
809 if (g->p.data_zero_memset) { in do_work()
816 } else if (!g->p.data_backwards || (nr + loop) & 1) { in do_work()
862 g->threads[task_nr].curr_cpu = cpu; in update_curr_cpu()
873 * to a single node. A count of g->p.nr_nodes means it's
882 for (t = 0; t < g->p.nr_threads; t++) { in count_process_nodes()
887 task_nr = process_nr*g->p.nr_threads + t; in count_process_nodes()
888 td = g->threads + task_nr; in count_process_nodes()
917 for (p = 0; p < g->p.nr_proc; p++) { in count_node_processes()
918 for (t = 0; t < g->p.nr_threads; t++) { in count_node_processes()
923 task_nr = p*g->p.nr_threads + t; in count_node_processes()
924 td = g->threads + task_nr; in count_node_processes()
945 for (p = 0; p < g->p.nr_proc; p++) { in calc_convergence_compression()
981 if (!g->p.show_convergence && !g->p.measure_convergence) in calc_convergence()
984 for (node = 0; node < g->p.nr_nodes; node++) in calc_convergence()
990 for (t = 0; t < g->p.nr_tasks; t++) { in calc_convergence()
991 struct thread_data *td = g->threads + t; in calc_convergence()
1010 nr_min = g->p.nr_tasks; in calc_convergence()
1013 for (node = 0; node < g->p.nr_nodes; node++) { in calc_convergence()
1023 BUG_ON(sum > g->p.nr_tasks); in calc_convergence()
1025 if (0 && (sum < g->p.nr_tasks)) in calc_convergence()
1031 * to g->p.nr_proc: in calc_convergence()
1035 for (node = 0; node < g->p.nr_nodes; node++) { in calc_convergence()
1062 if (strong && process_groups == g->p.nr_proc) { in calc_convergence()
1066 if (g->p.measure_convergence) { in calc_convergence()
1067 g->all_converged = true; in calc_convergence()
1068 g->stop_work = true; in calc_convergence()
1083 (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0); in show_summary()
1087 if (g->p.show_details >= 0) in show_summary()
1099 int details = g->p.show_details; in worker_thread()
1117 global_data = g->data; in worker_thread()
1119 thread_data = setup_private_data(g->p.bytes_thread); in worker_thread()
1124 if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1) in worker_thread()
1136 if (g->p.serialize_startup) { in worker_thread()
1137 pthread_mutex_lock(&g->startup_mutex); in worker_thread()
1138 g->nr_tasks_started++; in worker_thread()
1139 pthread_mutex_unlock(&g->startup_mutex); in worker_thread()
1142 pthread_mutex_lock(&g->start_work_mutex); in worker_thread()
1143 g->nr_tasks_working++; in worker_thread()
1146 if (g->nr_tasks_working == g->p.nr_tasks) in worker_thread()
1147 pthread_mutex_unlock(&g->startup_done_mutex); in worker_thread()
1149 pthread_mutex_unlock(&g->start_work_mutex); in worker_thread()
1157 for (l = 0; l < g->p.nr_loops; l++) { in worker_thread()
1160 if (g->stop_work) in worker_thread()
1163 val += do_work(global_data, g->p.bytes_global, process_nr, g->p.nr_proc, l, val); in worker_thread()
1164 val += do_work(process_data, g->p.bytes_process, thread_nr, g->p.nr_threads, l, val); in worker_thread()
1165 val += do_work(thread_data, g->p.bytes_thread, 0, 1, l, val); in worker_thread()
1167 if (g->p.sleep_usecs) { in worker_thread()
1169 usleep(g->p.sleep_usecs); in worker_thread()
1175 if (g->p.bytes_process_locked) { in worker_thread()
1177 val += do_work(process_data, g->p.bytes_process_locked, thread_nr, g->p.nr_threads, l, val); in worker_thread()
1181 work_done = g->p.bytes_global + g->p.bytes_process + in worker_thread()
1182 g->p.bytes_process_locked + g->p.bytes_thread; in worker_thread()
1187 if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs) in worker_thread()
1195 if (g->p.nr_secs) { in worker_thread()
1197 if ((u32)diff.tv_sec >= g->p.nr_secs) { in worker_thread()
1198 g->stop_work = true; in worker_thread()
1208 * Perturb the first task's equilibrium every g->p.perturb_secs seconds, in worker_thread()
1211 if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) { in worker_thread()
1223 this_cpu = g->threads[task_nr].curr_cpu; in worker_thread()
1224 if (this_cpu < g->p.nr_cpus/2) in worker_thread()
1225 target_cpu = g->p.nr_cpus-1; in worker_thread()
1272 free_data(thread_data, g->p.bytes_thread); in worker_thread()
1274 pthread_mutex_lock(&g->stop_work_mutex); in worker_thread()
1275 g->bytes_done += bytes_done; in worker_thread()
1276 pthread_mutex_unlock(&g->stop_work_mutex); in worker_thread()
1301 task_nr = process_nr*g->p.nr_threads; in worker_process()
1302 td = g->threads + task_nr; in worker_process()
1307 pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t)); in worker_process()
1308 process_data = setup_private_data(g->p.bytes_process); in worker_process()
1310 if (g->p.show_details >= 3) { in worker_process()
1312 process_nr, g->data, process_data); in worker_process()
1315 for (t = 0; t < g->p.nr_threads; t++) { in worker_process()
1316 task_nr = process_nr*g->p.nr_threads + t; in worker_process()
1317 td = g->threads + task_nr; in worker_process()
1331 for (t = 0; t < g->p.nr_threads; t++) { in worker_process()
1336 free_data(process_data, g->p.bytes_process); in worker_process()
1342 if (g->p.show_details < 0) in print_summary()
1347 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus); in print_summary()
1349 g->p.nr_loops, g->p.bytes_global/1024/1024); in print_summary()
1351 g->p.nr_loops, g->p.bytes_process/1024/1024); in print_summary()
1353 g->p.nr_loops, g->p.bytes_thread/1024/1024); in print_summary()
1362 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; in init_thread_data()
1365 g->threads = zalloc_shared_data(size); in init_thread_data()
1367 for (t = 0; t < g->p.nr_tasks; t++) { in init_thread_data()
1368 struct thread_data *td = g->threads + t; in init_thread_data()
1376 for (cpu = 0; cpu < g->p.nr_cpus; cpu++) in init_thread_data()
1383 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; in deinit_thread_data()
1385 free_data(g->threads, size); in deinit_thread_data()
1390 g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0); in init()
1393 g->p = p0; in init()
1395 g->p.nr_cpus = numa_num_configured_cpus(); in init()
1397 g->p.nr_nodes = numa_max_node() + 1; in init()
1400 BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0); in init()
1402 if (g->p.show_quiet && !g->p.show_details) in init()
1403 g->p.show_details = -1; in init()
1406 if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str) in init()
1409 if (g->p.mb_global_str) { in init()
1410 g->p.mb_global = atof(g->p.mb_global_str); in init()
1411 BUG_ON(g->p.mb_global < 0); in init()
1414 if (g->p.mb_proc_str) { in init()
1415 g->p.mb_proc = atof(g->p.mb_proc_str); in init()
1416 BUG_ON(g->p.mb_proc < 0); in init()
1419 if (g->p.mb_proc_locked_str) { in init()
1420 g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str); in init()
1421 BUG_ON(g->p.mb_proc_locked < 0); in init()
1422 BUG_ON(g->p.mb_proc_locked > g->p.mb_proc); in init()
1425 if (g->p.mb_thread_str) { in init()
1426 g->p.mb_thread = atof(g->p.mb_thread_str); in init()
1427 BUG_ON(g->p.mb_thread < 0); in init()
1430 BUG_ON(g->p.nr_threads <= 0); in init()
1431 BUG_ON(g->p.nr_proc <= 0); in init()
1433 g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads; in init()
1435 g->p.bytes_global = g->p.mb_global *1024L*1024L; in init()
1436 g->p.bytes_process = g->p.mb_proc *1024L*1024L; in init()
1437 g->p.bytes_process_locked = g->p.mb_proc_locked *1024L*1024L; in init()
1438 g->p.bytes_thread = g->p.mb_thread *1024L*1024L; in init()
1440 g->data = setup_shared_data(g->p.bytes_global); in init()
1443 init_global_mutex(&g->start_work_mutex); in init()
1444 init_global_mutex(&g->startup_mutex); in init()
1445 init_global_mutex(&g->startup_done_mutex); in init()
1446 init_global_mutex(&g->stop_work_mutex); in init()
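Because g itself sits in a MAP_SHARED anonymous mapping (line 1390) and the workers are fork()ed processes, the four mutexes initialized above have to be process-shared rather than the pthread default. A sketch of what init_global_mutex() presumably does (compile with -pthread; the real helper may differ):

#include <pthread.h>

/* Initialize a mutex that lives in MAP_SHARED memory so that
 * fork()ed worker processes can lock and unlock it correctly. */
static void init_global_mutex(pthread_mutex_t *mutex)
{
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
    pthread_mutex_init(mutex, &attr);
    pthread_mutexattr_destroy(&attr);
}

int main(void)
{
    pthread_mutex_t m; /* normally &g->start_work_mutex etc. */

    init_global_mutex(&m);
    pthread_mutex_lock(&m);
    pthread_mutex_unlock(&m);
    return 0;
}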
1462 free_data(g->data, g->p.bytes_global); in deinit()
1463 g->data = NULL; in deinit()
1467 free_data(g, sizeof(*g)); in deinit()
1468 g = NULL; in deinit()
1480 if (!g->p.show_quiet) in print_res()
1502 pids = zalloc(g->p.nr_proc * sizeof(*pids)); in __bench_numa()
1506 pthread_mutex_lock(&g->start_work_mutex); in __bench_numa()
1508 if (g->p.serialize_startup) { in __bench_numa()
1515 for (i = 0; i < g->p.nr_proc; i++) { in __bench_numa()
1530 while (g->nr_tasks_started != g->p.nr_tasks) in __bench_numa()
1533 BUG_ON(g->nr_tasks_started != g->p.nr_tasks); in __bench_numa()
1535 if (g->p.serialize_startup) { in __bench_numa()
1538 pthread_mutex_lock(&g->startup_done_mutex); in __bench_numa()
1541 pthread_mutex_unlock(&g->start_work_mutex); in __bench_numa()
1544 pthread_mutex_lock(&g->startup_done_mutex); in __bench_numa()
1558 pthread_mutex_unlock(&g->startup_done_mutex); in __bench_numa()
1566 for (i = 0; i < g->p.nr_proc; i++) { in __bench_numa()
1576 for (t = 0; t < g->p.nr_tasks; t++) { in __bench_numa()
1577 u64 thread_runtime_ns = g->threads[t].runtime_ns; in __bench_numa()
1597 bytes = g->bytes_done; in __bench_numa()
1598 runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC; in __bench_numa()
1600 if (g->p.measure_convergence) { in __bench_numa()
1618 print_res(name, bytes / g->p.nr_tasks / 1e9, in __bench_numa()
1624 print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks), in __bench_numa()
1627 print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max, in __bench_numa()
1633 if (g->p.show_details >= 2) { in __bench_numa()
1636 for (p = 0; p < g->p.nr_proc; p++) { in __bench_numa()
1637 for (t = 0; t < g->p.nr_threads; t++) { in __bench_numa()
1639 td = g->threads + p*g->p.nr_threads + t; in __bench_numa()
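For reference, the three throughput figures printed around lines 1618-1627 all derive from two aggregates: the total bytes processed (g->bytes_done, line 1597) and the slowest task's runtime. A small worked recap of that arithmetic with made-up example numbers:

#include <stdio.h>

int main(void)
{
    /* Example aggregates, standing in for g->bytes_done etc. */
    double bytes_done      = 64e9; /* total bytes touched by all tasks  */
    double nr_tasks        = 16;
    double runtime_sec_max = 8.0;  /* slowest task's runtime, seconds   */

    /* GB of data per thread (line 1618). */
    printf("GB/thread:         %.3f\n", bytes_done / nr_tasks / 1e9);
    /* Nanoseconds spent per byte per thread (line 1624). */
    printf("nsecs/byte/thread: %.3f\n",
           runtime_sec_max * 1e9 / (bytes_done / nr_tasks));
    /* GB per second per thread (line 1627). */
    printf("GB/sec/thread:     %.3f\n",
           bytes_done / nr_tasks / 1e9 / runtime_sec_max);
    return 0;
}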