
Searched refs:nid (Results 1 – 5 of 5) sorted by relevance

/kernel/dma/
contiguous.c
103 int nid, count = 0; in early_numa_cma() local
114 nid = array_index_nospec(tmp, MAX_NUMNODES); in early_numa_cma()
118 numa_cma_size[nid] = tmp; in early_numa_cma()
161 int nid; in dma_numa_cma_reserve() local
163 for_each_node(nid) { in dma_numa_cma_reserve()
168 if (!node_online(nid)) { in dma_numa_cma_reserve()
169 if (pernuma_size_bytes || numa_cma_size[nid]) in dma_numa_cma_reserve()
170 pr_warn("invalid node %d specified\n", nid); in dma_numa_cma_reserve()
176 cma = &dma_contiguous_pernuma_area[nid]; in dma_numa_cma_reserve()
177 snprintf(name, sizeof(name), "pernuma%d", nid); in dma_numa_cma_reserve()
[all …]
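
The contiguous.c hits show early_numa_cma() clamping a parsed node id with array_index_nospec() before using it to index numa_cma_size[], so a mispredicted bounds check cannot reach out-of-bounds memory under speculation. Below is a minimal userspace sketch of that clamp pattern; the branch-free mask stands in for the kernel's arch-specific array_index_nospec(), and MAX_NUMNODES is an illustrative value (it is config-dependent in the kernel):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_NUMNODES 64                 /* illustrative; config-dependent in the kernel */

    static uint64_t numa_cma_size[MAX_NUMNODES];

    /* Branch-free clamp: returns idx when idx < size, 0 otherwise, so even a
     * speculatively executed load stays inside the array. */
    static size_t index_nospec(size_t idx, size_t size)
    {
        size_t mask = (size_t)0 - (size_t)(idx < size);
        return idx & mask;
    }

    static void set_node_cma_size(size_t tmp, uint64_t bytes)
    {
        size_t nid = index_nospec(tmp, MAX_NUMNODES);
        numa_cma_size[nid] = bytes;
    }

    int main(void)
    {
        set_node_cma_size(3, 16 << 20);     /* in range: lands on node 3 */
        set_node_cma_size(999, 16 << 20);   /* out of range: clamped to index 0 */
        printf("node 3: %llu bytes\n", (unsigned long long)numa_cma_size[3]);
        return 0;
    }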
/kernel/sched/
fair.c
1685 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv) in task_faults_idx() argument
1687 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv; in task_faults_idx()
1690 static inline unsigned long task_faults(struct task_struct *p, int nid) in task_faults() argument
1695 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + in task_faults()
1696 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)]; in task_faults()
1699 static inline unsigned long group_faults(struct task_struct *p, int nid) in group_faults() argument
1706 return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] + in group_faults()
1707 ng->faults[task_faults_idx(NUMA_MEM, nid, 1)]; in group_faults()
1710 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) in group_faults_cpu() argument
1712 return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] + in group_faults_cpu()
[all …]
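
The fair.c hits all funnel through task_faults_idx(), which flattens a (stat, node, private/shared) triple into one index of the per-task numa_faults[] array using the formula shown at line 1687. A small sketch of that layout, assuming only the two stat types visible in the matches (NUMA_MEM and NUMA_CPU; the kernel enum has further entries) and a fixed node count in place of the boot-time nr_node_ids:

    #include <stdio.h>

    #define NR_NUMA_HINT_FAULT_TYPES 2      /* shared (0) vs. private (1) hinting faults */
    #define NR_NODE_IDS 4                   /* stand-in for boot-time nr_node_ids */

    enum numa_faults_stats { NUMA_MEM, NUMA_CPU, NR_STATS };

    /* Flattened [stat][node][priv] layout, matching the formula in fair.c. */
    static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
    {
        return NR_NUMA_HINT_FAULT_TYPES * (s * NR_NODE_IDS + nid) + priv;
    }

    int main(void)
    {
        unsigned long faults[NR_NUMA_HINT_FAULT_TYPES * NR_STATS * NR_NODE_IDS] = { 0 };
        int nid = 2;

        faults[task_faults_idx(NUMA_MEM, nid, 1)] = 7;  /* private memory faults */
        faults[task_faults_idx(NUMA_MEM, nid, 0)] = 3;  /* shared memory faults */

        /* task_faults() sums both kinds for a node, as in the hit above. */
        unsigned long total = faults[task_faults_idx(NUMA_MEM, nid, 0)] +
                              faults[task_faults_idx(NUMA_MEM, nid, 1)];
        printf("node %d memory faults: %lu\n", nid, total);
        return 0;
    }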
core.c
3642 int nid = cpu_to_node(cpu); in select_fallback_rq() local
3656 if (nid != -1) { in select_fallback_rq()
3657 nodemask = cpumask_of_node(nid); in select_fallback_rq()
8925 void sched_setnuma(struct task_struct *p, int nid) in sched_setnuma() argument
8940 p->numa_preferred_nid = nid; in sched_setnuma()
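
In select_fallback_rq(), the nid obtained from cpu_to_node() narrows the first search for a usable CPU to the original CPU's own node before widening out. A simplified sketch of that two-pass fallback, with a static table standing in for cpu_to_node()/cpumask_of_node() and an allowed[] array standing in for the task's cpumask:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 8
    static const int cpu_node[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

    static int pick_fallback_cpu(int nid, const bool allowed[NR_CPUS])
    {
        if (nid != -1) {                    /* node known: try node-local CPUs first */
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_node[cpu] == nid && allowed[cpu])
                    return cpu;
        }
        for (int cpu = 0; cpu < NR_CPUS; cpu++)  /* widen: any allowed CPU */
            if (allowed[cpu])
                return cpu;
        return -1;                          /* no usable CPU at all */
    }

    int main(void)
    {
        bool allowed[NR_CPUS] = { false, false, false, false, true, true, false, false };
        /* node 0 has no allowed CPU, so the search widens to CPU 4 on node 1 */
        printf("fallback for node 0: CPU %d\n", pick_fallback_cpu(0, allowed));
        return 0;
    }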
/kernel/
padata.c
513 int nworks, nid; in padata_do_multithreaded() local
560 nid = next_node_in(old_node, node_states[N_CPU]); in padata_do_multithreaded()
561 } while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid)); in padata_do_multithreaded()
562 queue_work_node(nid, system_unbound_wq, &pw->pw_work); in padata_do_multithreaded()
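
The padata hits show lockless round-robin node selection: next_node_in() proposes the next node with CPUs, and atomic_try_cmpxchg() publishes it, retrying if another submitter advanced last_used_nid first. A sketch of the same loop with C11 atomics, assuming a plain modular next_node_in() rather than the kernel's node_states[N_CPU] walk:

    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_NODES 4
    static _Atomic int last_used_nid = 0;

    static int next_node_in(int node)
    {
        return (node + 1) % NR_NODES;       /* kernel version skips nodes without CPUs */
    }

    static int pick_next_node(void)
    {
        int old_node = atomic_load(&last_used_nid);
        int nid;
        do {
            nid = next_node_in(old_node);
            /* on CAS failure, old_node is refreshed and the loop retries */
        } while (!atomic_compare_exchange_weak(&last_used_nid, &old_node, nid));
        return nid;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            printf("work item %d -> node %d\n", i, pick_next_node());
        return 0;
    }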
/kernel/bpf/
syscall.c
485 int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid, in bpf_map_alloc_pages() argument
498 pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0); in bpf_map_alloc_pages()
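
bpf_map_alloc_pages() takes the target node as a parameter and hands it to alloc_pages_node(), so map pages come from a chosen NUMA node, with __GFP_ACCOUNT charging them to the memory cgroup. A userspace analogue of node-local allocation using libnuma's numa_alloc_onnode(); __GFP_ACCOUNT has no userspace equivalent. Build with -lnuma:

    #include <numa.h>
    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        if (numa_available() < 0) {
            fprintf(stderr, "NUMA not available on this system\n");
            return 1;
        }
        size_t page = (size_t)numa_pagesize();
        void *p = numa_alloc_onnode(page, 0);   /* one page placed on node 0 */
        if (p) {
            printf("allocated one page on node 0\n");
            numa_free(p, page);                 /* libnuma allocations need numa_free() */
        }
        return 0;
    }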