
Searched refs:cpu (Results 1 – 15 of 15) sorted by relevance

/mm/
percpu-vm.c
14 unsigned int cpu, int page_idx) in pcpu_chunk_page() argument
19 return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); in pcpu_chunk_page()
58 unsigned int cpu; in pcpu_free_pages() local
61 for_each_possible_cpu(cpu) { in pcpu_free_pages()
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
86 unsigned int cpu, tcpu; in pcpu_alloc_pages() local
89 for_each_possible_cpu(cpu) { in pcpu_alloc_pages()
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
93 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); in pcpu_alloc_pages()
102 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
[all …]
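
The pcpu_alloc_pages() hits above show the NUMA-aware flavor of a per-CPU loop: each CPU's page is allocated on that CPU's home node, and a mid-loop failure unwinds only the CPUs already served. A minimal sketch of the same idiom, assuming a kernel-module context (alloc_one_page_per_cpu and pages_sketch are hypothetical names, not kernel APIs):

#include <linux/gfp.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/errno.h>

static struct page *pages_sketch[NR_CPUS];      /* hypothetical */

static int alloc_one_page_per_cpu(gfp_t gfp)
{
        unsigned int cpu, tcpu;

        for_each_possible_cpu(cpu) {
                /* Allocate from the node this CPU belongs to. */
                struct page *page = alloc_pages_node(cpu_to_node(cpu), gfp, 0);

                if (!page)
                        goto err;
                pages_sketch[cpu] = page;
        }
        return 0;

err:
        /* Unwind only the CPUs that were already served. */
        for_each_possible_cpu(tcpu) {
                if (tcpu == cpu)
                        break;
                __free_page(pages_sketch[tcpu]);
        }
        return -ENOMEM;
}
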
percpu.c
241 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) in pcpu_page_idx() argument
243 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; in pcpu_page_idx()
247 unsigned int cpu, int page_idx) in pcpu_chunk_addr() argument
249 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] + in pcpu_chunk_addr()
878 int slot, off, new_alloc, cpu, ret; in pcpu_alloc() local
1025 for_each_possible_cpu(cpu) in pcpu_alloc()
1026 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); in pcpu_alloc()
1304 unsigned int cpu; in is_kernel_percpu_address() local
1306 for_each_possible_cpu(cpu) { in is_kernel_percpu_address()
1307 void *start = per_cpu_ptr(base, cpu); in is_kernel_percpu_address()
[all …]
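
percpu.c translates a (chunk, cpu, page) triple into an address by adding a per-unit offset to the chunk base, and is_kernel_percpu_address() runs the reverse test. A sketch of that membership test, assuming a dynamically allocated per-CPU region (my_pcpu_contains is a hypothetical helper, not a kernel API):

#include <linux/percpu.h>

static bool my_pcpu_contains(void __percpu *base, size_t size,
                             unsigned long addr)
{
        unsigned int cpu;

        /* Every possible CPU has its own copy; test each one. */
        for_each_possible_cpu(cpu) {
                void *start = per_cpu_ptr(base, cpu);

                if (addr >= (unsigned long)start &&
                    addr <  (unsigned long)start + size)
                        return true;
        }
        return false;
}
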
vmstat.c
34 int cpu; in sum_vm_events() local
39 for_each_online_cpu(cpu) { in sum_vm_events()
40 struct vm_event_state *this = &per_cpu(vm_event_states, cpu); in sum_vm_events()
66 void vm_events_fold_cpu(int cpu) in vm_events_fold_cpu() argument
68 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu); in vm_events_fold_cpu()
166 int cpu; in refresh_zone_stat_thresholds() local
174 for_each_online_cpu(cpu) in refresh_zone_stat_thresholds()
175 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in refresh_zone_stat_thresholds()
195 int cpu; in set_pgdat_percpu_threshold() local
205 for_each_online_cpu(cpu) in set_pgdat_percpu_threshold()
[all …]
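
vmstat.c is the canonical fold-up read: each CPU bumps its own counter locklessly, and readers sum over online CPUs (vm_events_fold_cpu() migrates a dying CPU's counts so they are not lost). A minimal sketch, assuming a kernel-module context; my_event_count and my_sum_events are hypothetical:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_event_count);

static unsigned long my_sum_events(void)
{
        unsigned long sum = 0;
        int cpu;

        /* Online CPUs only: a dying CPU folds its count elsewhere. */
        for_each_online_cpu(cpu)
                sum += per_cpu(my_event_count, cpu);
        return sum;
}
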
swap.c
515 static void activate_page_drain(int cpu) in activate_page_drain() argument
517 struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu); in activate_page_drain()
523 static bool need_activate_page_drain(int cpu) in need_activate_page_drain() argument
525 return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0; in need_activate_page_drain()
541 static inline void activate_page_drain(int cpu) in activate_page_drain() argument
545 static bool need_activate_page_drain(int cpu) in need_activate_page_drain() argument
796 void lru_add_drain_cpu(int cpu) in lru_add_drain_cpu() argument
798 struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu); in lru_add_drain_cpu()
803 pvec = &per_cpu(lru_rotate_pvecs, cpu); in lru_add_drain_cpu()
813 pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); in lru_add_drain_cpu()
[all …]
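
swap.c keeps small per-CPU pagevec batches and drains them per CPU on demand; need_activate_page_drain() lets callers skip CPUs with nothing queued. A sketch of that cache-and-drain split with a hypothetical item batch (my_batch, my_batches, and both helpers are illustrative, not kernel APIs):

#include <linux/percpu.h>
#include <linux/slab.h>

struct my_batch {
        int nr;
        void *items[14];        /* same spirit as a pagevec */
};
static DEFINE_PER_CPU(struct my_batch, my_batches);

static bool my_batch_needs_drain(int cpu)
{
        return per_cpu(my_batches, cpu).nr != 0;
}

static void my_batch_drain(int cpu)
{
        struct my_batch *b = &per_cpu(my_batches, cpu);

        /* Flush everything this CPU has queued up. */
        while (b->nr)
                kfree(b->items[--b->nr]);
}
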
zswap.c
342 static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu) in __zswap_cpu_notifier() argument
354 *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm; in __zswap_cpu_notifier()
355 dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu)); in __zswap_cpu_notifier()
359 *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL; in __zswap_cpu_notifier()
362 per_cpu(zswap_dstmem, cpu) = dst; in __zswap_cpu_notifier()
366 tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu); in __zswap_cpu_notifier()
369 *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL; in __zswap_cpu_notifier()
371 dst = per_cpu(zswap_dstmem, cpu); in __zswap_cpu_notifier()
373 per_cpu(zswap_dstmem, cpu) = NULL; in __zswap_cpu_notifier()
384 unsigned long cpu = (unsigned long)pcpu; in zswap_cpu_notifier() local
[all …]
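
zswap.c allocates its per-CPU compression scratch space from a CPU-hotplug notifier, on the incoming CPU's node, and frees it again on CPU_DEAD or a cancelled bring-up. A sketch of that pre-cpuhp notifier shape (my_dstmem and my_cpu_notifier are hypothetical):

#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(u8 *, my_dstmem);

static int my_cpu_notifier(unsigned long action, unsigned long cpu)
{
        u8 *dst;

        switch (action) {
        case CPU_UP_PREPARE:
                /* Allocate on the incoming CPU's node before it runs. */
                dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL,
                                   cpu_to_node(cpu));
                if (!dst)
                        return NOTIFY_BAD;
                per_cpu(my_dstmem, cpu) = dst;
                break;
        case CPU_DEAD:
        case CPU_UP_CANCELED:
                kfree(per_cpu(my_dstmem, cpu));
                per_cpu(my_dstmem, cpu) = NULL;
                break;
        }
        return NOTIFY_OK;
}
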
slab.c
610 static void init_reap_node(int cpu) in init_reap_node() argument
614 node = next_node(cpu_to_mem(cpu), node_online_map); in init_reap_node()
618 per_cpu(slab_reap_node, cpu) = node; in init_reap_node()
632 #define init_reap_node(cpu) do { } while (0) argument
643 static void start_cpu_timer(int cpu) in start_cpu_timer() argument
645 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); in start_cpu_timer()
653 init_reap_node(cpu); in start_cpu_timer()
655 schedule_delayed_work_on(cpu, reap_work, in start_cpu_timer()
656 __round_jiffies_relative(HZ, cpu)); in start_cpu_timer()
1081 static void cpuup_canceled(long cpu) in cpuup_canceled() argument
[all …]
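
slab.c arms one piece of periodic reap work per CPU, pinned to that CPU and staggered with __round_jiffies_relative() so every CPU does not wake in the same tick. A sketch (my_reap_work, my_reap, and my_start_cpu_timer are hypothetical):

#include <linux/workqueue.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct delayed_work, my_reap_work);

static void my_reap(struct work_struct *w)
{
        /* Trim this CPU's caches here, rearming if still needed. */
}

static void my_start_cpu_timer(int cpu)
{
        struct delayed_work *w = &per_cpu(my_reap_work, cpu);

        INIT_DEFERRABLE_WORK(w, my_reap);
        /* Pin the work to one CPU; stagger the first expiry per CPU. */
        schedule_delayed_work_on(cpu, w,
                                 __round_jiffies_relative(HZ, cpu));
}
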
quicklist.c
92 int cpu; in quicklist_total_size() local
95 for_each_online_cpu(cpu) { in quicklist_total_size()
96 ql = per_cpu(quicklist, cpu); in quicklist_total_size()
slub.c
201 int cpu; /* Was running on cpu */ member
545 p->cpu = smp_processor_id(); in set_track()
567 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); in print_track()
1773 static inline unsigned int init_tid(int cpu) in init_tid() argument
1775 return cpu; in init_tid()
1804 int cpu; in init_kmem_cache_cpus() local
1806 for_each_possible_cpu(cpu) in init_kmem_cache_cpus()
1807 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); in init_kmem_cache_cpus()
2090 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) in __flush_cpu_slab() argument
2092 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
[all …]
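
slub.c seeds each CPU's copy of the dynamically allocated kmem_cache_cpu so transaction ids start out distinct per CPU (init_tid(cpu) is just cpu). The same alloc_percpu()-then-initialize idiom, sketched with a hypothetical structure:

#include <linux/percpu.h>

struct my_cpu_state {
        unsigned long tid;      /* per-CPU transaction id */
};

static struct my_cpu_state __percpu *my_alloc_cpu_state(void)
{
        struct my_cpu_state __percpu *s = alloc_percpu(struct my_cpu_state);
        int cpu;

        if (!s)
                return NULL;
        /* Distinct starting tids keep CPUs from colliding. */
        for_each_possible_cpu(cpu)
                per_cpu_ptr(s, cpu)->tid = cpu;
        return s;
}
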
page_alloc.c
1315 static void drain_pages(unsigned int cpu) in drain_pages() argument
1325 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages()
1355 int cpu; in drain_all_pages() local
1371 for_each_online_cpu(cpu) { in drain_all_pages()
1374 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
1381 cpumask_set_cpu(cpu, &cpus_with_pcps); in drain_all_pages()
1383 cpumask_clear_cpu(cpu, &cpus_with_pcps); in drain_all_pages()
3225 int cpu; in show_free_areas() local
3234 for_each_online_cpu(cpu) { in show_free_areas()
3237 pageset = per_cpu_ptr(zone->pageset, cpu); in show_free_areas()
[all …]
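
drain_all_pages() first builds a cpumask of the CPUs that actually hold pcp pages, then targets only those. The selection step, sketched on top of the hypothetical my_batch_needs_drain() from the swap.c sketch above:

#include <linux/cpumask.h>

static void my_select_busy_cpus(struct cpumask *mask)
{
        int cpu;

        for_each_online_cpu(cpu) {
                /* Only CPUs with queued work get targeted later. */
                if (my_batch_needs_drain(cpu))
                        cpumask_set_cpu(cpu, mask);
                else
                        cpumask_clear_cpu(cpu, mask);
        }
}
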
kmemleak.c
871 unsigned int cpu; in early_alloc_percpu() local
874 for_each_possible_cpu(cpu) { in early_alloc_percpu()
875 log->ptr = per_cpu_ptr(ptr, cpu); in early_alloc_percpu()
918 unsigned int cpu; in kmemleak_alloc_percpu() local
927 for_each_possible_cpu(cpu) in kmemleak_alloc_percpu()
928 create_object((unsigned long)per_cpu_ptr(ptr, cpu), in kmemleak_alloc_percpu()
982 unsigned int cpu; in kmemleak_free_percpu() local
987 for_each_possible_cpu(cpu) in kmemleak_free_percpu()
989 cpu)); in kmemleak_free_percpu()
zsmalloc.c
1152 int ret, cpu = (long)pcpu; in zs_cpu_notifier() local
1157 area = &per_cpu(zs_map_area, cpu); in zs_cpu_notifier()
1164 area = &per_cpu(zs_map_area, cpu); in zs_cpu_notifier()
1178 int cpu, uninitialized_var(ret); in zs_register_cpu_notifier() local
1183 for_each_online_cpu(cpu) { in zs_register_cpu_notifier()
1184 ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu); in zs_register_cpu_notifier()
1195 int cpu; in zs_unregister_cpu_notifier() local
1199 for_each_online_cpu(cpu) in zs_unregister_cpu_notifier()
1200 zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu); in zs_unregister_cpu_notifier()
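
zs_register_cpu_notifier() replays CPU_UP_PREPARE by hand for CPUs that were already online when the notifier was registered, and zs_unregister_cpu_notifier() undoes it with CPU_DEAD. A sketch of the replay loop, reusing the hypothetical my_cpu_notifier() from the zswap sketch:

#include <linux/cpu.h>

static int my_register_cpu_notifier(void)
{
        int cpu, err = 0;

        get_online_cpus();      /* hold off hotplug while we replay */
        for_each_online_cpu(cpu) {
                err = notifier_to_errno(my_cpu_notifier(CPU_UP_PREPARE, cpu));
                if (err)
                        break;
        }
        put_online_cpus();
        return err;
}
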
memcontrol.c
869 int cpu; in mem_cgroup_read_stat() local
872 for_each_online_cpu(cpu) in mem_cgroup_read_stat()
873 val += per_cpu(memcg->stat->count[idx], cpu); in mem_cgroup_read_stat()
887 int cpu; in mem_cgroup_read_events() local
890 for_each_online_cpu(cpu) in mem_cgroup_read_events()
891 val += per_cpu(memcg->stat->events[idx], cpu); in mem_cgroup_read_events()
2364 int cpu; in memcg_stock_init() local
2366 for_each_possible_cpu(cpu) { in memcg_stock_init()
2368 &per_cpu(memcg_stock, cpu); in memcg_stock_init()
2396 int cpu, curcpu; in drain_all_stock() local
[all …]
memory_hotplug.c
1889 int cpu; in check_cpu_on_node() local
1891 for_each_present_cpu(cpu) { in check_cpu_on_node()
1892 if (cpu_to_node(cpu) == pgdat->node_id) in check_cpu_on_node()
1906 int cpu; in unmap_cpu_on_node() local
1908 for_each_possible_cpu(cpu) in unmap_cpu_on_node()
1909 if (cpu_to_node(cpu) == pgdat->node_id) in unmap_cpu_on_node()
1910 numa_clear_node(cpu); in unmap_cpu_on_node()
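
memory_hotplug.c ties CPU and node offlining together: a node may only go away once no present CPU maps to it, and unmap_cpu_on_node() severs the mapping for possible-but-absent CPUs. The check, sketched (my_node_has_cpu is a hypothetical name):

#include <linux/cpumask.h>
#include <linux/topology.h>

static bool my_node_has_cpu(int nid)
{
        int cpu;

        /* Present, not merely possible: absent CPUs don't pin a node. */
        for_each_present_cpu(cpu)
                if (cpu_to_node(cpu) == nid)
                        return true;
        return false;
}
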
vmalloc.c
863 static void purge_fragmented_blocks(int cpu) in purge_fragmented_blocks() argument
868 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); in purge_fragmented_blocks()
899 int cpu; in purge_fragmented_blocks_allcpus() local
901 for_each_possible_cpu(cpu) in purge_fragmented_blocks_allcpus()
902 purge_fragmented_blocks(cpu); in purge_fragmented_blocks_allcpus()
1015 int cpu; in vm_unmap_aliases() local
1021 for_each_possible_cpu(cpu) { in vm_unmap_aliases()
1022 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); in vm_unmap_aliases()
memory-failure.c
1379 int cpu; in memory_failure_init() local
1381 for_each_possible_cpu(cpu) { in memory_failure_init()
1382 mf_cpu = &per_cpu(memory_failure_cpu, cpu); in memory_failure_init()