
Searched refs:cpu (Results 1 – 18 of 18) sorted by relevance

/mm/
percpu-vm.c
14 unsigned int cpu, int page_idx) in pcpu_chunk_page() argument
19 return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); in pcpu_chunk_page()
58 unsigned int cpu; in pcpu_free_pages() local
61 for_each_possible_cpu(cpu) { in pcpu_free_pages()
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
86 unsigned int cpu, tcpu; in pcpu_alloc_pages() local
89 for_each_possible_cpu(cpu) { in pcpu_alloc_pages()
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
93 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); in pcpu_alloc_pages()
102 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
[all …]
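
The pcpu_alloc_pages() hits above show the NUMA-aware allocation idiom: one page per possible CPU, taken from that CPU's home node via cpu_to_node(), with a second loop unwinding partial progress on failure. A minimal sketch, assuming a pages[] array sized nr_cpu_ids and indexed directly by CPU number (the real code indexes through pcpu_page_idx()):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/topology.h>

static int alloc_one_page_per_cpu(struct page **pages, gfp_t gfp)
{
	unsigned int cpu, tcpu;

	for_each_possible_cpu(cpu) {
		/* Allocate on the node this CPU belongs to. */
		struct page *page = alloc_pages_node(cpu_to_node(cpu), gfp, 0);

		if (!page)
			goto err;
		pages[cpu] = page;
	}
	return 0;

err:
	/* Unwind everything allocated before the failing CPU. */
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__free_page(pages[tcpu]);
	}
	return -ENOMEM;
}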
percpu.c
242 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) in pcpu_page_idx() argument
244 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; in pcpu_page_idx()
248 unsigned int cpu, int page_idx) in pcpu_chunk_addr() argument
250 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] + in pcpu_chunk_addr()
879 int slot, off, new_alloc, cpu, ret; in pcpu_alloc() local
1026 for_each_possible_cpu(cpu) in pcpu_alloc()
1027 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); in pcpu_alloc()
1305 unsigned int cpu; in is_kernel_percpu_address() local
1307 for_each_possible_cpu(cpu) { in is_kernel_percpu_address()
1308 void *start = per_cpu_ptr(base, cpu); in is_kernel_percpu_address()
[all …]
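
The pcpu_chunk_addr() and pcpu_page_idx() hits above are the heart of the per-CPU address scheme: each CPU maps to a unit, each unit sits at a fixed byte offset from the chunk base, and a page index selects a page inside the unit. A sketch of the arithmetic, with unit_map[]/unit_offsets[] standing in for the kernel's pcpu_unit_map[]/pcpu_unit_offsets[]:

#include <linux/mm.h>

/* Linear page index of (cpu, page_idx) within a chunk's page array. */
static int unit_page_idx(const int *unit_map, int unit_pages,
			 unsigned int cpu, int page_idx)
{
	return unit_map[cpu] * unit_pages + page_idx;
}

/* Virtual address of that page: chunk base + per-CPU unit offset. */
static unsigned long unit_chunk_addr(void *base_addr,
				     const unsigned long *unit_offsets,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)base_addr + unit_offsets[cpu] +
	       ((unsigned long)page_idx << PAGE_SHIFT);
}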
vmstat.c
39 int cpu; in sum_vm_events() local
44 for_each_online_cpu(cpu) { in sum_vm_events()
45 struct vm_event_state *this = &per_cpu(vm_event_states, cpu); in sum_vm_events()
71 void vm_events_fold_cpu(int cpu) in vm_events_fold_cpu() argument
73 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu); in vm_events_fold_cpu()
171 int cpu; in refresh_zone_stat_thresholds() local
179 for_each_online_cpu(cpu) in refresh_zone_stat_thresholds()
180 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in refresh_zone_stat_thresholds()
200 int cpu; in set_pgdat_percpu_threshold() local
210 for_each_online_cpu(cpu) in set_pgdat_percpu_threshold()
[all …]
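
sum_vm_events() above illustrates the standard read side of a per-CPU statistics array: walk the online CPUs and fold each CPU's private copy into one total. A minimal sketch, assuming a hypothetical my_event_states/NR_MY_EVENTS in place of vm_event_states/NR_VM_EVENT_ITEMS (the real caller holds get_online_cpus() so CPUs cannot go away mid-walk):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/string.h>

#define NR_MY_EVENTS 4

struct my_event_state {
	unsigned long event[NR_MY_EVENTS];
};

static DEFINE_PER_CPU(struct my_event_state, my_event_states);

static void sum_my_events(unsigned long *ret)
{
	int cpu, i;

	memset(ret, 0, NR_MY_EVENTS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct my_event_state *this = &per_cpu(my_event_states, cpu);

		for (i = 0; i < NR_MY_EVENTS; i++)
			ret[i] += this->event[i];
	}
}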
zswap.c
364 static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu) in __zswap_cpu_dstmem_notifier() argument
370 dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu)); in __zswap_cpu_dstmem_notifier()
375 per_cpu(zswap_dstmem, cpu) = dst; in __zswap_cpu_dstmem_notifier()
379 dst = per_cpu(zswap_dstmem, cpu); in __zswap_cpu_dstmem_notifier()
381 per_cpu(zswap_dstmem, cpu) = NULL; in __zswap_cpu_dstmem_notifier()
401 unsigned long cpu; in zswap_cpu_dstmem_init() local
404 for_each_online_cpu(cpu) in zswap_cpu_dstmem_init()
405 if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) == in zswap_cpu_dstmem_init()
413 for_each_online_cpu(cpu) in zswap_cpu_dstmem_init()
414 __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu); in zswap_cpu_dstmem_init()
[all …]
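
__zswap_cpu_dstmem_notifier() above has the old-style hotplug notifier shape (kernels predating the cpuhp state machine): allocate a per-CPU scratch buffer on the incoming CPU's node at CPU_UP_PREPARE, free it on CPU_DEAD or CPU_UP_CANCELED. A hedged sketch with my_dstmem standing in for zswap_dstmem:

#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(u8 *, my_dstmem);

static int my_dstmem_notifier(unsigned long action, unsigned long cpu)
{
	u8 *dst;

	switch (action) {
	case CPU_UP_PREPARE:
		if (per_cpu(my_dstmem, cpu))
			break;
		/* Two pages of scratch space, on the CPU's own node. */
		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
		if (!dst)
			return NOTIFY_BAD;
		per_cpu(my_dstmem, cpu) = dst;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		kfree(per_cpu(my_dstmem, cpu));
		per_cpu(my_dstmem, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

zswap_cpu_dstmem_init() then runs this for every online CPU and, on failure, re-walks the online CPUs with CPU_UP_CANCELED to undo the partial init, as the lines above show.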
swap.c
523 static void activate_page_drain(int cpu) in activate_page_drain() argument
525 struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu); in activate_page_drain()
531 static bool need_activate_page_drain(int cpu) in need_activate_page_drain() argument
533 return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0; in need_activate_page_drain()
549 static inline void activate_page_drain(int cpu) in activate_page_drain() argument
553 static bool need_activate_page_drain(int cpu) in need_activate_page_drain() argument
807 void lru_add_drain_cpu(int cpu) in lru_add_drain_cpu() argument
809 struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu); in lru_add_drain_cpu()
814 pvec = &per_cpu(lru_rotate_pvecs, cpu); in lru_add_drain_cpu()
824 pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); in lru_add_drain_cpu()
[all …]
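
swap.c above keeps one pagevec per CPU (lru_add_pvec, lru_rotate_pvecs, ...) and pairs each with a cheap predicate so drain work is only scheduled on CPUs that actually have pending pages. A minimal sketch, with my_pvecs and the flush callback as hypothetical stand-ins:

#include <linux/pagevec.h>
#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(struct pagevec, my_pvecs);

/* Is there anything on this CPU's pagevec worth draining? */
static bool need_my_drain(int cpu)
{
	return pagevec_count(&per_cpu(my_pvecs, cpu)) != 0;
}

/* Flush this CPU's pagevec if it is non-empty. */
static void my_drain_cpu(int cpu, void (*flush)(struct pagevec *))
{
	struct pagevec *pvec = &per_cpu(my_pvecs, cpu);

	if (pagevec_count(pvec))
		flush(pvec);
}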
quicklist.c
92 int cpu; in quicklist_total_size() local
95 for_each_online_cpu(cpu) { in quicklist_total_size()
96 ql = per_cpu(quicklist, cpu); in quicklist_total_size()
page_alloc.c
1964 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
1971 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
1988 static void drain_pages(unsigned int cpu) in drain_pages() argument
1993 drain_pages_zone(cpu, zone); in drain_pages()
2005 int cpu = smp_processor_id(); in drain_local_pages() local
2008 drain_pages_zone(cpu, zone); in drain_local_pages()
2010 drain_pages(cpu); in drain_local_pages()
2026 int cpu; in drain_all_pages() local
2040 for_each_online_cpu(cpu) { in drain_all_pages()
2046 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
[all …]
slab.c
595 static void init_reap_node(int cpu) in init_reap_node() argument
599 node = next_node(cpu_to_mem(cpu), node_online_map); in init_reap_node()
603 per_cpu(slab_reap_node, cpu) = node; in init_reap_node()
617 #define init_reap_node(cpu) do { } while (0) argument
628 static void start_cpu_timer(int cpu) in start_cpu_timer() argument
630 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); in start_cpu_timer()
638 init_reap_node(cpu); in start_cpu_timer()
640 schedule_delayed_work_on(cpu, reap_work, in start_cpu_timer()
641 __round_jiffies_relative(HZ, cpu)); in start_cpu_timer()
1083 static void cpuup_canceled(long cpu) in cpuup_canceled() argument
[all …]
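
start_cpu_timer() above arms a per-CPU delayed work item, pinned to its CPU with schedule_delayed_work_on() and staggered with __round_jiffies_relative() so all CPUs don't reap at the same jiffy. A sketch under the assumption of a hypothetical my_reap() handler:

#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct delayed_work, my_reap_work);

static void my_reap(struct work_struct *w)
{
	/* periodic per-CPU cache reaping would go here */
}

static void start_my_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(my_reap_work, cpu);

	/* Arm each CPU's timer exactly once. */
	if (reap_work->work.func == NULL) {
		INIT_DEFERRABLE_WORK(reap_work, my_reap);
		schedule_delayed_work_on(cpu, reap_work,
					 __round_jiffies_relative(HZ, cpu));
	}
}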
slub.c
202 int cpu; /* Was running on cpu */ member
569 p->cpu = smp_processor_id(); in set_track()
591 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); in print_track()
1869 static inline unsigned int init_tid(int cpu) in init_tid() argument
1871 return cpu; in init_tid()
1900 int cpu; in init_kmem_cache_cpus() local
1902 for_each_possible_cpu(cpu) in init_kmem_cache_cpus()
1903 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); in init_kmem_cache_cpus()
2195 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) in __flush_cpu_slab() argument
2197 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
[all …]
kmemleak.c
885 unsigned int cpu; in early_alloc_percpu() local
888 for_each_possible_cpu(cpu) { in early_alloc_percpu()
889 log->ptr = per_cpu_ptr(ptr, cpu); in early_alloc_percpu()
932 unsigned int cpu; in kmemleak_alloc_percpu() local
941 for_each_possible_cpu(cpu) in kmemleak_alloc_percpu()
942 create_object((unsigned long)per_cpu_ptr(ptr, cpu), in kmemleak_alloc_percpu()
996 unsigned int cpu; in kmemleak_free_percpu() local
1001 for_each_possible_cpu(cpu) in kmemleak_free_percpu()
1003 cpu)); in kmemleak_free_percpu()
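
The kmemleak hooks above walk dynamically allocated per-CPU areas: alloc_percpu() returns a __percpu cookie, and per_cpu_ptr() resolves it to each CPU's private instance so every copy can be registered (or, on free, unregistered) individually. A minimal sketch of that iteration, with a plain initialization in place of kmemleak's create_object() call:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>

struct my_counter {
	unsigned long hits;
};

static int init_all_counters(struct my_counter __percpu **out)
{
	struct my_counter __percpu *ptr = alloc_percpu(struct my_counter);
	unsigned int cpu;

	if (!ptr)
		return -ENOMEM;

	/* Visit each CPU's private copy of the object. */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(ptr, cpu)->hits = 0;

	*out = ptr;
	return 0;
}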
zsmalloc.c
1157 int ret, cpu = (long)pcpu; in zs_cpu_notifier() local
1162 area = &per_cpu(zs_map_area, cpu); in zs_cpu_notifier()
1169 area = &per_cpu(zs_map_area, cpu); in zs_cpu_notifier()
1183 int cpu, uninitialized_var(ret); in zs_register_cpu_notifier() local
1188 for_each_online_cpu(cpu) { in zs_register_cpu_notifier()
1189 ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu); in zs_register_cpu_notifier()
1200 int cpu; in zs_unregister_cpu_notifier() local
1204 for_each_online_cpu(cpu) in zs_unregister_cpu_notifier()
1205 zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu); in zs_unregister_cpu_notifier()
memory_hotplug.c
1959 int cpu; in check_cpu_on_node() local
1961 for_each_present_cpu(cpu) { in check_cpu_on_node()
1962 if (cpu_to_node(cpu) == pgdat->node_id) in check_cpu_on_node()
1976 int cpu; in unmap_cpu_on_node() local
1978 for_each_possible_cpu(cpu) in unmap_cpu_on_node()
1979 if (cpu_to_node(cpu) == pgdat->node_id) in unmap_cpu_on_node()
1980 numa_clear_node(cpu); in unmap_cpu_on_node()
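
check_cpu_on_node() above gates node removal: a node may only go away once no present CPU maps to it. The check reduces to a short loop, sketched here:

#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/types.h>

static bool node_has_present_cpu(int nid)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (cpu_to_node(cpu) == nid)
			return true;

	return false;
}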
vmalloc.c
895 static void purge_fragmented_blocks(int cpu) in purge_fragmented_blocks() argument
900 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); in purge_fragmented_blocks()
932 int cpu; in purge_fragmented_blocks_allcpus() local
934 for_each_possible_cpu(cpu) in purge_fragmented_blocks_allcpus()
935 purge_fragmented_blocks(cpu); in purge_fragmented_blocks_allcpus()
1047 int cpu; in vm_unmap_aliases() local
1053 for_each_possible_cpu(cpu) { in vm_unmap_aliases()
1054 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); in vm_unmap_aliases()
memcontrol.c
659 int cpu; in mem_cgroup_read_stat() local
662 for_each_possible_cpu(cpu) in mem_cgroup_read_stat()
663 val += per_cpu(memcg->stat->count[idx], cpu); in mem_cgroup_read_stat()
677 int cpu; in mem_cgroup_read_events() local
679 for_each_possible_cpu(cpu) in mem_cgroup_read_events()
680 val += per_cpu(memcg->stat->events[idx], cpu); in mem_cgroup_read_events()
1960 int cpu, curcpu; in drain_all_stock() local
1968 for_each_online_cpu(cpu) { in drain_all_stock()
1969 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); in drain_all_stock()
1978 if (cpu == curcpu) in drain_all_stock()
[all …]
rmap.c
599 int cpu; in try_to_unmap_flush() local
604 cpu = get_cpu(); in try_to_unmap_flush()
606 if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) { in try_to_unmap_flush()
612 if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) in try_to_unmap_flush()
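
try_to_unmap_flush() above shows the pinned-CPU pattern: get_cpu() disables preemption so the returned CPU number stays valid, the batched cpumask is tested for the local CPU and then for any remote CPU, and put_cpu() re-enables preemption. A sketch with the local/remote flush paths abstracted into hypothetical callbacks:

#include <linux/cpumask.h>
#include <linux/smp.h>

static void flush_pending(struct cpumask *mask,
			  void (*flush_local)(void),
			  void (*flush_others)(struct cpumask *))
{
	int cpu = get_cpu();	/* preemption disabled from here */

	if (cpumask_test_cpu(cpu, mask))
		flush_local();

	/* Any CPU in the mask besides this one needs a remote flush. */
	if (cpumask_any_but(mask, cpu) < nr_cpu_ids)
		flush_others(mask);

	cpumask_clear(mask);
	put_cpu();		/* preemption back on */
}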
compaction.c
1448 int cpu; in compact_zone() local
1453 cpu = get_cpu(); in compact_zone()
1454 lru_add_drain_cpu(cpu); in compact_zone()
memory-failure.c
1397 int cpu; in memory_failure_init() local
1399 for_each_possible_cpu(cpu) { in memory_failure_init()
1400 mf_cpu = &per_cpu(memory_failure_cpu, cpu); in memory_failure_init()
swapfile.c
2485 int cpu; in SYSCALL_DEFINE2() local
2505 for_each_possible_cpu(cpu) { in SYSCALL_DEFINE2()
2507 cluster = per_cpu_ptr(p->percpu_cluster, cpu); in SYSCALL_DEFINE2()